import streamlit as st
from transformers import pipeline
# Define the path to the saved model
model_path = './QAModel' # Path to your fine-tuned model
# Load the question-answering pipeline
qa_pipeline = pipeline("question-answering", model=model_path, tokenizer=model_path)
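# Note: Streamlit re-executes this whole script on every interaction, so the
# pipeline above is reloaded each time. A minimal sketch of caching the load
# instead (assumes a Streamlit version that provides st.cache_resource):
#
# @st.cache_resource
# def load_qa_pipeline(path):
#     return pipeline("question-answering", model=path, tokenizer=path)
#
# qa_pipeline = load_qa_pipeline(model_path)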
# Load the context from a file
context_file = 'context.txt' # Replace with your context file path
with open(context_file, 'r', encoding='utf-8') as f:
    default_context = f.read()
# Set the title for the Streamlit app
st.title("Movie Trivia Question Answering")
# Text input for the user question
question = st.text_area("Enter your question:")
def generate_answer(question, context):
    # Perform question answering over the loaded context
    result = qa_pipeline(question=question, context=context)
    return result['answer']
if st.button("Get Answer"):
    if question:
        generated_answer = generate_answer(question, default_context)
        # Display the generated answer
        st.subheader("Answer")
        st.write(generated_answer)
    else:
        st.warning("Please enter a question.")
# Optionally, add instructions or information about the app
st.write("""
Enter a question related to the movie context loaded by the app. The model will answer based on that context.
""")