Install the dependencies:

pip install -r requirements.txt
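The contents of requirements.txt are not shown in the source. A minimal sketch, assuming the app only needs Streamlit, Transformers, and a PyTorch backend, might look like this (pin versions as needed):

# requirements.txt (assumed contents)
streamlit
transformers
torch
sentencepiece  # commonly needed for T5 tokenizers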
The Streamlit app loads the fine-tuned T5 question-answering model and serves it through a simple form:

import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Load the fine-tuned model and its tokenizer
model_name = "t5_history_qa"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
qa_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

# Streamlit app
st.title("History QA with T5 Model")
st.write("Enter the historical context and your question below:")

context = st.text_area("Context", height=200)
question = st.text_input("Question")

if st.button("Get Answer"):
    # T5 expects the question and context combined into a single prompt string
    input_text = f"question: {question} context: {context}"
    result = qa_pipeline(input_text)
    answer = result[0]["generated_text"]
    st.write("**Answer:**")
    st.write(answer)
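Assuming the script above is saved as app.py (the filename is not given in the source), the app can be started locally with Streamlit's standard runner:

streamlit run app.py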