import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

model_repo_path = 'nxmwxm/answer_generator'
tokenizer_path = 'nxmwxm/answer_generator'
# Load the tokenizer and the seq2seq model from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path)

# Build a generation pipeline with the model and tokenizer.
# The extractive 'question-answering' task only supports span-prediction
# models, so a seq2seq model needs 'text2text-generation' instead.
qa_pipeline = pipeline(
    'text2text-generation',
    model=model,
    tokenizer=tokenizer
)
def generate_answer(question, context):
    # Combine question and context into a single prompt (assumes a
    # T5-style format; adjust to match how the model was fine-tuned)
    prompt = f'question: {question} context: {context}'
    result = qa_pipeline(prompt)
    # The generation pipeline returns a list of dicts with 'generated_text'
    return result[0]['generated_text'] if result else 'No answer found'
st.title('Question Answering Model')
question = st.text_input('Enter your question:')
context = st.text_area('Enter context for the question:')

if st.button('Generate Answer'):
    answer = generate_answer(question, context)
    st.write('Answer:', answer)
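
# To try the app locally (assuming this file is saved as app.py and the
# required packages are installed):
#   pip install streamlit transformers torch
#   streamlit run app.py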