"""Streamlit app: extract the correct answer to a question from candidate options."""
import streamlit as st
from transformers import pipeline, T5Tokenizer, T5ForConditionalGeneration  # NOTE(review): T5 classes look unused — confirm before removing

# Hugging Face Hub repository hosting the fine-tuned QA model.
model_repo_path = 'nxmwxm/correct_answer'


@st.cache_resource
def _load_qa_pipeline():
    """Build the question-answering pipeline once and reuse it across reruns.

    Streamlit re-executes this whole script on every user interaction;
    without caching, the model and tokenizer would be re-loaded each time.
    """
    return pipeline(
        'question-answering',
        model=model_repo_path,
        tokenizer=model_repo_path,
    )


qa_pipeline = _load_qa_pipeline()


def generate_answer(question, distractors):
    """Return the answer span the model extracts for *question*.

    Parameters
    ----------
    question : str
        The question to answer.
    distractors : list[str]
        Candidate answer strings; together they form the context the
        extractive QA model selects its answer from.

    Returns
    -------
    str
        The extracted answer text.
    """
    # Fix: the original passed one concatenated string as BOTH `question`
    # and `context`, so the model could only "extract" from the prompt
    # itself. An extractive QA pipeline expects the question and a separate
    # context containing the candidates. Fall back to the question itself
    # if no distractors were supplied (pipeline rejects an empty context).
    context = ' '.join(d for d in distractors if d) or question
    result = qa_pipeline(question=question, context=context)
    return result['answer']


st.title('Question Answering Model')
question = st.text_input('Enter your question:')
# Split on commas, trimming whitespace and dropping empty entries so that
# "a, b , ,c" yields ['a', 'b', 'c'] rather than ['a', ' b ', ' ', 'c'].
distractors = [
    d.strip()
    for d in st.text_input('Enter distractors (comma separated):').split(',')
    if d.strip()
]

if st.button('Generate Answer'):
    if not question.strip():
        # Guard: don't query the model with an empty question.
        st.warning('Please enter a question first.')
    else:
        answer = generate_answer(question, distractors)
        st.write('Answer:', answer)