nxmwxm committed on
Commit 47e14ee · verified · 1 Parent(s): b93e68e

Create app.py

Files changed (1): app.py +25 -0
app.py ADDED
@@ -0,0 +1,25 @@
+ import streamlit as st
+ from transformers import pipeline
+
+ # Hugging Face model repository path for the fine-tuned answer generator
+ model_repo_path = 'nxmwxm/answer_generator'
+
+ # Load the question-answering pipeline with the model and its tokenizer
+ qa_pipeline = pipeline(
+     'question-answering',
+     model=model_repo_path,
+     tokenizer=model_repo_path
+ )
+
+ def generate_answer(question, distractors):
+     # Build a single prompt from the question and its distractors
+     input_text = f"Question: {question} Distractors: {' '.join(distractors)}"
+     result = qa_pipeline(question=input_text, context=input_text)
+     return result['answer']
+
+ st.title('Question Answering Model')
+ question = st.text_input('Enter your question:')
+ distractors = [d.strip() for d in st.text_input('Enter distractors (comma separated):').split(',')]
+
+ if st.button('Generate Answer'):
+     answer = generate_answer(question, distractors)
+     st.write('Answer:', answer)
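
For a quick sanity check outside the Streamlit UI, the same pipeline call can be exercised from a plain Python script. The sketch below is illustrative only: it assumes the nxmwxm/answer_generator checkpoint is compatible with the transformers question-answering pipeline, exactly as app.py assumes, and the sample question and distractors are made up.

from transformers import pipeline

# Same checkpoint app.py loads (assumed compatible with the QA pipeline)
model_repo_path = 'nxmwxm/answer_generator'
qa_pipeline = pipeline('question-answering', model=model_repo_path, tokenizer=model_repo_path)

# Hypothetical inputs, mirroring what the Streamlit widgets would collect
question = 'Which gas do plants absorb during photosynthesis?'
distractors = ['Oxygen', 'Nitrogen', 'Hydrogen']

# Same prompt construction and call as generate_answer() in app.py
input_text = f"Question: {question} Distractors: {' '.join(distractors)}"
result = qa_pipeline(question=input_text, context=input_text)
print(result['answer'])

Once this script-level call behaves as expected, the app itself can be launched with streamlit run app.py.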