Update app.py
app.py CHANGED
@@ -1,19 +1,20 @@
 import streamlit as st
-from transformers import T5Tokenizer, T5ForConditionalGeneration
+from transformers import pipeline, T5Tokenizer, T5ForConditionalGeneration
 
 # Replace with your Hugging Face model repository path
 model_repo_path = 'nxmwxm/correct_answer'
 
-# Load the model and tokenizer
-tokenizer = T5Tokenizer.from_pretrained(model_repo_path)
-model = T5ForConditionalGeneration.from_pretrained(model_repo_path)
+# Load the pipeline with your model and tokenizer
+qa_pipeline = pipeline(
+    'question-answering',
+    model=model_repo_path,
+    tokenizer=model_repo_path
+)
 
 def generate_answer(question, distractors):
     input_text = f"Question: {question} Distractors: {' '.join(distractors)}"
-    input_ids = tokenizer(input_text, return_tensors='pt').input_ids
-    output = model.generate(input_ids)
-    decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
-    return decoded_output
+    result = qa_pipeline(question=input_text, context=input_text)
+    return result['answer']
 
 st.title('Question Answering Model')
 question = st.text_input('Enter your question:')
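Note: the new code routes the checkpoint through the extractive 'question-answering' pipeline and passes the same string as both question and context. If nxmwxm/correct_answer was fine-tuned as a seq2seq model (T5ForConditionalGeneration, as the removed code suggests), a 'text2text-generation' pipeline may be the closer fit. A minimal sketch under that assumption follows; the prompt format and max_new_tokens value are illustrative, not taken from the repo:

from transformers import pipeline

model_repo_path = 'nxmwxm/correct_answer'

# Assumption: the checkpoint maps the prompt string directly to the answer
# text, so seq2seq generation replaces extractive span selection.
qa_pipeline = pipeline(
    'text2text-generation',
    model=model_repo_path,
    tokenizer=model_repo_path
)

def generate_answer(question, distractors):
    input_text = f"Question: {question} Distractors: {' '.join(distractors)}"
    # The pipeline returns a list of dicts with a 'generated_text' key.
    result = qa_pipeline(input_text, max_new_tokens=64)
    return result[0]['generated_text']

# Example call (hypothetical inputs):
# generate_answer('What is the capital of France?', ['Berlin', 'Madrid', 'Rome'])

With this task the model generates the answer token by token instead of selecting a span from a context string, which is why no separate context argument is needed.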