nxmwxm committed on
Commit
fab1fa8
·
verified ·
1 Parent(s): 70e157c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -8
app.py CHANGED
@@ -1,19 +1,20 @@
1
import streamlit as st
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Replace with your Hugging Face model repository path
model_repo_path = 'nxmwxm/correct_answer'

# Load the model and tokenizer.
# Fix: the original called BartForConditionalGeneration / BartTokenizer,
# neither of which is imported (only the T5 classes are), so this module
# raised NameError on startup. Use the imported T5 classes instead.
model = T5ForConditionalGeneration.from_pretrained(model_repo_path)
tokenizer = T5Tokenizer.from_pretrained(model_repo_path)
11
def generate_answer(question, distractors):
    """Build a prompt from the question and its distractors, run the
    seq2seq model, and return the decoded generation as plain text.

    Relies on the module-level ``tokenizer`` and ``model`` globals.
    """
    prompt = f"Question: {question} Distractors: {' '.join(distractors)}"
    encoded = tokenizer(prompt, return_tensors='pt')
    generated_ids = model.generate(
        encoded['input_ids'],
        attention_mask=encoded['attention_mask'],
    )
    # skip_special_tokens strips pad/eos markers so the caller gets clean text.
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
17
 
18
# Page header and the single input widget collecting the user's question.
st.title('Question Answering Model')
question = st.text_input('Enter your question:')
 
1
import streamlit as st
from transformers import pipeline, T5Tokenizer, T5ForConditionalGeneration

# Replace with your Hugging Face model repository path
model_repo_path = 'nxmwxm/correct_answer'

# Build a question-answering pipeline backed by the hosted model/tokenizer.
# NOTE(review): T5Tokenizer / T5ForConditionalGeneration are imported but no
# longer used after the switch to pipeline() — confirm before removing.
# NOTE(review): the 'question-answering' task expects an extractive QA head;
# verify the checkpoint at model_repo_path is compatible with this task.
qa_pipeline = pipeline(
    'question-answering',
    model=model_repo_path,
    tokenizer=model_repo_path,
)
13
 
14
def generate_answer(question, distractors):
    """Run the QA pipeline on a prompt built from the question and its
    distractors, returning the predicted answer string.

    Relies on the module-level ``qa_pipeline`` global.
    """
    prompt = f"Question: {question} Distractors: {' '.join(distractors)}"
    # NOTE(review): the same prompt is passed as both question and context —
    # confirm this is what the fine-tuned model expects.
    prediction = qa_pipeline(question=prompt, context=prompt)
    return prediction['answer']
 
 
18
 
19
# Page header and the single input widget collecting the user's question.
st.title('Question Answering Model')
question = st.text_input('Enter your question:')