QuindeelFatima committed
Commit 6fb1ccb · verified · 1 Parent(s): f94db01

Create app.py

Files changed (1):
  app.py +44 -0
app.py ADDED
@@ -0,0 +1,44 @@
+ import streamlit as st
+ import torch
+ from transformers import BartForConditionalGeneration, BartTokenizer
+
+ # Load the model and tokenizer
+ model_repo_path = 'AbdurRehman313/hotpotQA_BART_Finetuned_E5'
+ model = BartForConditionalGeneration.from_pretrained(model_repo_path)
+ tokenizer = BartTokenizer.from_pretrained(model_repo_path)
+
+ # Put the model in evaluation mode and move it to the available device
+ model.eval()
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ model.to(device)
+
+ # Streamlit app layout
+ st.title("Multi-Hop Question Answering Application")
+
+ # User input for context and question
+ context_input = st.text_area("Enter context", height=200)
+ question_input = st.text_area("Enter question")
+
+ # Generate the answer
+ if st.button("Get Answer"):
+     if context_input and question_input:
+         with st.spinner("Generating answer..."):
+             try:
+                 # Prepare the input for the model (truncate to BART's 1024-token limit)
+                 input_text = f"context: {context_input} question: {question_input}"
+                 inputs = tokenizer(input_text, return_tensors='pt', truncation=True, max_length=1024)
+                 inputs = {key: value.to(device) for key, value in inputs.items()}
+
+                 # Perform inference, passing the attention mask along with the input IDs
+                 with torch.no_grad():
+                     outputs = model.generate(inputs['input_ids'], attention_mask=inputs['attention_mask'], max_length=50)
+
+                 # Decode the output
+                 answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+                 st.subheader("Answer")
+                 st.write(answer)
+             except Exception as e:
+                 st.error(f"Error during question answering: {e}")
+     else:
+         st.warning("Please enter both context and question.")
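One refactor worth noting: Streamlit re-runs the entire script on every widget interaction, so the file above reloads the BART checkpoint on each button click. Below is a minimal sketch of how the loading step could be cached with st.cache_resource (available in Streamlit 1.18+) so the model is loaded once per process; the load_model helper name is illustrative, not part of the commit.

import streamlit as st
import torch
from transformers import BartForConditionalGeneration, BartTokenizer

@st.cache_resource  # cached across reruns: the body executes once per process
def load_model(repo_path):
    # load_model is a hypothetical helper; the repo path is the one used in the commit
    model = BartForConditionalGeneration.from_pretrained(repo_path)
    tokenizer = BartTokenizer.from_pretrained(repo_path)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    model.eval()
    return model, tokenizer, device

model, tokenizer, device = load_model('AbdurRehman313/hotpotQA_BART_Finetuned_E5')

With streamlit, torch, and transformers installed, the app can be started locally with: streamlit run app.py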