AbdurRehman313 committed
Commit d699977 · verified · 1 Parent(s): c754cc2

Create app.py

Files changed (1): app.py +104 -0
app.py ADDED
@@ -0,0 +1,104 @@
import streamlit as st
import torch
from transformers import BartForConditionalGeneration, BartTokenizer

# Load the fine-tuned model and tokenizer from the Hugging Face Hub
model_repo_path = 'AbdurRehman313/hotpotQA_BART_Finetuned_E5'
model = BartForConditionalGeneration.from_pretrained(model_repo_path)
tokenizer = BartTokenizer.from_pretrained(model_repo_path)

# Put the model in evaluation mode and move it to the GPU if one is available
model.eval()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
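
# Note that Streamlit re-executes this whole script on every widget
# interaction, so the model above is reloaded on each rerun. A minimal
# caching sketch (an optional refinement, assuming a Streamlit version
# that provides st.cache_resource, i.e. >= 1.18):
#
# @st.cache_resource
# def load_model(repo_path):
#     model = BartForConditionalGeneration.from_pretrained(repo_path)
#     tokenizer = BartTokenizer.from_pretrained(repo_path)
#     return model, tokenizer
#
# model, tokenizer = load_model(model_repo_path)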

# Streamlit app layout
st.title("Question Answering App")

# User input for context and question
context_input = st.text_area("Enter context", height=200)
question_input = st.text_area("Enter question")

# Generate the answer
if st.button("Get Answer"):
    if context_input and question_input:
        with st.spinner("Generating answer..."):
            try:
                # Prepare the input for the model
                input_text = f"context: {context_input} question: {question_input}"
                # Truncate to BART's 1024-token limit so long contexts don't error
                inputs = tokenizer(input_text, return_tensors='pt',
                                   truncation=True, max_length=1024)
                inputs = {key: value.to(device) for key, value in inputs.items()}

                # Perform inference (pass input_ids and attention_mask together)
                with torch.no_grad():
                    outputs = model.generate(**inputs, max_length=50)

                # Decode the generated token IDs back into text
                answer = tokenizer.decode(outputs[0], skip_special_tokens=True)

                st.subheader("Answer")
                st.write(answer)
            except Exception as e:
                st.error(f"Error during question answering: {e}")
    else:
        st.warning("Please enter both context and question.")
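
# A possible refinement (not used above): beam search often yields more fluent
# answers than greedy decoding for BART-style seq2seq models. The parameter
# values here are illustrative, not tuned for this checkpoint:
#
# outputs = model.generate(
#     **inputs,
#     max_length=50,
#     num_beams=4,          # keep 4 hypotheses instead of greedy decoding
#     early_stopping=True,  # stop once all beams emit an end-of-sequence token
# )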

# --- Earlier text-summarization draft, kept commented out for reference ---

# import streamlit as st
# import requests
# import torch
# from transformers import pipeline
# from transformers import T5ForConditionalGeneration, T5Tokenizer

# # Replace with your Hugging Face model repository path
# model_repo_path = 'AbdurRehman313/hotpotQA_BART_Finetuned_E5'

# # Load the model and tokenizer
# model = T5ForConditionalGeneration.from_pretrained(model_repo_path)
# tokenizer = T5Tokenizer.from_pretrained(model_repo_path)

# # Put the model in evaluation mode
# model.eval()

# # Example input question and context (replace with your actual inputs)
# # question = "What is the capital of France?"
# # context = "France is a country in Europe. Its capital is Paris, which is known for its art, culture, and history."

# # print(f'Predicted answer: {answer}')

# # Prepare the input for the model
# # input_text = f"context: {context} question: {question}"
# # inputs = tokenizer(input_text, return_tensors='pt')

# # # Move inputs to the appropriate device
# # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# # model.to(device)
# # inputs = {key: value.to(device) for key, value in inputs.items()}

# # # Perform inference
# # with torch.no_grad():
# #     outputs = model.generate(inputs['input_ids'], max_length=50)

# # # Decode the output
# # answer = tokenizer.decode(outputs[0], skip_special_tokens=True)

# # Streamlit app layout
# st.title("Text Summarization App")

# # User input
# text_input = st.text_area("Enter text to summarize", height=300)

# # Summarize the text
# # (Note: this draft calls a `summarizer` pipeline that is never instantiated
# # above; it would need something like pipeline("summarization", ...) first)
# if st.button("Summarize"):
#     if text_input:
#         with st.spinner("Generating summary..."):
#             try:
#                 summary = summarizer(text_input, max_length=150, min_length=30, do_sample=False)
#                 st.subheader("Summary")
#                 st.write(summary[0]['summary_text'])
#             except Exception as e:
#                 st.error(f"Error during summarization: {e}")
#     else:
#         st.warning("Please enter some text to summarize.")
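
# To try the app locally (assuming streamlit, torch, and transformers are
# installed), run: streamlit run app.py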