import streamlit as st
import torch
from transformers import BartForConditionalGeneration, BartTokenizer
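
# Dependencies (a sketch; the original file does not pin versions):
#     pip install streamlit torch transformers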

# Load the fine-tuned model and tokenizer once and cache them across
# Streamlit reruns (st.cache_resource keeps them in memory instead of
# reloading on every widget interaction)
model_repo_path = 'AbdurRehman313/hotpotQA_BART_Finetuned_E5'

@st.cache_resource
def load_model_and_tokenizer():
    model = BartForConditionalGeneration.from_pretrained(model_repo_path)
    tokenizer = BartTokenizer.from_pretrained(model_repo_path)
    return model, tokenizer

model, tokenizer = load_model_and_tokenizer()

# Put the model in evaluation mode and move it to the GPU if available
model.eval()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

# Streamlit app layout
st.title("Multi-Hop Question Answering Application")

# User input for context and question
context_input = st.text_area("Enter context", height=200)
question_input = st.text_area("Enter question")

# Generate the answer
if st.button("Get Answer"):
    if context_input and question_input:
        with st.spinner("Generating answer..."):
            try:
                # Prepare the input for the model; truncate to the
                # model's maximum input length (1024 tokens for BART)
                # so very long contexts do not crash generation
                input_text = f"context: {context_input} question: {question_input}"
                inputs = tokenizer(input_text, return_tensors='pt', truncation=True)
                inputs = {key: value.to(device) for key, value in inputs.items()}

                # Perform inference; passing the attention mask keeps
                # generate() from having to infer it and emitting a warning
                with torch.no_grad():
                    outputs = model.generate(inputs['input_ids'],
                                             attention_mask=inputs['attention_mask'],
                                             max_length=50)

                # Decode the output
                answer = tokenizer.decode(outputs[0], skip_special_tokens=True)

                st.subheader("Answer")
                st.write(answer)
            except Exception as e:
                st.error(f"Error during question answering: {e}")
    else:
        st.warning("Please enter both context and question.")