import streamlit as st
import torch
from transformers import BartForConditionalGeneration, BartTokenizer
# Load the model and tokenizer once and cache them across Streamlit reruns
@st.cache_resource
def load_model_and_tokenizer(repo_path):
    model = BartForConditionalGeneration.from_pretrained(repo_path)
    tokenizer = BartTokenizer.from_pretrained(repo_path)
    model.eval()  # inference only; disables dropout
    return model, tokenizer

model_repo_path = 'AbdurRehman313/hotpotQA_BART_Finetuned_E5'
model, tokenizer = load_model_and_tokenizer(model_repo_path)

# Run on the GPU when one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
# Streamlit app layout
st.title("Multi-Hop Question Answering Application")

# User input for context and question
context_input = st.text_area("Enter context", height=200)
question_input = st.text_area("Enter question")
# Generate the answer when the user clicks the button
if st.button("Get Answer"):
    if context_input and question_input:
        with st.spinner("Generating answer..."):
            try:
                # Format the input the way the model was fine-tuned to expect
                input_text = f"context: {context_input} question: {question_input}"
                # Truncate to BART's 1024-token limit so long contexts don't error out
                inputs = tokenizer(input_text, return_tensors='pt',
                                   truncation=True, max_length=1024)
                inputs = {key: value.to(device) for key, value in inputs.items()}
                # Perform inference without tracking gradients
                with torch.no_grad():
                    outputs = model.generate(
                        inputs['input_ids'],
                        attention_mask=inputs['attention_mask'],
                        max_length=50,
                    )
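                    # Optional alternative (not part of the original script):
                    # beam search often gives steadier multi-hop answers at
                    # some extra latency; swap in if greedy decoding underperforms:
                    #   outputs = model.generate(inputs['input_ids'],
                    #                            attention_mask=inputs['attention_mask'],
                    #                            max_length=50, num_beams=4,
                    #                            early_stopping=True)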
                # Decode the generated token IDs back into text
                answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
                st.subheader("Answer")
                st.write(answer)
            except Exception as e:
                st.error(f"Error during question answering: {e}")
    else:
        st.warning("Please enter both context and question.")
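
# To run the app (assuming this file is saved as app.py):
#   streamlit run app.py
# Sample inputs to try in the UI:
#   Context:  "France is a country in Europe. Its capital is Paris, which is
#              known for its art, culture, and history."
#   Question: "What is the capital of France?"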