import streamlit as st
from transformers import pipeline, BartTokenizer, BartForConditionalGeneration

# Replace with your Hugging Face model repository path for QnA
model_repo_path_qna = 'ASaboor/Bart_Therapy'

# Cache the model, tokenizer, and pipeline so Streamlit does not reload them
# on every interaction with the app.
@st.cache_resource
def load_qna_pipeline(repo_path):
    model = BartForConditionalGeneration.from_pretrained(repo_path)
    tokenizer = BartTokenizer.from_pretrained(repo_path)
    # BART is a sequence-to-sequence model, so use the generative
    # 'text2text-generation' task; the extractive 'question-answering'
    # pipeline expects a model with a span-prediction QA head and does not
    # work with BartForConditionalGeneration.
    return pipeline('text2text-generation', model=model, tokenizer=tokenizer)
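# Sketch of the expected call shape (hypothetical input/output, for
# illustration only): a text2text-generation pipeline returns a list of
# dicts keyed by 'generated_text', e.g.
#   load_qna_pipeline(model_repo_path_qna)("question: ... context: ...")
#   -> [{'generated_text': '...model answer...'}]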
# Streamlit app layout (set_page_config should be the first Streamlit call,
# so load the pipeline afterwards)
st.set_page_config(page_title="QnA App", page_icon=":memo:", layout="wide")

qna_pipeline = load_qna_pipeline(model_repo_path_qna)

st.title("Question and Answer App")
st.write("""
This app uses a fine-tuned BART model to answer questions based on the provided context.
Enter the context and your question below, then click "Get Answer" to see the result.
""")

# User input for QnA
context_input = st.text_area("Enter context for QnA", height=300, placeholder="Paste your context here...")
question_input = st.text_input("Enter question", placeholder="Type your question here...")
# Generate the answer
if st.button("Get Answer"):
    if context_input and question_input:
        with st.spinner("Generating answer..."):
            try:
                # Format the inputs as a single prompt for the seq2seq model.
                # The exact format depends on how the model was fine-tuned;
                # "question: ... context: ..." is a common convention and an
                # assumption here.
                prompt = f"question: {question_input} context: {context_input}"
                result = qna_pipeline(prompt, max_length=256)

                # text2text-generation returns [{'generated_text': ...}]
                st.subheader("Answer")
                st.write(result[0]['generated_text'])
            except Exception as e:
                st.error(f"An error occurred during QnA: {e}")
    else:
        st.warning("Please enter both context and question for QnA.")
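
# To try the app locally (assuming this file is saved as app.py):
#   streamlit run app.py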