import streamlit as st
from transformers import pipeline, BartTokenizer, BartForConditionalGeneration
# Streamlit page setup must be the first Streamlit command executed.
st.set_page_config(page_title="QnA App", page_icon=":memo:", layout="wide")

# Replace with your Hugging Face model repository path for QnA
model_repo_path_qna = 'ASaboor/Bart_Therapy'


@st.cache_resource
def _load_qna_pipeline(repo_path):
    """Load the BART model/tokenizer once and build a generation pipeline.

    Cached with ``st.cache_resource`` so the model is not re-downloaded and
    re-instantiated on every Streamlit rerun (each widget interaction).

    BartForConditionalGeneration is an encoder-decoder *generative* model,
    so it must be served through the ``'text2text-generation'`` task. The
    extractive ``'question-answering'`` pipeline expects a span-prediction
    (QuestionAnswering) head and cannot run a conditional-generation model.
    """
    model = BartForConditionalGeneration.from_pretrained(repo_path)
    tokenizer = BartTokenizer.from_pretrained(repo_path)
    return pipeline('text2text-generation', model=model, tokenizer=tokenizer)


# Initialize the QnA pipeline (cached across reruns)
qna_pipeline = _load_qna_pipeline(model_repo_path_qna)

# Streamlit app layout
st.title("Question and Answer App")
st.write("""
This app uses a fine-tuned BART model to answer questions based on the provided context.
Enter the context and your question below, then click "Get Answer" to see the result.
""")

# User input for QnA
context_input = st.text_area("Enter context for QnA", height=300, placeholder="Paste your context here...")
question_input = st.text_input("Enter question", placeholder="Type your question here...")

# Generate the answer on demand
if st.button("Get Answer"):
    if context_input and question_input:
        with st.spinner("Generating answer..."):
            try:
                # Seq2seq QnA: fold question + context into a single prompt
                # and let the model generate the answer text.
                prompt = f"question: {question_input} context: {context_input}"
                outputs = qna_pipeline(prompt)
                # text2text-generation returns [{'generated_text': ...}]
                st.subheader("Answer")
                st.write(outputs[0]['generated_text'])
            except Exception as e:
                # Surface failures in the UI instead of crashing the app.
                st.error(f"An error occurred during QnA: {e}")
    else:
        st.warning("Please enter both context and question for QnA.")