import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForQuestionAnswering


# Cache the loaded pipeline so the model is downloaded and built only once per session.
# (Newer Streamlit versions replace st.cache with st.cache_resource.)
@st.cache(allow_output_mutation=True)
def load_qa_model():
    model_name = "google/mobilebert-uncased"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForQuestionAnswering.from_pretrained(model_name)
    return pipeline("question-answering", model=model, tokenizer=tokenizer)


qa = load_qa_model()

st.title("Ask Questions about your Text")
sentence = st.text_area("Please paste your article:", height=30)
question = st.text_input("Questions from this article?")
button = st.button("Get me Answers")

# Sidebar controls; note that the extractive question-answering pipeline above
# does not consume these values (do_sample applies to text-generation pipelines).
max_seq_length = st.sidebar.slider("Select max sequence length", 50, 500, step=10, value=150)
do_sample = st.sidebar.checkbox("Do sample", value=False)

# Run the pipeline only after the button is clicked and some context is provided.
if button and sentence:
    with st.spinner("Discovering answers..."):
        answers = qa(question=question, context=sentence)
    st.write("Answer:", answers["answer"])
    st.write("Score:", answers["score"])
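
# A minimal alternative sketch for load_qa_model(), assuming the SQuAD-v2
# fine-tuned checkpoint "csarron/mobilebert-uncased-squad-v2" is available on
# the Hugging Face Hub. The base "google/mobilebert-uncased" checkpoint used
# above ships without a trained question-answering head, so its answers and
# scores are not meaningful; swapping in a fine-tuned checkpoint fixes that
# without changing the rest of the app.
#
# @st.cache(allow_output_mutation=True)
# def load_qa_model():
#     return pipeline(
#         "question-answering",
#         model="csarron/mobilebert-uncased-squad-v2",
#     )
#
# To try the app, save this file (e.g. as app.py) and launch it with:
#   streamlit run app.py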