import os

import streamlit as st
from transformers import GPT2Tokenizer, GPT2LMHeadModel, pipeline

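# Launch with: streamlit run app.py  (the filename is illustrative).
# Requires streamlit, transformers, and a backend such as torch, e.g.:
#   pip install streamlit transformers torch
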
# Cache the loaded pipeline across Streamlit reruns so the model is not
# reloaded on every widget interaction (st.cache_resource is available in
# recent Streamlit releases).
@st.cache_resource
def load_model_and_tokenizer(model_dir):
    """Return (pipeline, error): a text-generation pipeline on success, else the error message."""
    try:
        tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
        model = GPT2LMHeadModel.from_pretrained(model_dir)
        nlp = pipeline("text-generation", model=model, tokenizer=tokenizer)
        return nlp, None
    except Exception as e:
        return None, str(e)

# "adeel300/QA_doctor" is a Hugging Face Hub repo ID; from_pretrained() downloads
# it from the Hub when no local directory of that name exists. The directory
# checks below only apply to a local copy.
model_dir = "adeel300/QA_doctor"

if os.path.isdir(model_dir):
    print("Contents of the model directory:")
    print(os.listdir(model_dir))

    # If the files were unpacked into a nested folder (e.g. a checkpoint
    # subdirectory), descend into the first nested directory found.
    nested_dirs = [d for d in os.listdir(model_dir) if os.path.isdir(os.path.join(model_dir, d))]
    if nested_dirs:
        model_dir = os.path.join(model_dir, nested_dirs[0])
        print(f"Updated model directory to: {model_dir}")

nlp, error = load_model_and_tokenizer(model_dir)

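# For reference, a text-generation pipeline returns a list of dicts, e.g.:
#   nlp("I have a fever and a cough", max_length=50)
#   -> [{'generated_text': 'I have a fever and a cough ...'}]
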
st.title("Hugging Face Model Inference")
st.write("Enter your text below and get the model's response.")

input_text = st.text_area("Input Text", value="", height=200)

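# Streamlit re-executes this script from the top on every interaction; the
# st.cache_resource wrapper above keeps the model load from repeating each time
# one of the buttons below is clicked.
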
if st.button("Generate Response"): |
|
if error: |
|
st.error(f"Error loading model or tokenizer: {error}") |
|
elif input_text: |
|
with st.spinner("Generating response..."): |
|
try: |
|
response = nlp(input_text, max_length=50) |
|
st.success("Response generated!") |
|
st.text_area("Response", value=response[0]['generated_text'], height=200) |
|
except Exception as e: |
|
st.error(f"Error during inference: {e}") |
|
else: |
|
st.warning("Please enter some text to generate a response.") |
|
|
|
|
|
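# Note: max_length counts the prompt tokens as well, so long inputs leave little
# room for generated text. To bound only the continuation, transformers also
# accepts max_new_tokens, e.g.: nlp(input_text, max_new_tokens=50)
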
if st.button("Save Model and Tokenizer"): |
|
save_directory = "./saved_model" |
|
try: |
|
if nlp: |
|
nlp.model.save_pretrained(save_directory) |
|
nlp.tokenizer.save_pretrained(save_directory) |
|
st.success("Model and tokenizer saved successfully!") |
|
except Exception as e: |
|
st.error(f"Error saving model or tokenizer: {e}") |
|
|
|
|
|
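# save_pretrained() writes the model weights and config.json (and the tokenizer
# files) into save_directory, so the folder can later be reloaded with
# from_pretrained(save_directory).
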
if st.button("Upload to Hugging Face Hub"): |
|
try: |
|
if nlp: |
|
nlp.model.push_to_hub("QA_doctor") |
|
nlp.tokenizer.push_to_hub("QA_doctor") |
|
st.success("Model and tokenizer pushed to Hugging Face Hub successfully!") |
|
except Exception as e: |
|
st.error(f"Error pushing model or tokenizer to Hugging Face Hub: {e}") |
|
|
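# push_to_hub() requires Hub authentication, e.g. via `huggingface-cli login`
# or an HF_TOKEN environment variable set before launching the app.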