import os

import streamlit as st
from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline


# Load the model and tokenizer, returning (pipeline, error_message).
# Cached so Streamlit reruns don't reload the weights on every interaction.
@st.cache_resource
def load_model_and_tokenizer(model_dir):
    try:
        tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
        model = GPT2LMHeadModel.from_pretrained(model_dir)
        nlp = pipeline("text-generation", model=model, tokenizer=tokenizer)
        return nlp, None
    except Exception as e:
        return None, str(e)


# Identifier of the fine-tuned model. This is a Hugging Face Hub model ID
# (from_pretrained downloads it automatically); the directory checks below
# only apply when model_dir is instead a local path, e.g. a mounted
# Google Drive folder.
model_dir = "adeel300/QA_doctor"

# If model_dir is a local directory, inspect its contents (these prints go
# to the server console, not the Streamlit page).
if os.path.isdir(model_dir):
    print("Contents of the model directory:")
    print(os.listdir(model_dir))

    # Some checkpoints are saved one level down; descend into the first
    # nested directory if there is one.
    nested_dirs = [d for d in os.listdir(model_dir) if os.path.isdir(os.path.join(model_dir, d))]
    if nested_dirs:
        model_dir = os.path.join(model_dir, nested_dirs[0])
        print(f"Updated model directory to: {model_dir}")

# Load the model and tokenizer
nlp, error = load_model_and_tokenizer(model_dir)

# Streamlit app
st.title("Hugging Face Model Inference")
st.write("Enter your text below and get the model's response.")

# Input text
input_text = st.text_area("Input Text", value="", height=200)

if st.button("Generate Response"):
    if error:
        st.error(f"Error loading model or tokenizer: {error}")
    elif input_text:
        with st.spinner("Generating response..."):
            try:
                response = nlp(input_text, max_length=50)  # Adjust max_length as needed
                st.success("Response generated!")
                st.text_area("Response", value=response[0]["generated_text"], height=200)
            except Exception as e:
                st.error(f"Error during inference: {e}")
    else:
        st.warning("Please enter some text to generate a response.")

# Save the model and tokenizer to a local directory (optional)
if st.button("Save Model and Tokenizer"):
    save_directory = "./saved_model"
    if nlp is None:
        st.error(f"Model is not loaded, nothing to save: {error}")
    else:
        try:
            nlp.model.save_pretrained(save_directory)
            nlp.tokenizer.save_pretrained(save_directory)
            st.success("Model and tokenizer saved successfully!")
        except Exception as e:
            st.error(f"Error saving model or tokenizer: {e}")

# Upload the model to the Hugging Face Hub (optional); requires Hub credentials.
if st.button("Upload to Hugging Face Hub"):
    if nlp is None:
        st.error(f"Model is not loaded, nothing to upload: {error}")
    else:
        try:
            nlp.model.push_to_hub("QA_doctor")
            nlp.tokenizer.push_to_hub("QA_doctor")
            st.success("Model and tokenizer pushed to Hugging Face Hub successfully!")
        except Exception as e:
            st.error(f"Error pushing model or tokenizer to Hugging Face Hub: {e}")
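
# ---------------------------------------------------------------------------
# Usage sketch (not part of the app itself): one way to run this script,
# assuming it is saved as app.py -- the filename is an assumption, not taken
# from the original. The "Upload to Hugging Face Hub" button will only work
# if push_to_hub can authenticate, e.g. after `huggingface-cli login` or with
# the HF_TOKEN environment variable set.
#
#   pip install streamlit transformers torch
#   huggingface-cli login        # only needed for the Hub upload button
#   streamlit run app.py
# ---------------------------------------------------------------------------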