import streamlit as st
from transformers import GPT2Tokenizer, GPT2LMHeadModel, pipeline
import os

# Load the model and tokenizer, wrapping any failure so the UI can report it.
# st.cache_resource keeps the loaded pipeline across Streamlit reruns, so the
# model is not reloaded on every widget interaction.
@st.cache_resource
def load_model_and_tokenizer(model_dir):
    try:
        tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
        model = GPT2LMHeadModel.from_pretrained(model_dir)
        nlp = pipeline("text-generation", model=model, tokenizer=tokenizer)
        return nlp, None
    except Exception as e:
        return None, str(e)
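
# Optional alternative (a minimal sketch, not used below): pipeline() can also
# resolve the model and its tokenizer itself from a repo ID or local path,
# replacing the explicit from_pretrained pair above with a single call.
def load_pipeline_one_call(model_dir):
    return pipeline("text-generation", model=model_dir)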

# Model to load: a Hugging Face Hub repo ID (as here) or a local directory path
model_dir = "adeel300/QA_doctor"

# If model_dir is a local directory, inspect its contents (note that print()
# writes to the server console, not to the Streamlit page)
if os.path.isdir(model_dir):
    print("Contents of the model directory:")
    print(os.listdir(model_dir))

    # Some checkpoints are unpacked one level deep; descend into the first
    # nested directory if one exists
    nested_dirs = [d for d in os.listdir(model_dir) if os.path.isdir(os.path.join(model_dir, d))]
    if nested_dirs:
        model_dir = os.path.join(model_dir, nested_dirs[0])
        print(f"Updated model directory to: {model_dir}")

# Load the model and tokenizer
nlp, error = load_model_and_tokenizer(model_dir)

# Streamlit app
st.title("Hugging Face Model Inference")

st.write("Enter your text below and get the model's response.")

# Input text
input_text = st.text_area("Input Text", value="", height=200)

if st.button("Generate Response"):
    if error:
        st.error(f"Error loading model or tokenizer: {error}")
    elif input_text:
        with st.spinner("Generating response..."):
            try:
                response = nlp(input_text, max_new_tokens=50)  # caps generated tokens; max_length would also count the prompt
                st.success("Response generated!")
                st.text_area("Response", value=response[0]['generated_text'], height=200)
            except Exception as e:
                st.error(f"Error during inference: {e}")
    else:
        st.warning("Please enter some text to generate a response.")

# Save the model and tokenizer to a directory (optional)
if st.button("Save Model and Tokenizer"):
    save_directory = "./saved_model"
    try:
        if nlp:
            nlp.model.save_pretrained(save_directory)
            nlp.tokenizer.save_pretrained(save_directory)
            st.success("Model and tokenizer saved successfully!")
    except Exception as e:
        st.error(f"Error saving model or tokenizer: {e}")

# Upload the model to the Hugging Face Hub (optional; requires an authenticated session)
if st.button("Upload to Hugging Face Hub"):
    try:
        if nlp:
            nlp.model.push_to_hub("QA_doctor")
            nlp.tokenizer.push_to_hub("QA_doctor")
            st.success("Model and tokenizer pushed to Hugging Face Hub successfully!")
        else:
            st.error("Model is not loaded, so there is nothing to push.")
    except Exception as e:
        st.error(f"Error pushing model or tokenizer to Hugging Face Hub: {e}")