adeel300 committed
Commit 93d2800 · verified · 1 Parent(s): c5c2b15

Update app.py

Files changed (1): app.py (+61, -9)
app.py CHANGED
@@ -1,9 +1,35 @@
+
 import streamlit as st
-from transformers import pipeline
+from transformers import GPT2Tokenizer, GPT2LMHeadModel, pipeline
+import os
+
+# Function to verify and load model and tokenizer
+def load_model_and_tokenizer(model_dir):
+    try:
+        tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
+        model = GPT2LMHeadModel.from_pretrained(model_dir)
+        nlp = pipeline("text-generation", model=model, tokenizer=tokenizer)
+        return nlp, None
+    except Exception as e:
+        return None, str(e)
+
+# Path to the fine-tuned model directory on Google Drive
+model_dir = "adeel300/QA_doctor"
+
+# Verify the contents of the model directory
+if os.path.isdir(model_dir):
+    print("Contents of the model directory:")
+    print(os.listdir(model_dir))
 
-# Load the Hugging Face model
-model_name = "adeel300/QA_doctor"
-nlp = pipeline("text-generation", model=model_name)
+    # Check if there's a nested directory
+    nested_dirs = [d for d in os.listdir(model_dir) if os.path.isdir(os.path.join(model_dir, d))]
+    if nested_dirs:
+        # Update model_dir to point to the nested directory
+        model_dir = os.path.join(model_dir, nested_dirs[0])
+        print(f"Updated model directory to: {model_dir}")
+
+# Load the model and tokenizer
+nlp, error = load_model_and_tokenizer(model_dir)
 
 # Streamlit app
 st.title("Hugging Face Model Inference")
@@ -14,10 +40,36 @@ st.write("Enter your text below and get the model's response.")
 input_text = st.text_area("Input Text", value="", height=200)
 
 if st.button("Generate Response"):
-    if input_text:
+    if error:
+        st.error(f"Error loading model or tokenizer: {error}")
+    elif input_text:
         with st.spinner("Generating response..."):
-            response = nlp(input_text)
-            st.success("Response generated!")
-            st.text_area("Response", value=response[0]['generated_text'], height=200)
+            try:
+                response = nlp(input_text, max_length=50)  # Adjust max_length as needed
+                st.success("Response generated!")
+                st.text_area("Response", value=response[0]['generated_text'], height=200)
+            except Exception as e:
+                st.error(f"Error during inference: {e}")
     else:
-        st.warning("Please enter some text to generate a response.")
+        st.warning("Please enter some text to generate a response.")
+
+# Save the model and tokenizer to a directory (optional)
+if st.button("Save Model and Tokenizer"):
+    save_directory = "./saved_model"
+    try:
+        if nlp:
+            nlp.model.save_pretrained(save_directory)
+            nlp.tokenizer.save_pretrained(save_directory)
+            st.success("Model and tokenizer saved successfully!")
+    except Exception as e:
+        st.error(f"Error saving model or tokenizer: {e}")
+
+# Upload the model to Hugging Face (optional)
+if st.button("Upload to Hugging Face Hub"):
+    try:
+        if nlp:
+            nlp.model.push_to_hub("QA_doctor")
+            nlp.tokenizer.push_to_hub("QA_doctor")
+            st.success("Model and tokenizer pushed to Hugging Face Hub successfully!")
+    except Exception as e:
+        st.error(f"Error pushing model or tokenizer to Hugging Face Hub: {e}")
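
To exercise the new loading path outside of Streamlit, here is a minimal sketch. It reuses the same classes and repo id as the diff above; the prompt string is illustrative, and it assumes adeel300/QA_doctor resolves as a standard GPT-2 checkpoint on the Hugging Face Hub:

    from transformers import GPT2Tokenizer, GPT2LMHeadModel, pipeline

    # Same classes the updated app.py uses; from_pretrained accepts either
    # a Hub repo id or a local directory, which is why the app's
    # nested-directory fallback works for both cases.
    tokenizer = GPT2Tokenizer.from_pretrained("adeel300/QA_doctor")
    model = GPT2LMHeadModel.from_pretrained("adeel300/QA_doctor")
    nlp = pipeline("text-generation", model=model, tokenizer=tokenizer)

    prompt = "What are common symptoms of the flu?"  # illustrative prompt
    print(nlp(prompt, max_length=50)[0]["generated_text"])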
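
One caveat on the new "Upload to Hugging Face Hub" button: push_to_hub only succeeds with an authenticated session. A sketch of the usual pre-step, assuming a write token is exposed in an HF_TOKEN environment variable (interactive `huggingface-cli login` is the alternative):

    import os
    from huggingface_hub import login

    # Authenticate before calling push_to_hub; reading the token from
    # HF_TOKEN is an assumption about this deployment, not part of the diff.
    login(token=os.environ["HF_TOKEN"])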