Dhahlan2000 committed on
Commit
3baf333
·
1 Parent(s): 9b0e653

Refactor app.py to transition from Gradio to Streamlit for the job application email generator interface. Update UI components including text areas, file upload, and sliders for user input. Modify requirements.txt to remove Gradio and include necessary dependencies for Streamlit and Hugging Face. This change enhances user experience and streamlines the email generation process.

Browse files
Files changed (2) hide show
  1. app.py +28 -22
  2. requirements.txt +3 -5
app.py CHANGED
@@ -1,4 +1,4 @@
1
- import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import torch
@@ -48,7 +48,7 @@ client = InferenceClient(token=access_token)
48
  def conversation_predict(input_text):
49
  """Generate a response for single-turn input using the model."""
50
  # Tokenize the input text
51
- input_ids = tokenizer("""Job Description:
52
  {input_text}
53
 
54
  Instructions: Write a concise and professional email expressing interest in the position.
@@ -106,24 +106,30 @@ CV Summary:
106
  response += token
107
  yield response
108
 
109
- # Create a Gradio ChatInterface demo
110
- demo = gr.ChatInterface(
111
- fn=respond,
112
- additional_inputs=[
113
- gr.Textbox(value="Instructions: Write a concise and professional email expressing interest in the position.",
114
- label="System message"),
115
- gr.File(label="Upload CV (PDF or DOCX)", file_types=[".pdf", ".docx"]),
116
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
117
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
118
- gr.Slider(
119
- minimum=0.1,
120
- maximum=1.0,
121
- value=0.95,
122
- step=0.05,
123
- label="Top-p (nucleus sampling)",
124
- ),
125
- ],
126
- )
127
 
128
- if __name__ == "__main__":
129
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
  from huggingface_hub import InferenceClient
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import torch
 
48
  def conversation_predict(input_text):
49
  """Generate a response for single-turn input using the model."""
50
  # Tokenize the input text
51
+ input_ids = tokenizer(f"""Job Description:
52
  {input_text}
53
 
54
  Instructions: Write a concise and professional email expressing interest in the position.
 
106
  response += token
107
  yield response
108
 
109
# --- Streamlit UI ---------------------------------------------------------
# Flat script section: renders the form widgets and, on demand, feeds the
# job description to conversation_predict() and shows the result.
st.title("Job Application Email Generator")

# Editable system prompt shown to the user before generation.
system_message = st.text_area(
    "System message",
    "Instructions: Write a concise and professional email expressing interest in the position.",
    height=150,
)

# Optional CV upload (PDF or DOCX).
cv_file = st.file_uploader("Upload CV (PDF or DOCX)", type=["pdf", "docx"])

# Generation hyper-parameters.
max_tokens = st.slider("Max new tokens", min_value=1, max_value=2048, value=512, step=1)
temperature = st.slider("Temperature", min_value=0.1, max_value=4.0, value=0.7, step=0.1)
top_p = st.slider("Top-p (nucleus sampling)", min_value=0.1, max_value=1.0, value=0.95, step=0.05)

# Free-text job description driving the email.
message = st.text_input("Job Description", "")

# NOTE(review): system_message, cv_file, max_tokens, temperature and top_p are
# collected but never passed to conversation_predict below — confirm whether
# the generator is meant to consume them.
if st.button("Generate Email"):
    if not message:
        st.warning("Please enter a job description.")
    else:
        # conversation_predict is defined earlier in app.py; only the job
        # description is forwarded to it.
        response = conversation_predict(message)
        st.write("Generated Email:")
        st.write(response)
requirements.txt CHANGED
@@ -1,11 +1,9 @@
1
  # add requirements
2
  streamlit
3
- python-dotenv
4
- langchain
5
  transformers
6
  torch
7
- PyPDF2
8
- gradio
9
- huggingface_hub
10
  python-docx
 
 
 
11
 
 
1
  # add requirements
2
  streamlit
 
 
3
  transformers
4
  torch
 
 
 
5
  python-docx
6
+ PyPDF2
7
+ huggingface-hub
8
+ python-dotenv
9