sagegu committed on
Commit 62a11d4 · 1 Parent(s): 135bd1e
.idea/gradio_space.iml ADDED
@@ -0,0 +1,10 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$">
+       <excludeFolder url="file://$MODULE_DIR$/.venv" />
+     </content>
+     <orderEntry type="inheritedJdk" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+ </module>
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,19 @@
+ <component name="InspectionProjectProfileManager">
+   <profile version="1.0">
+     <option name="myName" value="Project Default" />
+     <inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
+       <option name="ignoredErrors">
+         <list>
+           <option value="N806" />
+         </list>
+       </option>
+     </inspection_tool>
+     <inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
+       <option name="ignoredIdentifiers">
+         <list>
+           <option value="apis.main.models" />
+         </list>
+       </option>
+     </inspection_tool>
+   </profile>
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,7 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="Black">
+     <option name="sdkName" value="Python 3.12 (gradio_space)" />
+   </component>
+   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.12 (gradio_space)" project-jdk-type="Python SDK" />
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/gradio_space.iml" filepath="$PROJECT_DIR$/.idea/gradio_space.iml" />
+     </modules>
+   </component>
+ </project>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="VcsDirectoryMappings">
4
+ <mapping directory="" vcs="Git" />
5
+ </component>
6
+ </project>
app.py CHANGED
@@ -1,137 +1,167 @@
- import io
- from threading import Thread
- import random
- import os
-
- import numpy as np
- import spaces
  import gradio as gr
- import torch
-
- from parler_tts import ParlerTTSForConditionalGeneration
- from pydub import AudioSegment
- from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed
- from huggingface_hub import InferenceClient
- from streamer import ParlerTTSStreamer
- import time
-
- device = "cuda:0" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
- torch_dtype = torch.float16 if device != "cpu" else torch.float32
-
- repo_id = "parler-tts/parler_tts_mini_v0.1"
-
- jenny_repo_id = "ylacombe/parler-tts-mini-jenny-30H"
-
- model = ParlerTTSForConditionalGeneration.from_pretrained(
-     jenny_repo_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True
- ).to(device)
-
- client = InferenceClient(token=os.getenv("HF_TOKEN"))
-
- tokenizer = AutoTokenizer.from_pretrained(repo_id)
- feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
-
- SAMPLE_RATE = feature_extractor.sampling_rate
- SEED = 42
-
-
- def numpy_to_mp3(audio_array, sampling_rate):
-     # Normalize audio_array if it's floating-point
-     if np.issubdtype(audio_array.dtype, np.floating):
-         max_val = np.max(np.abs(audio_array))
-         audio_array = (audio_array / max_val) * 32767  # Normalize to 16-bit range
-         audio_array = audio_array.astype(np.int16)
-
-     # Create an audio segment from the numpy array
-     audio_segment = AudioSegment(
-         audio_array.tobytes(),
-         frame_rate=sampling_rate,
-         sample_width=audio_array.dtype.itemsize,
-         channels=1
-     )
-
-     # Export the audio segment to MP3 bytes - use a high bitrate to maximise quality
-     mp3_io = io.BytesIO()
-     audio_segment.export(mp3_io, format="mp3", bitrate="320k")
-
-     # Get the MP3 bytes
-     mp3_bytes = mp3_io.getvalue()
-     mp3_io.close()
-
-     return mp3_bytes
-
-
- sampling_rate = model.audio_encoder.config.sampling_rate
- frame_rate = model.audio_encoder.config.frame_rate
-
-
- def generate_response(audio):
-     gr.Info("Transcribing Audio", duration=5)
-     question = client.automatic_speech_recognition(audio).text
-     messages = [{"role": "system", "content": ("You are a magic 8 ball. "
-                                                "Someone will present to you a situation or question and your job "
-                                                "is to answer with a cryptic adage or proverb such as "
-                                                "'curiosity killed the cat' or 'The early bird gets the worm'. "
-                                                "Keep your answers short and do not include the phrase 'Magic 8 Ball' in your response. If the question does not make sense or is off-topic, say 'Foolish questions get foolish answers.' "
-                                                "For example: 'Magic 8 Ball, should I get a dog?' - 'A dog is ready for you but are you ready for the dog?'")},
-                 {"role": "user", "content": f"Magic 8 Ball please answer this question - {question}"}]
-
-     response = client.chat_completion(messages, max_tokens=64, seed=random.randint(1, 5000),
-                                       model="mistralai/Mistral-7B-Instruct-v0.3")
-     response = response.choices[0].message.content.replace("Magic 8 Ball", "")
-     return response, None, None
-
-
- @spaces.GPU
- def read_response(answer):
-     play_steps_in_s = 2.0
-     play_steps = int(frame_rate * play_steps_in_s)
-
-     description = "Jenny speaks at an average pace with a calm delivery in a very confined sounding environment with clear audio quality."
-     description_tokens = tokenizer(description, return_tensors="pt").to(device)
-
-     streamer = ParlerTTSStreamer(model, device=device, play_steps=play_steps)
-     prompt = tokenizer(answer, return_tensors="pt").to(device)
-
-     generation_kwargs = dict(
-         input_ids=description_tokens.input_ids,
-         prompt_input_ids=prompt.input_ids,
-         streamer=streamer,
-         do_sample=True,
-         temperature=1.0,
-         min_new_tokens=10,
-     )
-
-     set_seed(SEED)
-     thread = Thread(target=model.generate, kwargs=generation_kwargs)
-     thread.start()
-     start = time.time()
-     for new_audio in streamer:
-         print(
-             f"Sample of length: {round(new_audio.shape[0] / sampling_rate, 2)} seconds after {time.time() - start} seconds")
-         yield answer, numpy_to_mp3(new_audio, sampling_rate=sampling_rate)
-
-
- with gr.Blocks() as block:
-     gr.HTML(
-         f"""
-         <h1 style='text-align: center;'> Magic 8 Ball 🎱 </h1>
-         <h3 style='text-align: center;'> Ask a question and receive wisdom </h3>
-         <p style='text-align: center;'> Powered by <a href="https://github.com/huggingface/parler-tts"> Parler-TTS</a>
-         """
-     )
-     with gr.Group():
-         with gr.Row():
-             audio_out = gr.Audio(label="Spoken Answer", streaming=True, autoplay=True, loop=False)
-             answer = gr.Textbox(label="Answer")
-             state = gr.State()
-         with gr.Row():
-             audio_in = gr.Audio(label="Speak your question", sources="microphone", type="filepath")
-         with gr.Row():
-             gr.HTML(
-                 """<h3 style='text-align: center;'> Examples: 'What is the meaning of life?', 'Should I get a dog?' </h3>""")
-     audio_in.stop_recording(generate_response, audio_in, [state, answer, audio_out]).then(fn=read_response,
-                                                                                           inputs=state,
-                                                                                           outputs=[answer, audio_out])
-
- block.launch()
+ import os
+
+ from openai import OpenAI
+
+ # API keys are read from the environment rather than hardcoded in the source
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+ PERPLEXITY_API_KEY = os.getenv("PERPLEXITY_API_KEY")
+
+ # Set up the API clients; Perplexity serves an OpenAI-compatible chat API
+ perplexity_client = OpenAI(api_key=PERPLEXITY_API_KEY, base_url="https://api.perplexity.ai")
+ client = OpenAI(api_key=OPENAI_API_KEY)
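+ # On a Hugging Face Space, for example, these keys can be stored as Space
+ # secrets, which are exposed to the running app as environment variables.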
+
+
+ def search_linkedin_person(name, company):
+     """Search for a person on LinkedIn via the Perplexity API."""
+     query = f"Find this person {name} at {company}; try LinkedIn."
+     try:
+         messages = [
+             {"role": "system", "content": "You are an AI assistant. Provide a summary of the person."},
+             {"role": "user", "content": query}
+         ]
+         response = perplexity_client.chat.completions.create(
+             model="llama-3.1-sonar-large-128k-online",
+             messages=messages,
+         )
+         return response.choices[0].message.content
+     except Exception as e:
+         return f"Error searching: {str(e)}"
+
+
+ def create_multi_block_app():
+     with gr.Blocks() as demo:
+         # Block 1: LinkedIn Search
+         with gr.Column(variant="panel"):
+             gr.Markdown("## LinkedIn Profile Search")
+             with gr.Row():
+                 name_input = gr.Textbox(label="Person's Name", placeholder="Enter the name")
+                 company_input = gr.Textbox(label="Company", placeholder="Enter the company")
+
+             search_btn = gr.Button("Search Profile")
+             profile_output = gr.Textbox(label="LinkedIn Profile Info", interactive=False)
+
+             search_btn.click(
+                 fn=search_linkedin_person,
+                 inputs=[name_input, company_input],
+                 outputs=profile_output
+             )
+
+         # Block 2: Introductory Email Chatbot
+         with gr.Column(variant="panel"):
+             gr.Markdown("## Email Chatbot 1: Introductory Email")
+
+             # Create chatbot and input elements
+             intro_chatbot = gr.Chatbot(label="Intro Email Generation")
+             intro_msg_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
+             intro_submit_btn = gr.Button("Send")
+
+             # Define the chatbot conversation function
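+             # Assumes gr.Chatbot's tuple-style history: a list of
+             # [user_message, assistant_message] pairs.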
+             def intro_email_conversation(message, history, profile_info):
+                 try:
+                     # Flatten the [user, assistant] history pairs into the
+                     # role/content messages the OpenAI API expects
+                     formatted_history = []
+                     for user_msg, assistant_msg in history:
+                         formatted_history.append({"role": "user", "content": user_msg})
+                         if assistant_msg is not None:
+                             formatted_history.append({"role": "assistant", "content": assistant_msg})
+
+                     # Add the current user message to the conversation
+                     formatted_history.append({"role": "user", "content": message})
+
+                     # Add profile info to guide the assistant
+                     system_message = {
+                         "role": "system",
+                         "content": (
+                             "You are an AI assistant helping to draft a professional email "
+                             f"to the following individual: {profile_info}. Make it short and engaging."
+                         )
+                     }
+
+                     # Make a request to the OpenAI API
+                     response = client.chat.completions.create(
+                         model="gpt-4",
+                         messages=[system_message] + formatted_history
+                     )
+
+                     # Extract the AI's response
+                     ai_response = response.choices[0].message.content
+
+                     # Append the new message-response pair to the history
+                     history.append([message, ai_response])
+
+                     return history, ""  # Clear the input box
+                 except Exception as e:
+                     # Handle exceptions gracefully and append the error message
+                     history.append([message, f"Error: {str(e)}"])
+                     return history, ""
+
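+             # .click wires (textbox value, chat history, profile summary) into the
+             # handler; returning (history, "") re-renders the chat and clears the box.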
+             # Set up the button click behavior
+             intro_submit_btn.click(
+                 fn=intro_email_conversation,
+                 inputs=[intro_msg_input, intro_chatbot, profile_output],
+                 outputs=[intro_chatbot, intro_msg_input]
+             )
+
+
+         # Block 3: Company Introduction Email Chatbot
+         with gr.Column(variant="panel"):
+             gr.Markdown("## Email Chatbot 2: Company Introduction")
+
+             # Create chatbot and input elements
+             company_chatbot = gr.Chatbot(label="Company Intro Email Generation")
+             company_msg_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
+             company_submit_btn = gr.Button("Send")
+
+             # Define the chatbot conversation function
+             def company_email_conversation(message, history, profile_info):
+                 try:
+                     # Flatten the [user, assistant] history pairs into the
+                     # role/content messages the OpenAI API expects
+                     formatted_history = []
+                     for user_msg, assistant_msg in history:
+                         formatted_history.append({"role": "user", "content": user_msg})
+                         if assistant_msg is not None:
+                             formatted_history.append({"role": "assistant", "content": assistant_msg})
+
+                     # Add the current user message to the conversation
+                     formatted_history.append({"role": "user", "content": message})
+
+                     # Add profile info to guide the assistant
+                     system_message = {
+                         "role": "system",
+                         "content": (
+                             "You are an AI assistant helping to draft a professional email "
+                             f"to the following individual: {profile_info}. Introduce my semiconductor company."
+                         )
+                     }
+
+                     # Make a request to the OpenAI API
+                     response = client.chat.completions.create(
+                         model="gpt-3.5-turbo",
+                         messages=[system_message] + formatted_history
+                     )
+
+                     # Extract the AI's response
+                     ai_response = response.choices[0].message.content
+
+                     # Append the new message-response pair to the history
+                     history.append([message, ai_response])
+
+                     return history, ""  # Clear the input box
+                 except Exception as e:
+                     # Handle exceptions gracefully and append the error message
+                     history.append([message, f"Error: {str(e)}"])
+                     return history, ""
+
+             # Set up the button click behavior
+             company_submit_btn.click(
+                 fn=company_email_conversation,
+                 inputs=[company_msg_input, company_chatbot, profile_output],
+                 outputs=[company_chatbot, company_msg_input]
+             )
+
+     return demo
+
+
+ if __name__ == "__main__":
+     app = create_multi_block_app()
+     # app.launch()
+     app.launch(share=True)  # Share your demo with just 1 extra parameter 🚀