Update app.py
app.py CHANGED
@@ -8,27 +8,12 @@ openai.api_key = os.environ["OPEN_AI_KEY"]
 
 
 
-#messages = gr.State([
-#    {"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."}
-#])
-messages = [{"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."}]
-
-
 def transcribe(audio):
     audio_file = open(audio, "rb")
     # Call the transcribe method with the file-like object
     transcript = openai.Audio.transcribe("whisper-1", audio_file)
-
-
-    #msg_contents.append({"role": "user", "content": transcript["text"]})
-
-    #chat_transcript = ""
-    #for message in msg_contents:
-    #    if (message["role"] != "system"):
-    #        chat_transcript += message["role"] + ": " + message["content"] + "\n\n"
-    system_response = botResponse(transcript["text"])
 
-    return
+    return transcript["text"]
 
 def botResponse(user_input):
     response = openai.ChatCompletion.create(
@@ -41,16 +26,9 @@ def botResponse(user_input):
     )
 
     system_message = response["choices"][0]["message"]["content"]
-    #msg_contents.append({"role": "assistant", "content": system_message})
-
-    #chat_transcript = chat_log
-    #for message in msg_contents:
-    #    if (message["role"] != "system"):
-    #        chat_transcript += message["role"] + ": " + message["content"] + "\n\n"
 
     return system_message
 
-
 def giveVoice(bot_message):
     myobj = gTTS(text=bot_message)
     myobj.save("temp.mp3")
@@ -71,9 +49,11 @@ with gr.Blocks() as demo:
         submit_btn = gr.Button(value="Transcribe")
     with gr.Column():
         #gpt_response = gr.Audio(label="Voice Response")
-
+        user_transcript = gr.Text(label="User Transcript")
+        gpt_transcript = gr.Text(label="GPT Transcript")
         gpt_voice = gr.Audio(label="Voice Response")
-    submit_btn.click(transcribe, inputs=user_audio, outputs=
+    submit_btn.click(transcribe, inputs=user_audio, outputs=user_transcript)
+    user_transcript.change(botResponse, inputs=user_transcript, outputs=gpt_transcript)
     gpt_transcript.change(giveVoice, inputs=gpt_transcript, outputs=gpt_voice)
 
 
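
For orientation, here is a minimal sketch of how the updated app.py fits together after this commit. Only the three callbacks and the event wiring come from the hunks above; the imports, the microphone input component, the model name, and the inline system prompt in botResponse are assumptions filled in for illustration, since the diff does not show that context.

import os
import openai
import gradio as gr
from gtts import gTTS

openai.api_key = os.environ["OPEN_AI_KEY"]

def transcribe(audio):
    # Whisper transcription of the recorded audio file (legacy openai<1.0 API)
    audio_file = open(audio, "rb")
    transcript = openai.Audio.transcribe("whisper-1", audio_file)
    return transcript["text"]

def botResponse(user_input):
    # Single-turn chat completion; model and system prompt are assumed, not shown in the diff
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."},
            {"role": "user", "content": user_input},
        ],
    )
    return response["choices"][0]["message"]["content"]

def giveVoice(bot_message):
    # Convert the reply to speech with gTTS and return the file path for gr.Audio
    myobj = gTTS(text=bot_message)
    myobj.save("temp.mp3")
    return "temp.mp3"

with gr.Blocks() as demo:
    with gr.Column():
        # Input component is not part of the diff; assumed to be a microphone recording
        user_audio = gr.Audio(source="microphone", type="filepath")
        submit_btn = gr.Button(value="Transcribe")
    with gr.Column():
        user_transcript = gr.Text(label="User Transcript")
        gpt_transcript = gr.Text(label="GPT Transcript")
        gpt_voice = gr.Audio(label="Voice Response")
    # Event chain: click -> transcribe -> botResponse -> giveVoice
    submit_btn.click(transcribe, inputs=user_audio, outputs=user_transcript)
    user_transcript.change(botResponse, inputs=user_transcript, outputs=gpt_transcript)
    gpt_transcript.change(giveVoice, inputs=gpt_transcript, outputs=gpt_voice)

demo.launch()

The change keeps each step stateless: the button click only produces the transcript, and the two .change listeners cascade from it, so the GPT reply and the spoken response regenerate whenever the transcript text updates.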