hwberry2 committed on
Commit
a110eb7
·
1 Parent(s): b7cf674

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -34
app.py CHANGED
@@ -6,49 +6,51 @@ from gtts import gTTS
6
 
7
  openai.api_key = os.environ["OPEN_AI_KEY"]
8
 
9
- messages = gr.State(value=[{"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."}])
10
- #messages = [{"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."}]
11
 
 
 
 
12
 
13
- def transcribe(audio):
14
- audio_file = open(audio, "rb")
15
- # Call the transcribe method with the file-like object
16
- transcript = openai.Audio.transcribe("whisper-1", audio_file)
17
-
18
- return transcript["text"]
19
-
20
- def botResponse(user_input, messages):
21
- #global messages
22
- #messages = msg_contents
23
-
24
- messages.append({"role": "user", "content": user_input})
25
- response = openai.ChatCompletion.create(
26
- model="gpt-3.5-turbo",
27
- messages=messages
28
- )
29
-
30
- system_message = response["choices"][0]["message"]["content"]
31
- messages.append({"role": "assistant", "content": system_message})
32
-
33
- chat_transcript = ""
34
- for message in messages:
35
- if (message["role"] != "system"):
36
- chat_transcript += message["role"] + ": " + message["content"] + "\n\n"
37
 
38
- return chat_transcript
 
 
 
 
 
39
 
40
- def giveVoice(bot_message):
41
- myobj = gTTS(text=bot_message)
42
- myobj.save("temp.mp3")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
- dir = os.getcwd()
45
- new_path = os.path.join(dir, "temp.mp3")
 
 
 
 
 
 
46
 
47
- return new_path
48
 
49
 
50
 
51
- with gr.Blocks() as demo:
52
  with gr.Row():
53
  with gr.Column():
54
  user_audio = gr.Audio(source="microphone", type="filepath", label="Input Phrase")
 
6
 
7
  openai.api_key = os.environ["OPEN_AI_KEY"]
8
 
 
 
9
 
10
+ with gr.Blocks() as demo:
11
+ messages = gr.State(value=[{"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."}])
12
+ #messages = [{"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."}]
13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
def transcribe(audio):
    """Transcribe a recorded audio file to text with OpenAI Whisper.

    Parameters
    ----------
    audio : str
        Filesystem path to the recording (gr.Audio with type="filepath").

    Returns
    -------
    str
        The transcribed text.
    """
    # Context manager closes the file handle even if the API call raises;
    # the original bare open() leaked the handle.
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    return transcript["text"]
 
22
def botResponse(user_input, messages):
    """Append the user turn, query gpt-3.5-turbo, and return a transcript.

    Parameters
    ----------
    user_input : str
        The user's latest (transcribed) message.
    messages : list[dict]
        Chat history (the gr.State value); mutated in place with the new
        user and assistant turns.

    Returns
    -------
    str
        Transcript of every non-system turn, each followed by a blank line.
    """
    messages.append({"role": "user", "content": user_input})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )

    system_message = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": system_message})

    # join() instead of repeated += — avoids quadratic string building.
    # The system prompt is hidden from the displayed transcript.
    return "".join(
        f"{m['role']}: {m['content']}\n\n"
        for m in messages
        if m["role"] != "system"
    )
 
42
def giveVoice(bot_message):
    """Synthesize *bot_message* to speech and return the MP3's absolute path.

    Parameters
    ----------
    bot_message : str
        Text to speak via gTTS.

    Returns
    -------
    str
        Absolute path to "temp.mp3" in the current working directory
        (overwritten on every call).
    """
    speech = gTTS(text=bot_message)
    # Build the absolute target path up front and save straight to it;
    # also avoids shadowing the builtin `dir` as the original did.
    out_path = os.path.join(os.getcwd(), "temp.mp3")
    speech.save(out_path)
    return out_path
 
 
51
 
52
 
53
 
 
54
  with gr.Row():
55
  with gr.Column():
56
  user_audio = gr.Audio(source="microphone", type="filepath", label="Input Phrase")