hwberry2 committed
Commit df6ffb4 · 1 Parent: 03ee9fd

Update app.py

Files changed (1)
  1. app.py +10 -8
app.py CHANGED
@@ -6,26 +6,28 @@ from gtts import gTTS
 
 openai.api_key = os.environ["OPEN_AI_KEY"]
 
+messages = [{"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."}]
 
 
-def transcribe(audio):
+
+def transcribe(audio):
     audio_file = open(audio, "rb")
     # Call the transcribe method with the file-like object
     transcript = openai.Audio.transcribe("whisper-1", audio_file)
 
     return transcript["text"]
 
-def botResponse(user_input):
+def botResponse(user_input):
+    global messages
+
+    messages.append({"role": "user", "content": user_input})
     response = openai.ChatCompletion.create(
         model="gpt-3.5-turbo",
-        #messages=user_input)
-        messages=[
-            {"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."},
-            {"role": "user", "content": user_input}
-        ]
+        messages=messages
     )
 
     system_message = response["choices"][0]["message"]["content"]
+    messages.append({"role": "assistant", "content": system_message})
 
     return system_message
 
@@ -58,7 +60,7 @@ with gr.Blocks() as demo:
     submit_btn.click(transcribe, inputs=user_audio, outputs=user_transcript)
     user_transcript.change(botResponse, inputs=user_transcript, outputs=gpt_transcript)
    gpt_transcript.change(giveVoice, inputs=gpt_transcript, outputs=gpt_voice)
-    user_audio.change(keepLog, inputs=user_transcript, outputs=chat_log)
+    gpt_voice.change(keepLog, inputs=user_transcript, outputs=chat_log)
 
 
 
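The first hunk changes botResponse from rebuilding a two-message prompt on every call to appending each turn to a module-level messages list, so every later call sends the whole conversation to the model. Below is a minimal sketch of that pattern; the OpenAI call is replaced by a hypothetical stub (fake_chat_completion) so the accumulation logic can be run and inspected on its own, and everything about the stub is illustrative, not part of app.py.

```python
# Sketch of the conversation-history pattern introduced in this commit.
# fake_chat_completion is a made-up stand-in for
# openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=history).

messages = [{"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."}]


def fake_chat_completion(history):
    # Echo the latest user turn so the example runs without an API key.
    return {"choices": [{"message": {"content": f"(reply to: {history[-1]['content']})"}}]}


def botResponse(user_input):
    global messages

    messages.append({"role": "user", "content": user_input})
    response = fake_chat_completion(messages)

    system_message = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": system_message})

    return system_message


if __name__ == "__main__":
    botResponse("I have trouble sleeping.")
    botResponse("What did I just tell you?")
    # The second call sends the full history, so the model would see the earlier turn.
    print(len(messages))  # 5: one system prompt, two user turns, two assistant turns
```

Because messages is a module-level global, every call to botResponse in the same process shares one history; that is a property of the pattern in the diff itself, not of this sketch.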
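The second hunk moves the keepLog trigger from user_audio.change to gpt_voice.change, so the chat log updates only after the transcribe → botResponse → giveVoice chain has produced the spoken reply, rather than as soon as new audio is recorded. The sketch below shows that event chain in a self-contained Gradio app; the identifiers match app.py, but the component constructors, labels, and the stub callback bodies are assumptions made for illustration, not the app's actual code.

```python
import gradio as gr
from gtts import gTTS


# Stub callbacks: the real transcribe/botResponse call Whisper and ChatGPT;
# these only exist so the event chain itself can be run and observed.
def transcribe(audio):
    return f"(transcript of {audio})"


def botResponse(user_input):
    return f"(reply to: {user_input})"


def giveVoice(text):
    # Same library app.py imports: synthesize the reply and return the file path.
    gTTS(text).save("bot_reply.mp3")
    return "bot_reply.mp3"


def keepLog(user_text):
    return f"USER: {user_text}"  # placeholder; the real keepLog builds the chat log


with gr.Blocks() as demo:
    user_audio = gr.Audio(type="filepath")
    user_transcript = gr.Textbox(label="User transcript")
    gpt_transcript = gr.Textbox(label="Bot reply")
    gpt_voice = gr.Audio(label="Bot voice")
    chat_log = gr.Textbox(label="Chat log")
    submit_btn = gr.Button("Submit")

    submit_btn.click(transcribe, inputs=user_audio, outputs=user_transcript)
    user_transcript.change(botResponse, inputs=user_transcript, outputs=gpt_transcript)
    gpt_transcript.change(giveVoice, inputs=gpt_transcript, outputs=gpt_voice)
    # Changed in this commit: the log now updates after the voice output is ready.
    gpt_voice.change(keepLog, inputs=user_transcript, outputs=chat_log)

demo.launch()
```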