Update app.py
app.py CHANGED
@@ -14,8 +14,9 @@ openai.api_key = os.environ["OPEN_AI_KEY"]
 messages = [{"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."}]
 
 
-def transcribe(audio):
-
+def transcribe(audio, test):
+    stringIn = test
+
     audio_file = open(audio, "rb")
     # Call the transcribe method with the file-like object
     transcript = openai.Audio.transcribe("whisper-1", audio_file)
@@ -72,7 +73,7 @@ with gr.Blocks() as demo:
     gpt_transcript = gr.Text(label="Generate Transcript")
     gpt_transcript2 = gr.Text(label="Bot Response")
     gpt_response = gr.Audio(label="Voice Response")
-    submit_btn.click(transcribe, inputs=user_audio, outputs=gpt_transcript)
+    submit_btn.click(transcribe, inputs=[user_audio, "test"], outputs=gpt_transcript)
 
 
 
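Note: the updated submit_btn.click call passes the literal string "test" in inputs, but Gradio's event wiring expects component objects there, and each component's current value is what gets passed to the function. A minimal sketch of how the extra argument to transcribe is usually wired, assuming a gr.Textbox component (here named extra_text) and a stub transcribe body purely for illustration, not the Space's actual code:

import gradio as gr

# Stub stand-in for the Space's transcribe(audio, test); names and labels are assumed.
def transcribe(audio, test):
    # "test" arrives as the current value of the Textbox component (a plain string).
    return f"audio file: {audio!r}, extra text: {test!r}"

with gr.Blocks() as demo:
    user_audio = gr.Audio(type="filepath", label="Your Message")  # assumed definition
    extra_text = gr.Textbox(label="Extra Text")                   # hypothetical component
    gpt_transcript = gr.Text(label="Generate Transcript")
    submit_btn = gr.Button("Submit")

    # inputs takes component objects, not literal strings like "test"
    submit_btn.click(transcribe, inputs=[user_audio, extra_text], outputs=gpt_transcript)

demo.launch()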