Spaces:
Running
Running
File size: 2,065 Bytes
b3b0738 a669973 b3b0738 df6ffb4 b3b0738 efbb364 1ef1929 b3b0738 81c648e a669973 959f8ef df6ffb4 b3b0738 df6ffb4 9a48a5d b3b0738 df6ffb4 b3b0738 854d54e 460ccd7 854d54e b3b0738 98c64ba b3b0738 50f2112 b3b0738 1ef1929 854d54e 98c64ba 1ef1929 7a6d372 98c64ba b3b0738 99c65a9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 |
import os
import gradio as gr
import openai
from gtts import gTTS
# Configure the OpenAI client key from the environment.
# Raises KeyError at startup if OPEN_AI_KEY is unset — fail fast, on purpose.
openai.api_key = os.environ["OPEN_AI_KEY"]

# Conversation history shared by the Gradio callbacks; the system prompt pins
# the "therapist" persona and is hidden from the rendered transcript.
# NOTE(review): gr.State is normally created inside a gr.Blocks context — this
# module-level use relies on older Gradio behavior; confirm against the
# installed Gradio version.
messages = gr.State(value=[{"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."}])
def transcribe(audio):
    """Transcribe an audio file with OpenAI Whisper.

    Args:
        audio: Filesystem path to the recording (Gradio passes a temp filepath).

    Returns:
        The transcribed text returned by the whisper-1 model.
    """
    # Context manager so the file handle is closed even if the API call raises
    # (the original leaked the handle on failure).
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    return transcript["text"]
def botResponse(user_input, messages):
    """Append the user's turn, fetch a GPT reply, and render the transcript.

    Args:
        user_input: The user's latest utterance (transcribed text).
        messages: Chat history in OpenAI message format; mutated in place —
            the new user turn and the assistant reply are both appended.

    Returns:
        The conversation as "role: content" paragraphs (each followed by a
        blank line), with the hidden system prompt omitted.
    """
    messages.append({"role": "user", "content": user_input})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    system_message = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": system_message})
    # Build with join instead of repeated `+=` (quadratic on long histories);
    # output is byte-identical, including the trailing blank line.
    return "".join(
        f'{message["role"]}: {message["content"]}\n\n'
        for message in messages
        if message["role"] != "system"
    )
def giveVoice(bot_message):
    """Synthesize speech for the bot's reply and return the audio file path.

    Args:
        bot_message: Text to convert to speech via gTTS (network call).

    Returns:
        Absolute path to "temp.mp3" in the current working directory;
        the file is overwritten on every call.
    """
    tts = gTTS(text=bot_message)
    tts.save("temp.mp3")
    # Original bound the path to a variable named `dir`, shadowing the builtin.
    return os.path.join(os.getcwd(), "temp.mp3")
# Gradio UI: record audio → transcribe → chat with GPT → speak the reply.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            user_audio = gr.Audio(source="microphone", type="filepath", label="Input Phrase")
            submit_btn = gr.Button(value="Transcribe")
        with gr.Column():
            user_transcript = gr.Text(label="User Transcript")
            gpt_transcript = gr.Text(label="Chat Transcript")
            gpt_voice = gr.Audio(label="Voice Response")

    # Event chain: button click → transcript → chat reply → synthesized voice.
    submit_btn.click(transcribe, inputs=user_audio, outputs=user_transcript)
    # Re-enabled: this handler was commented out, which broke the chain —
    # gpt_transcript was never populated, so its .change never fired and no
    # voice response was ever generated.
    user_transcript.change(botResponse, inputs=[user_transcript, messages], outputs=gpt_transcript)
    gpt_transcript.change(giveVoice, inputs=gpt_transcript, outputs=gpt_voice)

demo.launch(share=False)