import os
import gradio as gr
import openai
from gtts import gTTS
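
# Voice-therapist demo: record a phrase, transcribe it with OpenAI Whisper,
# generate a short reply with gpt-3.5-turbo, and read the reply aloud via gTTS.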


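# Authentication: the API key is read from the OPEN_AI_KEY environment variable,
# which must be set before the app starts. The openai.* calls below use the
# pre-1.0 openai-python interface (openai.Audio, openai.ChatCompletion).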
openai.api_key = os.environ["OPEN_AI_KEY"]



def transcribe(audio):
    # Open the recorded file and send it to OpenAI's Whisper API for transcription
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)

    return transcript["text"]

def botResponse(user_input):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."},
            {"role": "user", "content": user_input}
        ]
    )

    system_message = response["choices"][0]["message"]["content"]

    return system_message

def giveVoice(bot_message):
    # Synthesize the reply with gTTS; temp.mp3 is overwritten on every turn
    tts = gTTS(text=bot_message)
    tts.save("temp.mp3")

    # Return the absolute path so Gradio can serve the audio file
    new_path = os.path.join(os.getcwd(), "temp.mp3")

    return new_path

def keepLog(gpt):
    # Placeholder: echo the latest user transcript into the chat log box
    return gpt



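# UI: the left column records audio and triggers transcription; the right column
# shows the user transcript, the GPT reply, the synthesized voice, and the chat
# log. Events are chained: Transcribe click -> user transcript -> bot reply -> voice.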
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            user_audio = gr.Audio(source="microphone", type="filepath", label="Input Phrase")
            submit_btn = gr.Button(value="Transcribe")
        with gr.Column():
            user_transcript = gr.Text(label="User Transcript")
            gpt_transcript = gr.Text(label="GPT Transcript")
            gpt_voice = gr.Audio(label="Voice Response")
            chat_log = gr.Text(label="Chat Log")
    submit_btn.click(transcribe, inputs=user_audio, outputs=user_transcript)
    user_transcript.change(botResponse, inputs=user_transcript, outputs=gpt_transcript)
    gpt_transcript.change(giveVoice, inputs=gpt_transcript, outputs=gpt_voice)
    user_transcript.change(keepLog, inputs=user_transcript, outputs=chat_log)


demo.launch(share=False)
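
# share=False keeps the interface local; set share=True for a temporary public Gradio link.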