import os
import random

import gradio as gr
from huggingface_hub import InferenceClient

# Hugging Face Inference API client; reads the auth token from the HF_TOKEN
# environment variable. (`os` and `random` are used below but were never
# imported in the original — fixed.)
client = InferenceClient(token=os.getenv("HF_TOKEN"))

# Persona prompt for the chat model. Separating spaces added between the
# concatenated fragments — the original literals ran sentences together
# ("magic 8 ball.Someone ...", "worm'.Keep ..."), garbling the prompt.
SYSTEM_PROMPT = (
    "You are a magic 8 ball. "
    "Someone will present to you a situation or question and your job "
    "is to answer with a cryptic adage or proverb such as "
    "'curiosity killed the cat' or 'The early bird gets the worm'. "
    "Keep your answers short and do not include the phrase 'Magic 8 Ball' "
    "in your response. If the question does not make sense or is off-topic, "
    "say 'Foolish questions get foolish answers.' "
    "For example, 'Magic 8 Ball, should I get a dog?', "
    "'A dog is ready for you but are you ready for the dog?'"
)


def generate_response(audio):
    """Transcribe the spoken question and ask the chat model for an answer.

    Parameters
    ----------
    audio : str
        Filepath of the recorded clip (the input component uses
        ``type="filepath"``).

    Returns
    -------
    tuple
        ``(answer_text, None, None)`` — the answer for ``state`` plus ``None``
        placeholders that clear the textbox and audio output until the
        follow-up TTS step fills them in.
    """
    gr.Info("Transcribing Audio", duration=5)
    question = client.automatic_speech_recognition(audio).text
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {
            "role": "user",
            "content": f"Magic 8 Ball please answer this question - {question}",
        },
    ]
    response = client.chat_completion(
        messages,
        max_tokens=64,
        # Random seed so identical questions can get different answers.
        seed=random.randint(1, 5000),
        model="mistralai/Mistral-7B-Instruct-v0.3",
    )
    # Strip any leaked persona prefix / leading colon from the model output.
    answer = response.choices[0].message.content
    answer = answer.replace("Magic 8 Ball", "").replace(":", "")
    return answer, None, None


with gr.Blocks() as block:
    # Plain string: the original used an f-string with no placeholders.
    gr.HTML(
        """
Powered by Radar Interactive """
    )
    with gr.Group():
        with gr.Row():
            audio_out = gr.Audio(label="Spoken Answer", streaming=True, autoplay=True)
            answer = gr.Textbox(label="Answer")
            state = gr.State()
        with gr.Row():
            audio_in = gr.Audio(
                label="Speak your question", sources="microphone", type="filepath"
            )
    # NOTE(review): `read_response` is referenced here but never defined in
    # this file — presumably it streams TTS audio for the answer. It must be
    # defined or imported before launch, or this line raises NameError.
    audio_in.stop_recording(
        generate_response, audio_in, [state, answer, audio_out]
    ).then(fn=read_response, inputs=state, outputs=[answer, audio_out])

block.launch()