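# Swahili voice assistant about "viazi lishe" (orange-fleshed sweet potatoes):
# speech is transcribed with a Whisper-based ASR model, a reply is generated by
# gpt-3.5-turbo, and the reply is read back with gTTS inside a Gradio interface.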
import os
import openai
import gradio as gr
from gtts import gTTS
from transformers import pipeline
openai.api_key = os.getenv("OPENAI_API_KEY")
pipe = pipeline(model="lyimo/whisper-small-sw2") # Use your custom ASR model for transcription
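
# Transcribe the recorded audio (a file path from Gradio) to Swahili text with the Whisper pipeline.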
def transcribe(audio):
    text = pipe(audio)["text"]
    return text
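
# Send the transcribed question to gpt-3.5-turbo, primed with a Swahili system prompt
# and a few example turns so replies stay on the topic of viazi lishe and remain in Swahili.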
def generate_response(transcribed_text):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "All your answers should be in Swahili only; the user understands Swahili only, so here we start... Wewe ni mtaalamu wa viazi lishe na utajibu maswali yote kwa kiswahili tu!"},
            {"role": "user", "content": "Mambo vipi?"},
            {"role": "assistant", "content": "Salama je una swali lolote kuhusu viazi lishe?"},
            {"role": "user", "content": "nini maana ya Viazi lishe?"},
            {"role": "assistant", "content": "viazi lishe ni Viazi vitamu vyenye rangi ya karoti kwa ndani ambavyo vina vitamin A kwa wingi"},
            {"role": "user", "content": "nini matumizi ya viazi lishe?"},
            {"role": "assistant", "content": "viazi lishe vinaweza kutengenezea chakula kama Keki, Maandazi, Kalimati na tambi: Ukisaga unga wa viazi lishe, unaweza kutumika kupika vyakula ambavyo huwa watu hutumia unga wa ngano kupika, unga wa viazi lishe una virutubisho vingi zaidi kuliko unga wa ngano na ukitumika kupikia vyakula tajwa hapo juu watumiaji watakuwa wanakula vyakula vyenye virutubisho Zaidi."},
            {"role": "user", "content": transcribed_text},
        ]
    )
    return response['choices'][0]['message']['content']
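
# Convert the Swahili reply to speech with gTTS; tld='co.tz' selects the Tanzanian
# Google Translate host for a more local accent.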
def inference(text):
    tts = gTTS(text=text, lang='sw', tld='co.tz')
    output_file = "tts_output.mp3"
    tts.save(output_file)
    return output_file
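
# End-to-end handler for the Gradio interface: audio in, (text reply, audio reply) out.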
def process_audio_and_respond(audio):
    text = transcribe(audio)
    response_text = generate_response(text)
    output_file = inference(response_text)
    return response_text, output_file
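
# Microphone input, with text and audio outputs. This uses the legacy
# gr.inputs/gr.outputs Interface API, which was removed in Gradio 4,
# so it expects an older Gradio release.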
demo = gr.Interface(
    process_audio_and_respond,
    gr.inputs.Audio(source="microphone", type="filepath", label="Bonyeza kitufe cha kurekodi na uliza swali lako"),
    [gr.outputs.Textbox(label="Jibu (kwa njia ya maandishi)"), gr.outputs.Audio(type="filepath", label="Jibu kwa njia ya sauti (Bofya kusikiliza Jibu)")],
    title="Mtaalamu wa Viazi Lishe",
    description="Uliza Mtaalamu wetu swali lolote Kuhusu viazi Lishe",
    theme="compact",
    layout="vertical",
    allow_flagging=False,
    live=True,
)

demo.launch()