"""Gradio demo: automatic Basque speech transcription with a faster-whisper CT2 model."""

import gradio as gr
import torch
from AinaTheme import theme
from faster_whisper import WhisperModel

# Prefer GPU with float32 when available; fall back to int8 on CPU to save memory.
# NOTE(review): `device` is computed but never passed to WhisperModel — the model
# selects its own device; kept for parity with the original script.
device, torch_dtype = ("cuda", "float32") if torch.cuda.is_available() else ("cpu", "int8")

MODEL_NAME = "xezpeleta/whisper-tiny-eu-ct2"

print("Loading model ...")
model = WhisperModel(MODEL_NAME, compute_type=torch_dtype)
print("Loading model done.")


def transcribe(inputs):
    """Transcribe an audio file to text.

    Args:
        inputs: Filesystem path to the uploaded/recorded audio, or ``None``
            when the user submitted without providing any audio.

    Returns:
        The full transcription as a single string.

    Raises:
        gr.Error: If no audio file was provided.
    """
    print("transcribe()")
    if inputs is None:
        # BUG FIX: the original two string fragments were concatenated without a
        # separating space, producing the garbled word "batedo".
        raise gr.Error(
            "Ez da audio fitxategirik aukeratu. Mesedez, igo audio fitxategi bat "
            "edo grabatu zure ahotsa mikrofono bidez"
        )

    segments, _ = model.transcribe(
        inputs,
        chunk_length=30,
        task="transcribe",
        word_timestamps=True,
        repetition_penalty=1.1,
        # BUG FIX: the original list contained "0,3" (two elements: 0 and 3)
        # instead of the intended 0.3 temperature-fallback step.
        temperature=[0.0, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 1.0],
    )

    # Join segment texts; `segments` is a lazy generator, consumed once here.
    return " ".join(segment.text.strip() for segment in segments)


description_string = (
    "Mikrofono grabazioaren edo audio fitxategi baten transkripzio automatikoa\n"
    " Demo hau hurrengo eredu hauek erabiliz sortua izan da: "
    f"[{MODEL_NAME}](https://huggingface.co/{MODEL_NAME})"
)


def clear():
    """Reset the audio input component to empty."""
    return None


with gr.Blocks(theme=theme) as demo:
    gr.Markdown(description_string)

    with gr.Row():
        with gr.Column(scale=1):
            # Renamed from `input` to avoid shadowing the builtin.
            audio_input = gr.Audio(
                sources=["upload", "microphone"], type="filepath", label="Audio"
            )
        with gr.Column(scale=1):
            output = gr.Textbox(label="Output", lines=8)

    with gr.Row(variant="panel"):
        clear_btn = gr.Button("Clear")
        submit_btn = gr.Button("Submit", variant="primary")

    submit_btn.click(fn=transcribe, inputs=[audio_input], outputs=[output])
    clear_btn.click(fn=clear, inputs=[], outputs=[audio_input], queue=False)


if __name__ == "__main__":
    demo.launch()