import os
import time
from pathlib import Path
import gradio as gr
import librosa
import spaces
import torch
from loguru import logger
from transformers import pipeline
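# Hugging Face Spaces sets the SYSTEM environment variable to "spaces";
# use it to decide whether to load the WIP model from the Hub or from a local checkpoint.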
is_hf = os.getenv("SYSTEM") == "spaces"
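# Decoding options shared by every pipeline: force Japanese, greedy decoding
# (no sampling, a single beam), and suppress any repeated 3-gram.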
generate_kwargs = {
"language": "Japanese",
"do_sample": False,
"num_beams": 1,
"no_repeat_ngram_size": 3,
}
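# Display name -> Hub model id (the WIP model falls back to a local
# fine-tuned checkpoint when running outside Spaces).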
model_dict = {
"whisper-large-v2": "openai/whisper-large-v2",
"whisper-large-v3": "openai/whisper-large-v3",
"whisper-large-v3-turbo": "openai/whisper-large-v3-turbo",
"kotoba-whisper-v1.0": "kotoba-tech/kotoba-whisper-v1.0",
"kotoba-whisper-v2.0": "kotoba-tech/kotoba-whisper-v2.0",
"galgame-whisper-wip": (
"litagin/galgame-whisper-wip"
if is_hf
else "../whisper_finetune/galgame-whisper"
),
}
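# Build one ASR pipeline per model eagerly so every button responds without a
# cold start; note this loads all checkpoints into memory at startup.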
logger.info("Initializing pipelines...")
pipe_dict = {
k: pipeline(
"automatic-speech-recognition",
model=v,
device="cuda" if torch.cuda.is_available() else "cpu",
)
for k, v in model_dict.items()
}
logger.success("Pipelines initialized!")
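# On a ZeroGPU Space, @spaces.GPU requests a GPU for the duration of each call;
# outside Spaces the decorator has no effect.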
@spaces.GPU
def transcribe_common(audio: str, model: str) -> tuple[str, float]:
    # Gradio passes None when the button is clicked with no audio recorded or uploaded
    if audio is None:
        return "No audio uploaded", 0
    filename = Path(audio).name
logger.info(f"Model: {model}")
logger.info(f"Audio: {filename}")
# Read and resample audio to 16kHz
y, sr = librosa.load(audio, mono=True, sr=16000)
# Get duration of audio
duration = librosa.get_duration(y=y, sr=sr)
logger.info(f"Duration: {duration:.2f}s")
if duration > 15:
return "Audio too long, limit is 15 seconds", 0
start_time = time.time()
result = pipe_dict[model](y, generate_kwargs=generate_kwargs)["text"]
end_time = time.time()
logger.success(f"Finished in {end_time - start_time:.2f}s\n{result}")
return result, end_time - start_time
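# One thin wrapper per model so each Gradio button gets its own named handler.
# (These could also be built with functools.partial(transcribe_common, model=...),
# at the cost of less readable function names.)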
def transcribe_large_v2(audio) -> tuple[str, float]:
return transcribe_common(audio, "whisper-large-v2")
def transcribe_large_v3(audio) -> tuple[str, float]:
return transcribe_common(audio, "whisper-large-v3")
def transcribe_large_v3_turbo(audio) -> tuple[str, float]:
return transcribe_common(audio, "whisper-large-v3-turbo")
def transcribe_kotoba_v1(audio) -> tuple[str, float]:
return transcribe_common(audio, "kotoba-whisper-v1.0")
def transcribe_kotoba_v2(audio) -> tuple[str, float]:
return transcribe_common(audio, "kotoba-whisper-v2.0")
def transcribe_galgame_whisper(audio) -> tuple[str, float]:
return transcribe_common(audio, "galgame-whisper-wip")
# def warmup():
# logger.info("Warm-up...")
# return transcribe_large_v3_turbo("test.wav")
initial_md = """
# Galgame-Whisper (WIP) Demo

- A demo of an **unfinished model** fine-tuned from the speech-recognition model [kotoba-whisper-v2.0](https://huggingface.co/kotoba-tech/kotoba-whisper-v2.0)
- https://huggingface.co/litagin/galgame-whisper-wip
- Trained for only about 0.1 epoch so far
- Japanese only
- Audio is limited to 15 seconds in this demo
- The other models are included so results can be compared

The kwargs passed to the pipeline are as follows:

```python
generate_kwargs = {
    "language": "Japanese",
    "do_sample": False,
    "num_beams": 1,
    "no_repeat_ngram_size": 3,  # prevent the same 3-gram from repeating
}
```
"""
with gr.Blocks() as app:
gr.Markdown(initial_md)
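    # type="filepath" means the handlers receive a temp-file path (or None when empty)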
audio = gr.Audio(type="filepath")
with gr.Row():
with gr.Column():
gr.Markdown("### Galgame-Whisper (WIP)")
button_galgame = gr.Button("Transcribe with Galgame-Whisper (WIP)")
time_galgame = gr.Textbox(label="Time taken")
output_galgame = gr.Textbox(label="Result")
with gr.Row():
with gr.Column():
gr.Markdown("### Whisper-Large-V2")
button_v2 = gr.Button("Transcribe with Whisper-Large-V2")
time_v2 = gr.Textbox(label="Time taken")
output_v2 = gr.Textbox(label="Result")
with gr.Column():
gr.Markdown("### Whisper-Large-V3")
button_v3 = gr.Button("Transcribe with Whisper-Large-V3")
time_v3 = gr.Textbox(label="Time taken")
output_v3 = gr.Textbox(label="Result")
with gr.Column():
gr.Markdown("### Whisper-Large-V3-Turbo")
button_v3_turbo = gr.Button("Transcribe with Whisper-Large-V3-Turbo")
time_v3_turbo = gr.Textbox(label="Time taken")
output_v3_turbo = gr.Textbox(label="Result")
with gr.Row():
with gr.Column():
gr.Markdown("### Kotoba-Whisper-V1.0")
button_kotoba_v1 = gr.Button("Transcribe with Kotoba-Whisper-V1.0")
time_kotoba_v1 = gr.Textbox(label="Time taken")
output_kotoba_v1 = gr.Textbox(label="Result")
with gr.Column():
gr.Markdown("### Kotoba-Whisper-V2.0")
button_kotoba_v2 = gr.Button("Transcribe with Kotoba-Whisper-V2.0")
time_kotoba_v2 = gr.Textbox(label="Time taken")
output_kotoba_v2 = gr.Textbox(label="Result")
# warmup_result = gr.Textbox(label="Warm-up result", visible=False)
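    # Wire each button to its model; each handler returns (text, elapsed seconds),
    # matched by the [output, time] lists below.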
button_v2.click(transcribe_large_v2, inputs=audio, outputs=[output_v2, time_v2])
button_v3.click(transcribe_large_v3, inputs=audio, outputs=[output_v3, time_v3])
button_v3_turbo.click(
transcribe_large_v3_turbo,
inputs=audio,
outputs=[output_v3_turbo, time_v3_turbo],
)
button_kotoba_v1.click(
transcribe_kotoba_v1, inputs=audio, outputs=[output_kotoba_v1, time_kotoba_v1]
)
button_kotoba_v2.click(
transcribe_kotoba_v2, inputs=audio, outputs=[output_kotoba_v2, time_kotoba_v2]
)
button_galgame.click(
transcribe_galgame_whisper,
inputs=audio,
outputs=[output_galgame, time_galgame],
)
# app.load(warmup, inputs=[], outputs=[warmup_result], queue=True)
if __name__ == "__main__":
    app.launch(inbrowser=True)