Spaces: Running on Zero
Commit: Fix return_timestamps
Browse files
app.py CHANGED
@@ -17,7 +17,7 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 17  print(f"Using device: {device}")
 18
 19  @spaces.GPU(duration=120)
 20 -def pipe(file):
 20 +def pipe(file, return_timestamps=False):
 21      asr = pipeline(
 22          task="automatic-speech-recognition",
 23          model=MODEL_NAME,
@@ -26,7 +26,7 @@ def pipe(file):
 26          token=auth_token,
 27      )
 28      asr.model.config.forced_decoder_ids = asr.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
 29 -    return asr(file)
 29 +    return asr(file, return_timestamps=return_timestamps)
 30
 31  def transcribe(file, return_timestamps=False):
 32      if not return_timestamps: