Tags: Automatic Speech Recognition · Transformers · Safetensors · Japanese · whisper · audio · hf-asr-leaderboard · Eval Results · Inference Endpoints
asahi417 committed
Commit: 6a1eb4a
Parent: ffbf23c

Update README.md

Files changed (1): README.md (+2 -2)
README.md CHANGED
@@ -205,7 +205,7 @@ pipe = pipeline(
     device=device,
 )
 
-# load sample audio (concatenate instances to creaete a long audio)
+# load sample audio (concatenate instances to create a long audio)
 dataset = load_dataset("japanese-asr/ja_asr.reazonspeech_test", split="test")
 dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
 sample = {"array": np.concatenate([i["array"] for i in dataset[:20]["audio"]]), "sampling_rate": dataset[0]['audio']['sampling_rate'], "path": "tmp"}
@@ -248,7 +248,7 @@ pipe = pipeline(
     device=device,
 )
 
-# load sample audio (concatenate instances to creaete a long audio)
+# load sample audio (concatenate instances to create a long audio)
 dataset = load_dataset("japanese-asr/ja_asr.reazonspeech_test", split="test")
 dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
 sample = {"array": np.concatenate([i["array"] for i in dataset[:20]["audio"]]), "sampling_rate": dataset[0]['audio']['sampling_rate'], "path": "tmp"}