Baselhany committed on
Commit
00690dd
·
verified ·
1 Parent(s): 7ca7f29

Training finished

Browse files
README.md CHANGED
@@ -1,20 +1,28 @@
1
  ---
2
  library_name: transformers
 
 
3
  license: apache-2.0
4
  base_model: openai/whisper-tiny
5
  tags:
6
  - generated_from_trainer
 
 
7
  model-index:
8
- - name: test_basel
9
  results: []
10
  ---
11
 
12
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
13
  should probably proofread and complete it, then remove this comment. -->
14
 
15
- # test_basel
16
 
17
- This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on an unknown dataset.
 
 
 
 
18
 
19
  ## Model description
20
 
 
1
  ---
2
  library_name: transformers
3
+ language:
4
+ - ar
5
  license: apache-2.0
6
  base_model: openai/whisper-tiny
7
  tags:
8
  - generated_from_trainer
9
+ metrics:
10
+ - wer
11
  model-index:
12
+ - name: Whisper tiny AR - BH
13
  results: []
14
  ---
15
 
16
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
17
  should probably proofread and complete it, then remove this comment. -->
18
 
19
+ # Whisper tiny AR - BH
20
 
21
+ This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on the quran-ayat-speech-to-text dataset.
22
+ It achieves the following results on the evaluation set:
23
+ - Loss: 9.1584
24
+ - Wer: 409.4976
25
+ - Cer: 202.1874
26
 
27
  ## Model description
28
 
runs/Feb13_17-36-49_e9c2a6f87655/events.out.tfevents.1739468399.e9c2a6f87655.31.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:285522fc41fbe6a66ebb78aed2482c525fc7bb3ad815ae30f024b13ec5979a28
3
+ size 446
trainer_state.json ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 332.2475570032573,
3
+ "best_model_checkpoint": "./temp/checkpoint-10",
4
+ "epoch": 0.9523809523809523,
5
+ "eval_steps": 10,
6
+ "global_step": 15,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.6349206349206349,
13
+ "eval_cer": 186.56571539506893,
14
+ "eval_loss": 9.826007843017578,
15
+ "eval_runtime": 21.7924,
16
+ "eval_samples_per_second": 2.294,
17
+ "eval_steps_per_second": 0.184,
18
+ "eval_wer": 332.2475570032573,
19
+ "step": 10
20
+ },
21
+ {
22
+ "epoch": 0.9523809523809523,
23
+ "step": 15,
24
+ "total_flos": 2.36341297152e+16,
25
+ "train_loss": 9.86193135579427,
26
+ "train_runtime": 128.734,
27
+ "train_samples_per_second": 7.768,
28
+ "train_steps_per_second": 0.117
29
+ }
30
+ ],
31
+ "logging_steps": 50,
32
+ "max_steps": 15,
33
+ "num_input_tokens_seen": 0,
34
+ "num_train_epochs": 1,
35
+ "save_steps": 50,
36
+ "stateful_callbacks": {
37
+ "TrainerControl": {
38
+ "args": {
39
+ "should_epoch_stop": false,
40
+ "should_evaluate": false,
41
+ "should_log": false,
42
+ "should_save": true,
43
+ "should_training_stop": true
44
+ },
45
+ "attributes": {}
46
+ }
47
+ },
48
+ "total_flos": 2.36341297152e+16,
49
+ "train_batch_size": 16,
50
+ "trial_name": null,
51
+ "trial_params": null
52
+ }