End of training
Browse files

- README.md +16 -16
- config.json +1 -1
- generation_config.json +1 -1
- model.safetensors +1 -1
- runs/Jul06_13-20-54_ebda98d46a64/events.out.tfevents.1720272062.ebda98d46a64.601.0 +3 -0
- training_args.bin +2 -2
README.md
CHANGED
@@ -6,7 +6,7 @@ base_model: openai/whisper-small.en
 tags:
 - generated_from_trainer
 datasets:
-- bika5/
+- bika5/pfedrax
 metrics:
 - wer
 model-index:
@@ -16,13 +16,13 @@ model-index:
       name: Automatic Speech Recognition
       type: automatic-speech-recognition
     dataset:
-      name:
-      type: bika5/
+      name: pfedrax
+      type: bika5/pfedrax
       args: 'config: en, split: test'
     metrics:
     - name: Wer
       type: wer
-      value:
+      value: 92.92035398230088
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -30,10 +30,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 # Whisper pfe - bika5
 
-This model is a fine-tuned version of [openai/whisper-small.en](https://huggingface.co/openai/whisper-small.en) on the
+This model is a fine-tuned version of [openai/whisper-small.en](https://huggingface.co/openai/whisper-small.en) on the pfedrax dataset.
 It achieves the following results on the evaluation set:
-- Loss:
--
+- Loss: 3.6101
+- Model Preparation Time: 0.006
+- Wer: 92.9204
 
 ## Model description
 
@@ -59,21 +60,20 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
 - lr_scheduler_warmup_steps: 500
-- training_steps:
+- training_steps: 2000
 - mixed_precision_training: Native AMP
 
 ### Training results
 
-| Training Loss | Epoch
-|
-| 0.0001 |
-| 0.
-| 0.0 | 1500.0 | 3000 | 1.8848 | 78.7611 |
+| Training Loss | Epoch    | Step | Validation Loss | Model Preparation Time | Wer     |
+|:-------------:|:--------:|:----:|:---------------:|:----------------------:|:-------:|
+| 0.0001        | 83.3333  | 1000 | 3.5216          | 0.006                  | 92.9204 |
+| 0.0           | 166.6667 | 2000 | 3.6101          | 0.006                  | 92.9204 |
 
 
 ### Framework versions
 
-- Transformers 4.
-- Pytorch 2.
-- Datasets 2.
+- Transformers 4.43.0.dev0
+- Pytorch 2.3.0+cu121
+- Datasets 2.20.0
 - Tokenizers 0.19.1
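The updated card describes the fine-tuned checkpoint and reports its evaluation WER. As a minimal usage sketch (the repo id `bika5/whisper-pfe` below is a placeholder not confirmed by this diff; substitute this repository's actual model id), the checkpoint can be loaded through the `transformers` speech-recognition pipeline:

```python
from transformers import pipeline

# Placeholder repo id -- replace with this repository's actual model id.
asr = pipeline(
    "automatic-speech-recognition",
    model="bika5/whisper-pfe",
    chunk_length_s=30,  # Whisper consumes audio in 30-second windows
)

# Any mono audio file works; the pipeline decodes and resamples it to 16 kHz
# and chunks longer recordings automatically.
print(asr("sample.wav")["text"])
```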
config.json
CHANGED
@@ -45,7 +45,7 @@
   "scale_embedding": false,
   "suppress_tokens": [],
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.43.0.dev0",
   "use_cache": true,
   "use_weighted_layer_sum": false,
   "vocab_size": 51864
generation_config.json
CHANGED
@@ -188,5 +188,5 @@
     50360,
     50361
   ],
-  "transformers_version": "4.
+  "transformers_version": "4.43.0.dev0"
 }
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e5a0d7f01d6ff21c0585402b9608c1231b370a7ba3b74ecf18c92934d01242d8
 size 966992008
runs/Jul06_13-20-54_ebda98d46a64/events.out.tfevents.1720272062.ebda98d46a64.601.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec1f38234dd6894395208c8bdbf5a1f4d439fda56270ac0e98b63476b003835a
+size 24051
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:aef6cf43118ff1463051f283573d7eab92f26727df7edf9c64a3a5877bc9dd8f
+size 5304
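`training_args.bin` is an opaque pickled arguments object, so the training configuration itself is only visible through the model card. A rough sketch of `Seq2SeqTrainingArguments` consistent with the values listed there (500 warmup steps, 2000 training steps, linear schedule, native AMP, evaluation rows at steps 1000 and 2000) might look as follows; anything marked as a placeholder is not recorded in the visible hunks:

```python
from transformers import Seq2SeqTrainingArguments

# Sketch only -- placeholders stand in for values not shown in this diff.
training_args = Seq2SeqTrainingArguments(
    output_dir="./whisper-pfe",      # placeholder
    learning_rate=1e-5,              # placeholder
    per_device_train_batch_size=16,  # placeholder
    warmup_steps=500,                # lr_scheduler_warmup_steps: 500
    max_steps=2000,                  # training_steps: 2000
    lr_scheduler_type="linear",      # lr_scheduler_type: linear
    fp16=True,                       # mixed_precision_training: Native AMP
    eval_strategy="steps",
    eval_steps=1000,                 # evaluation logged at steps 1000 and 2000
    predict_with_generate=True,      # generate transcripts so WER can be scored
)
```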