End of training
- README.md +3 -3
- all_results.json +15 -0
- eval_results.json +9 -0
- train_results.json +9 -0
- trainer_state.json +0 -0
README.md
CHANGED
@@ -14,10 +14,10 @@ should probably proofread and complete it, then remove this comment. -->
[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/pszemraj/eduscore-regression/runs/04oc07hx)
# bigbird-roberta-base-fineweb-edu-llama3-annotations-4096-vN

-This model is a fine-tuned version of [google/bigbird-roberta-base](https://huggingface.co/google/bigbird-roberta-base) on
+This model is a fine-tuned version of [google/bigbird-roberta-base](https://huggingface.co/google/bigbird-roberta-base) on the HuggingFaceFW/fineweb-edu-llama3-annotations dataset.
It achieves the following results on the evaluation set:
-- Loss: 0.
-- Mse: 0.
+- Loss: 0.2176
+- Mse: 0.2176

## Model description

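The card describes a single-output regression fine-tune (the eval metric is MSE), i.e. a sequence-classification checkpoint with one continuous label. As a rough illustration only, here is a minimal inference sketch: the repo id is a placeholder guessed from the card title (the published name may differ; note the `-vN` suffix), and the `num_labels=1` regression head is an assumption based on the reported Mse metric, not something confirmed by this commit.

```python
# Minimal inference sketch for a single-output (MSE/regression) fine-tune.
# NOTE: the repo id below is a placeholder inferred from the card title.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "pszemraj/bigbird-roberta-base-fineweb-edu-llama3-annotations-4096"  # assumed name
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)  # assumed num_labels=1
model.eval()

text = "Photosynthesis converts light energy into chemical energy stored in glucose."
inputs = tokenizer(text, truncation=True, max_length=4096, return_tensors="pt")
with torch.no_grad():
    score = model(**inputs).logits.squeeze().item()  # continuous edu-quality score
print(score)
```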
all_results.json
ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 0.999954960229883,
+    "eval_loss": 0.2176452875137329,
+    "eval_mse": 0.21764529895741724,
+    "eval_runtime": 64.1346,
+    "eval_samples": 1000,
+    "eval_samples_per_second": 15.592,
+    "eval_steps_per_second": 3.898,
+    "total_flos": 9.410742865058857e+17,
+    "train_loss": 0.2716184546260883,
+    "train_runtime": 39469.9974,
+    "train_samples": 444052,
+    "train_samples_per_second": 11.25,
+    "train_steps_per_second": 0.088
+}
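As a sanity check, the derived throughput fields above are consistent with the raw counts and runtimes, assuming the conventional definition (samples divided by wall-clock seconds):

```python
# Consistency check for the throughput fields in all_results.json
# (assumes *_samples_per_second = samples / runtime, the conventional definition).
train_samples, train_runtime = 444_052, 39_469.9974
eval_samples, eval_runtime = 1_000, 64.1346

print(round(train_samples / train_runtime, 2))  # 11.25  == train_samples_per_second
print(round(eval_samples / eval_runtime, 3))    # 15.592 == eval_samples_per_second
```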
eval_results.json
ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 0.999954960229883,
+    "eval_loss": 0.2176452875137329,
+    "eval_mse": 0.21764529895741724,
+    "eval_runtime": 64.1346,
+    "eval_samples": 1000,
+    "eval_samples_per_second": 15.592,
+    "eval_steps_per_second": 3.898
+}
train_results.json
ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 0.999954960229883,
+    "total_flos": 9.410742865058857e+17,
+    "train_loss": 0.2716184546260883,
+    "train_runtime": 39469.9974,
+    "train_samples": 444052,
+    "train_samples_per_second": 11.25,
+    "train_steps_per_second": 0.088
+}
trainer_state.json
ADDED
The diff for this file is too large to render.