nomsgadded committed on
Commit
a764d9f
1 Parent(s): ba7457a

End of training

Browse files
Files changed (5) hide show
  1. README.md +2 -2
  2. all_results.json +15 -0
  3. eval_results.json +10 -0
  4. train_results.json +8 -0
  5. trainer_state.json +79 -0
README.md CHANGED
@@ -14,7 +14,7 @@ model-index:
14
  name: Causal Language Modeling
15
  type: text-generation
16
  dataset:
17
- name: wikitext
18
  type: wikitext
19
  config: wikitext-2-raw-v1
20
  split: validation
@@ -30,7 +30,7 @@ should probably proofread and complete it, then remove this comment. -->
30
 
31
  # clm
32
 
33
- This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the wikitext dataset.
34
  It achieves the following results on the evaluation set:
35
  - Loss: 3.4802
36
  - Accuracy: 0.3719
 
14
  name: Causal Language Modeling
15
  type: text-generation
16
  dataset:
17
+ name: wikitext wikitext-2-raw-v1
18
  type: wikitext
19
  config: wikitext-2-raw-v1
20
  split: validation
 
30
 
31
  # clm
32
 
33
+ This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the wikitext wikitext-2-raw-v1 dataset.
34
  It achieves the following results on the evaluation set:
35
  - Loss: 3.4802
36
  - Accuracy: 0.3719
all_results.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.99,
3
+ "eval_accuracy": 0.37187601824698596,
4
+ "eval_loss": 3.4801909923553467,
5
+ "eval_runtime": 104.7921,
6
+ "eval_samples": 240,
7
+ "eval_samples_per_second": 2.29,
8
+ "eval_steps_per_second": 0.286,
9
+ "perplexity": 32.46592222670883,
10
+ "train_loss": 3.7061044375101724,
11
+ "train_runtime": 2296.6495,
12
+ "train_samples": 2318,
13
+ "train_samples_per_second": 1.009,
14
+ "train_steps_per_second": 0.031
15
+ }
eval_results.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.99,
3
+ "eval_accuracy": 0.37187601824698596,
4
+ "eval_loss": 3.4801909923553467,
5
+ "eval_runtime": 104.7921,
6
+ "eval_samples": 240,
7
+ "eval_samples_per_second": 2.29,
8
+ "eval_steps_per_second": 0.286,
9
+ "perplexity": 32.46592222670883
10
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.99,
3
+ "train_loss": 3.7061044375101724,
4
+ "train_runtime": 2296.6495,
5
+ "train_samples": 2318,
6
+ "train_samples_per_second": 1.009,
7
+ "train_steps_per_second": 0.031
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.37187601824698596,
3
+ "best_model_checkpoint": ".\\output\\checkpoint-72",
4
+ "epoch": 0.993103448275862,
5
+ "eval_steps": 500,
6
+ "global_step": 72,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.14,
13
+ "learning_rate": 2.90625e-05,
14
+ "loss": 3.9372,
15
+ "step": 10
16
+ },
17
+ {
18
+ "epoch": 0.28,
19
+ "learning_rate": 2.4375e-05,
20
+ "loss": 3.76,
21
+ "step": 20
22
+ },
23
+ {
24
+ "epoch": 0.41,
25
+ "learning_rate": 1.96875e-05,
26
+ "loss": 3.6869,
27
+ "step": 30
28
+ },
29
+ {
30
+ "epoch": 0.55,
31
+ "learning_rate": 1.5e-05,
32
+ "loss": 3.6689,
33
+ "step": 40
34
+ },
35
+ {
36
+ "epoch": 0.69,
37
+ "learning_rate": 1.03125e-05,
38
+ "loss": 3.6483,
39
+ "step": 50
40
+ },
41
+ {
42
+ "epoch": 0.83,
43
+ "learning_rate": 5.625e-06,
44
+ "loss": 3.6523,
45
+ "step": 60
46
+ },
47
+ {
48
+ "epoch": 0.97,
49
+ "learning_rate": 9.375e-07,
50
+ "loss": 3.6158,
51
+ "step": 70
52
+ },
53
+ {
54
+ "epoch": 0.99,
55
+ "eval_accuracy": 0.37187601824698596,
56
+ "eval_loss": 3.4801909923553467,
57
+ "eval_runtime": 101.4158,
58
+ "eval_samples_per_second": 2.366,
59
+ "eval_steps_per_second": 0.296,
60
+ "step": 72
61
+ },
62
+ {
63
+ "epoch": 0.99,
64
+ "step": 72,
65
+ "total_flos": 602027713363968.0,
66
+ "train_loss": 3.7061044375101724,
67
+ "train_runtime": 2296.6495,
68
+ "train_samples_per_second": 1.009,
69
+ "train_steps_per_second": 0.031
70
+ }
71
+ ],
72
+ "logging_steps": 10,
73
+ "max_steps": 72,
74
+ "num_train_epochs": 1,
75
+ "save_steps": 100.0,
76
+ "total_flos": 602027713363968.0,
77
+ "trial_name": null,
78
+ "trial_params": null
79
+ }