ivangrapher committed
Commit 963e25a (verified) · 1 Parent(s): 5c847b9

Training in progress, step 30, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:289092b7402162660e501cb3bf1a85d76106a518b9f1ff0b7d37db3e33702bdc
+oid sha256:173f30dc58dab74d2bea2343bc2c34154dcb7c1f879d81a28b92b1980e87e538
 size 503884594
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:140af42b3dd3ce72edf59502c4890f5e5ea80c1962733616ffe90a60e5476d07
+oid sha256:d8499756812b26ce8f2ce9d439126e69f58899480f58682d1e03352b776e532e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ef527702b4e2147a500c0b29beb26751d245ebb60338b3910b449b6549ac4d59
+oid sha256:2def2cd24154d8cecbaa07c36ae27e5ebb9b7273a78abfea27aa67c480e4ae2b
 size 1064
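
The three .pt/.pth diffs above touch Git LFS pointer files: only the `oid sha256:` and `size` lines are versioned in Git, while the binaries live in LFS storage. A minimal sketch (not part of this commit, assuming the checkpoint has been pulled locally) of checking a downloaded file against the new oid from the optimizer.pt pointer:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file through SHA-256 so a large checkpoint never has to fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the updated optimizer.pt pointer above
expected = "173f30dc58dab74d2bea2343bc2c34154dcb7c1f879d81a28b92b1980e87e538"
actual = sha256_of("last-checkpoint/optimizer.pt")
print("optimizer.pt matches pointer:", actual == expected)
```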
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.02631578947368421,
+  "epoch": 0.05263157894736842,
   "eval_steps": 5,
-  "global_step": 15,
+  "global_step": 30,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -74,6 +74,65 @@
       "eval_samples_per_second": 4.808,
       "eval_steps_per_second": 2.404,
       "step": 15
+    },
+    {
+      "epoch": 0.031578947368421054,
+      "grad_norm": NaN,
+      "learning_rate": 0.00018090169943749476,
+      "loss": 0.0,
+      "step": 18
+    },
+    {
+      "epoch": 0.03508771929824561,
+      "eval_loss": NaN,
+      "eval_runtime": 49.9139,
+      "eval_samples_per_second": 4.808,
+      "eval_steps_per_second": 2.404,
+      "step": 20
+    },
+    {
+      "epoch": 0.03684210526315789,
+      "grad_norm": NaN,
+      "learning_rate": 0.00013090169943749476,
+      "loss": 0.0,
+      "step": 21
+    },
+    {
+      "epoch": 0.042105263157894736,
+      "grad_norm": NaN,
+      "learning_rate": 6.909830056250527e-05,
+      "loss": 0.0,
+      "step": 24
+    },
+    {
+      "epoch": 0.043859649122807015,
+      "eval_loss": NaN,
+      "eval_runtime": 49.9239,
+      "eval_samples_per_second": 4.807,
+      "eval_steps_per_second": 2.404,
+      "step": 25
+    },
+    {
+      "epoch": 0.04736842105263158,
+      "grad_norm": NaN,
+      "learning_rate": 1.9098300562505266e-05,
+      "loss": 0.0,
+      "step": 27
+    },
+    {
+      "epoch": 0.05263157894736842,
+      "grad_norm": NaN,
+      "learning_rate": 0.0,
+      "loss": 0.0,
+      "step": 30
+    },
+    {
+      "epoch": 0.05263157894736842,
+      "eval_loss": NaN,
+      "eval_runtime": 49.9657,
+      "eval_samples_per_second": 4.803,
+      "eval_steps_per_second": 2.402,
+      "step": 30
     }
   ],
   "logging_steps": 3,
@@ -88,12 +147,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.4544490891051008e+16,
+  "total_flos": 2.9088981782102016e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null