lesso01 committed
Commit c71ff86 · verified · 1 Parent(s): 836ee04

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bea408bee92d56e6e10dd66b1022433f1a020ce3c8f307d3d39b037aa96a1c4e
+oid sha256:156f79d9d2a088dacaa1528b970cdc9812b62dd113d4f3c0fee58d8ec80586e0
 size 48679352
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b412f3b1951d17ac8ae88b5821599182640f8e978fea0494972d8483947de2b1
+oid sha256:1e379ca099be724d74a63fbc1cf6b3102b5e5587da085cf79691c84a7d4383eb
 size 25152500
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bbc745c9c1c891b528c4bc43acca5683d7c2b2f8d02a3b49911a8e0a63034a9b
+oid sha256:0057f81d6fb413d17eb7fd106b72832ac1dd0fe44d33531d88d739c1a7f21ccc
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
+oid sha256:1d95f3fb1f9df4cdd5f470d1684a242bf1d1940d47b9622802f603a91ffa5bc0
 size 1064
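
Each of the checkpoint binaries above is tracked with Git LFS, so the diff only ever touches the pointer file: the oid sha256 changes while the reported size stays the same. Below is a minimal sketch of how such a pointer can be checked against a downloaded file, assuming a hypothetical local copy of the checkpoint directory (it only compares the hash and size; it is not the git-lfs client itself).

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_text: str, file_path: str) -> bool:
    """Compare a local file against the oid/size recorded in a Git LFS pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    data = Path(file_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Pointer contents taken from this commit; the local path is an assumption
# about where the checkpoint was downloaded.
adapter_pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:156f79d9d2a088dacaa1528b970cdc9812b62dd113d4f3c0fee58d8ec80586e0\n"
    "size 48679352\n"
)
print(verify_lfs_pointer(adapter_pointer, "last-checkpoint/adapter_model.safetensors"))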
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": NaN,
   "best_model_checkpoint": "miner_id_24/checkpoint-10",
-  "epoch": 0.011025358324145534,
+  "epoch": 0.022050716648291068,
   "eval_steps": 5,
-  "global_step": 10,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -101,6 +101,92 @@
       "eval_samples_per_second": 7.114,
       "eval_steps_per_second": 3.557,
       "step": 10
+    },
+    {
+      "epoch": 0.012127894156560088,
+      "grad_norm": 1.2060033082962036,
+      "learning_rate": 0.00019781476007338058,
+      "loss": 1.7359,
+      "step": 11
+    },
+    {
+      "epoch": 0.013230429988974642,
+      "grad_norm": 1.1315321922302246,
+      "learning_rate": 0.0001913545457642601,
+      "loss": 1.5669,
+      "step": 12
+    },
+    {
+      "epoch": 0.014332965821389196,
+      "grad_norm": 1.1741143465042114,
+      "learning_rate": 0.00018090169943749476,
+      "loss": 1.606,
+      "step": 13
+    },
+    {
+      "epoch": 0.015435501653803748,
+      "grad_norm": NaN,
+      "learning_rate": 0.00016691306063588583,
+      "loss": 2.3572,
+      "step": 14
+    },
+    {
+      "epoch": 0.016538037486218304,
+      "grad_norm": 0.6324079036712646,
+      "learning_rate": 0.00015000000000000001,
+      "loss": 1.4557,
+      "step": 15
+    },
+    {
+      "epoch": 0.016538037486218304,
+      "eval_loss": NaN,
+      "eval_runtime": 53.6318,
+      "eval_samples_per_second": 7.123,
+      "eval_steps_per_second": 3.561,
+      "step": 15
+    },
+    {
+      "epoch": 0.017640573318632856,
+      "grad_norm": 0.648357093334198,
+      "learning_rate": 0.00013090169943749476,
+      "loss": 1.5305,
+      "step": 16
+    },
+    {
+      "epoch": 0.018743109151047408,
+      "grad_norm": 0.5638170838356018,
+      "learning_rate": 0.00011045284632676536,
+      "loss": 1.386,
+      "step": 17
+    },
+    {
+      "epoch": 0.019845644983461964,
+      "grad_norm": 0.7388977408409119,
+      "learning_rate": 8.954715367323468e-05,
+      "loss": 1.3905,
+      "step": 18
+    },
+    {
+      "epoch": 0.020948180815876516,
+      "grad_norm": NaN,
+      "learning_rate": 6.909830056250527e-05,
+      "loss": 2.1132,
+      "step": 19
+    },
+    {
+      "epoch": 0.022050716648291068,
+      "grad_norm": 0.5183130502700806,
+      "learning_rate": 5.000000000000002e-05,
+      "loss": 1.3396,
+      "step": 20
+    },
+    {
+      "epoch": 0.022050716648291068,
+      "eval_loss": NaN,
+      "eval_runtime": 53.1373,
+      "eval_samples_per_second": 7.189,
+      "eval_steps_per_second": 3.594,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -115,7 +201,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter": 0
+        "early_stopping_patience_counter": 2
       }
     },
     "TrainerControl": {
@@ -124,12 +210,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1391446091366400.0,
+  "total_flos": 2782892182732800.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null