kooff11 committed
Commit 7f2a295 · verified · 1 Parent(s): 1e5e382

Training in progress, step 26, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5236324e3a7a622b3d3a8c3b4c5e490c95e2b6e344054539dc8e628cd01a41df
+ oid sha256:25c4471d4c9cb64d09a6a7b30ac9acaaee7bcb2ad736e42b5cae06ec31ff4d3a
  size 50503544
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8b1139378ee5687e8f880b32c1381b49cc84b4e697788cbff25b2ff8b3d92428
+ oid sha256:ce7a8feea41db270f56d340882d0d3416c58bd18cc1b291769b71a91d463cd0f
  size 25986148
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:010506f5183effa53d7aa8d46a25684a39261fcc416c599950a69030c9f3e88c
+ oid sha256:63f4f0d73cfa12954b18866e2bc71834d7bd1e65711fb8787e02fc9fa799dbae
  size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1022e527da06cbaf638e84a3ed40784692ff247255b0b6df21fb38828d3c2cd9
+ oid sha256:b20095ae7e248e019efdc0c5bbc6d09f2da8b05d1a427b4f2fe08d3ff1ff17a2
  size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5adea6def46a60dd5782726d51a89bdf0f30226cd791e511d8af09a5644f99e2
+ oid sha256:6e65bdb10468d12c8b6afa89fe8730e38a94a4f704431de04b0c1bf27440afce
  size 1064
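Aside: the five files above are stored through Git LFS, so the diff only shows the pointer file for each one (a version line, an oid sha256 line, and a size line in bytes), not the binary checkpoint contents. As a minimal, illustrative sketch, a locally downloaded file can be checked against the pointer shown in the diff; the helper names and local path here are hypothetical, not part of this repository.

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(pointer_text: str) -> dict:
    """Parse a Git LFS pointer of the form shown above:
    version <spec-url> / oid sha256:<hex digest> / size <bytes>."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    return {
        "oid": fields["oid"].split(":", 1)[1],  # hex sha256 digest
        "size": int(fields["size"]),            # expected size in bytes
    }


def matches_pointer(local_file: str, pointer: dict) -> bool:
    """Return True if the local file's byte size and sha256 match the pointer."""
    path = Path(local_file)
    if path.stat().st_size != pointer["size"]:
        return False
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == pointer["oid"]


# Example with the new adapter_model.safetensors pointer from this commit
# (the local path is hypothetical):
pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:25c4471d4c9cb64d09a6a7b30ac9acaaee7bcb2ad736e42b5cae06ec31ff4d3a\n"
    "size 50503544"
)
# matches_pointer("last-checkpoint/adapter_model.safetensors", pointer)
```

This is only a consistency check against the pointer in the diff; fetching the actual weights is still done through git lfs.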
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.19818961410195332,
+ "epoch": 0.39637922820390664,
  "eval_steps": 13,
- "global_step": 13,
+ "global_step": 26,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -114,6 +114,105 @@
  "eval_samples_per_second": 39.24,
  "eval_steps_per_second": 9.854,
  "step": 13
+ },
+ {
+ "epoch": 0.2134349690328728,
+ "grad_norm": 0.2040378302335739,
+ "learning_rate": 8.535533905932738e-05,
+ "loss": 1.0044,
+ "step": 14
+ },
+ {
+ "epoch": 0.2286803239637923,
+ "grad_norm": 0.2001662701368332,
+ "learning_rate": 8.296729075500344e-05,
+ "loss": 0.9819,
+ "step": 15
+ },
+ {
+ "epoch": 0.24392567889471176,
+ "grad_norm": 0.19261719286441803,
+ "learning_rate": 8.043807145043604e-05,
+ "loss": 0.9881,
+ "step": 16
+ },
+ {
+ "epoch": 0.2591710338256313,
+ "grad_norm": 0.17622911930084229,
+ "learning_rate": 7.777851165098012e-05,
+ "loss": 0.9625,
+ "step": 17
+ },
+ {
+ "epoch": 0.2744163887565507,
+ "grad_norm": 0.15968532860279083,
+ "learning_rate": 7.500000000000001e-05,
+ "loss": 0.9984,
+ "step": 18
+ },
+ {
+ "epoch": 0.2896617436874702,
+ "grad_norm": 0.1644553393125534,
+ "learning_rate": 7.211443451095007e-05,
+ "loss": 0.9865,
+ "step": 19
+ },
+ {
+ "epoch": 0.3049070986183897,
+ "grad_norm": 0.1669108122587204,
+ "learning_rate": 6.91341716182545e-05,
+ "loss": 0.9395,
+ "step": 20
+ },
+ {
+ "epoch": 0.3201524535493092,
+ "grad_norm": 0.17810705304145813,
+ "learning_rate": 6.607197326515808e-05,
+ "loss": 0.9736,
+ "step": 21
+ },
+ {
+ "epoch": 0.33539780848022865,
+ "grad_norm": 0.17343126237392426,
+ "learning_rate": 6.294095225512603e-05,
+ "loss": 0.9803,
+ "step": 22
+ },
+ {
+ "epoch": 0.35064316341114815,
+ "grad_norm": 0.1609954833984375,
+ "learning_rate": 5.9754516100806423e-05,
+ "loss": 0.9451,
+ "step": 23
+ },
+ {
+ "epoch": 0.36588851834206765,
+ "grad_norm": 0.15459877252578735,
+ "learning_rate": 5.6526309611002594e-05,
+ "loss": 0.9486,
+ "step": 24
+ },
+ {
+ "epoch": 0.38113387327298714,
+ "grad_norm": 0.21900911629199982,
+ "learning_rate": 5.327015646150716e-05,
+ "loss": 0.9981,
+ "step": 25
+ },
+ {
+ "epoch": 0.39637922820390664,
+ "grad_norm": 0.15433238446712494,
+ "learning_rate": 5e-05,
+ "loss": 0.9631,
+ "step": 26
+ },
+ {
+ "epoch": 0.39637922820390664,
+ "eval_loss": 0.9836097359657288,
+ "eval_runtime": 11.2612,
+ "eval_samples_per_second": 39.25,
+ "eval_steps_per_second": 9.857,
+ "step": 26
  }
  ],
  "logging_steps": 1,
@@ -133,7 +232,7 @@
  "attributes": {}
  }
  },
- "total_flos": 4.28217409166377e+16,
+ "total_flos": 8.56434818332754e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null