leixa committed
Commit d35dee4 · verified · 1 Parent(s): 17915d0

Training in progress, step 462, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:206183a2c99fca5965a2c3ad9629779d179ddd58a11f46227151b1b3c3c15bb7
+oid sha256:fd9ad15d29deb18d7163bbcaaba1f33909384d83d67f8c9f379c9aa241eb789b
 size 201892112
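
Each of the binary checkpoint files in this commit is stored as a Git LFS pointer: the repository tracks only the `version`, `oid sha256`, and `size` lines, while the payload lives in LFS storage. A minimal sketch for checking a locally downloaded file against the pointer above (the helper name and local path are illustrative, not part of the repo):

```python
import hashlib
import os

def matches_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """True if the local file's byte size and SHA-256 match the LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values copied from the new pointer above.
print(matches_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "fd9ad15d29deb18d7163bbcaaba1f33909384d83d67f8c9f379c9aa241eb789b",
    201892112,
))
```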
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:870b066dde9df96bf56be9482f9ed2fef198f645980e68d46f8741963c7929a8
+oid sha256:9c27e39946f3eacdee6bd6c425f1858f6ace622df8d4e7314e81174ca9b2209b
 size 102864868
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ebf598a3ab84e12b7b9b0f1cf713464c6133fe6762dd3be870c678737b1f8aaa
+oid sha256:6f36562187cf3b52a09213050f690637853db506cc488d604d4a5bde81a2629d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9507fdefdfac1d5dbc7a23a3aeb675b6dd3cc22a3762b7e85ff02a1c9c43105
+oid sha256:7b58b44a2d5024ddc12e64ead45d5d25c7fc985d9aaeb44c7bc3de9b8cf56f23
 size 1064
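
The file names follow the usual Hugging Face Trainer checkpoint layout: adapter weights in safetensors, and optimizer, scheduler, and RNG state as torch-serialized objects. A hedged sketch for inspecting them locally after the LFS objects have been pulled; none of this code is part of the commit:

```python
import torch
from safetensors.torch import load_file

ckpt = "last-checkpoint"

# Adapter weights are plain tensors in the safetensors format.
adapter = load_file(f"{ckpt}/adapter_model.safetensors")
print(f"{len(adapter)} adapter tensors")

# Optimizer, scheduler, and RNG state are ordinary torch pickles;
# weights_only=False because they contain more than raw tensors.
optimizer_state = torch.load(f"{ckpt}/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load(f"{ckpt}/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load(f"{ckpt}/rng_state.pth", map_location="cpu", weights_only=False)
print(sorted(optimizer_state.keys()))
```

In a standard transformers setup these files are normally consumed by `trainer.train(resume_from_checkpoint="last-checkpoint")` rather than loaded by hand.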
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0880410858400587,
+  "epoch": 0.09684519442406456,
   "eval_steps": 42,
-  "global_step": 420,
+  "global_step": 462,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1075,6 +1075,112 @@
       "eval_samples_per_second": 48.659,
       "eval_steps_per_second": 6.086,
       "step": 420
+    },
+    {
+      "epoch": 0.08866995073891626,
+      "grad_norm": 0.4187450110912323,
+      "learning_rate": 5.9702234071631e-06,
+      "loss": 1.0578,
+      "step": 423
+    },
+    {
+      "epoch": 0.08929881563777382,
+      "grad_norm": 0.3692411780357361,
+      "learning_rate": 5.5226705990794155e-06,
+      "loss": 1.0522,
+      "step": 426
+    },
+    {
+      "epoch": 0.08992768053663137,
+      "grad_norm": 0.4078660309314728,
+      "learning_rate": 5.091571939329048e-06,
+      "loss": 1.0434,
+      "step": 429
+    },
+    {
+      "epoch": 0.09055654543548894,
+      "grad_norm": 0.4134742319583893,
+      "learning_rate": 4.677086910538092e-06,
+      "loss": 1.074,
+      "step": 432
+    },
+    {
+      "epoch": 0.0911854103343465,
+      "grad_norm": 0.36238303780555725,
+      "learning_rate": 4.279368849209381e-06,
+      "loss": 1.0455,
+      "step": 435
+    },
+    {
+      "epoch": 0.09181427523320407,
+      "grad_norm": 0.4054090976715088,
+      "learning_rate": 3.898564888996476e-06,
+      "loss": 1.0706,
+      "step": 438
+    },
+    {
+      "epoch": 0.09244314013206163,
+      "grad_norm": 0.3747381567955017,
+      "learning_rate": 3.534815906272404e-06,
+      "loss": 1.0354,
+      "step": 441
+    },
+    {
+      "epoch": 0.0930720050309192,
+      "grad_norm": 0.3801961839199066,
+      "learning_rate": 3.18825646801314e-06,
+      "loss": 1.0818,
+      "step": 444
+    },
+    {
+      "epoch": 0.09370086992977675,
+      "grad_norm": 0.3710772395133972,
+      "learning_rate": 2.8590147820153513e-06,
+      "loss": 1.0605,
+      "step": 447
+    },
+    {
+      "epoch": 0.09432973482863431,
+      "grad_norm": 0.3717288672924042,
+      "learning_rate": 2.547212649466568e-06,
+      "loss": 1.0153,
+      "step": 450
+    },
+    {
+      "epoch": 0.09495859972749188,
+      "grad_norm": 0.370351105928421,
+      "learning_rate": 2.2529654198854835e-06,
+      "loss": 1.037,
+      "step": 453
+    },
+    {
+      "epoch": 0.09558746462634944,
+      "grad_norm": 0.38800954818725586,
+      "learning_rate": 1.9763819484490355e-06,
+      "loss": 1.0668,
+      "step": 456
+    },
+    {
+      "epoch": 0.096216329525207,
+      "grad_norm": 0.39085131883621216,
+      "learning_rate": 1.7175645557220566e-06,
+      "loss": 1.0601,
+      "step": 459
+    },
+    {
+      "epoch": 0.09684519442406456,
+      "grad_norm": 0.38424739241600037,
+      "learning_rate": 1.4766089898042678e-06,
+      "loss": 1.0742,
+      "step": 462
+    },
+    {
+      "epoch": 0.09684519442406456,
+      "eval_loss": 1.0629161596298218,
+      "eval_runtime": 165.0993,
+      "eval_samples_per_second": 48.668,
+      "eval_steps_per_second": 6.087,
+      "step": 462
     }
   ],
   "logging_steps": 3,
@@ -1094,7 +1200,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8.959220980383744e+16,
+  "total_flos": 9.855143078422118e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null