rizkyjun committed
Commit 7abd80e
Parent: c2268ac

Training in progress, step 800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8725196c2f79209246c12a790ee169423016258c3022f5f27e4fea285bf1cb3e
+oid sha256:43da30e397164c844764dda7a20e99bb190dd6e3c5b7e98c8e13f16599f27015
 size 9444296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ddb7433355ac0f8a2d80f5c7bb5e3d022707d482f7f0280b8af45643c6de474e
+oid sha256:9cbed1dd5d4230da5bd0ad3d43d81e8c121a3bd9f38955b269a3df6875150b56
 size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ed490e04c6dbbc7419c7365a93701b5d2a1915c9651cc1654bfd578cd611575f
+oid sha256:41eb3cc90529708ec3050e7a2a3e956faee68b39027a1409168255234e541c3d
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:30423cc69a1c7d18caef4643847588d5884db012263cbe7b7224fa4719ca4156
+oid sha256:de7840bcb72f2f480fd301578d289cdfa174589e831b0d33e5772f3956b6beae
 size 627
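The four binary checkpoint files above are tracked with Git LFS, so each diff only shows the pointer file changing: the spec version line, the `oid sha256:` of the new blob, and its `size` in bytes. As a rough illustration (not part of this commit), a locally downloaded blob can be checked against its pointer; the path below is hypothetical and assumes the checkpoint has already been fetched.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local path to the fetched adapter weights.
blob = "last-checkpoint/adapter_model.safetensors"

# oid from the updated LFS pointer in this commit.
expected = "43da30e397164c844764dda7a20e99bb190dd6e3c5b7e98c8e13f16599f27015"

actual = sha256_of(blob)
print("match" if actual == expected else f"mismatch: {actual}")
```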
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 2.4199516773223877,
-  "best_model_checkpoint": "./outputs/checkpoint-700",
-  "epoch": 0.5100182149362478,
+  "best_metric": 2.399696111679077,
+  "best_model_checkpoint": "./outputs/checkpoint-800",
+  "epoch": 0.5828779599271403,
   "eval_steps": 100,
-  "global_step": 700,
+  "global_step": 800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -105,13 +105,27 @@
       "eval_samples_per_second": 30.444,
       "eval_steps_per_second": 3.809,
       "step": 700
+    },
+    {
+      "epoch": 0.58,
+      "learning_rate": 0.0002,
+      "loss": 2.4041,
+      "step": 800
+    },
+    {
+      "epoch": 0.58,
+      "eval_loss": 2.399696111679077,
+      "eval_runtime": 205.7858,
+      "eval_samples_per_second": 30.488,
+      "eval_steps_per_second": 3.815,
+      "step": 800
     }
   ],
   "logging_steps": 100,
   "max_steps": 4116,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 2.055883980423168e+16,
+  "total_flos": 2.349160133935104e+16,
   "trial_name": null,
   "trial_params": null
 }
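The updated trainer_state.json records a new best checkpoint: eval loss 2.3997 at step 800, down from 2.4200 at step 700, with training loss 2.4041 at learning rate 2e-4. Assuming the standard Hugging Face Trainer layout, where the per-step entries shown above sit under a `log_history` key, a quick sketch for inspecting the saved state could look like this; the path is hypothetical.

```python
import json

# Hypothetical local path to the checkpoint's state file.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("global step:", state["global_step"])           # 800 in this commit
print("best metric:", state["best_metric"])           # 2.399696111679077
print("best checkpoint:", state["best_model_checkpoint"])

# Assumes the logged entries live under "log_history" (standard Trainer format).
evals = [e for e in state.get("log_history", []) if "eval_loss" in e]
for e in evals[-2:]:
    print(f"step {e['step']}: eval_loss={e['eval_loss']}")
```

Resuming from here would typically be `trainer.train(resume_from_checkpoint="last-checkpoint")` in transformers, which reads this same file to restore the step counter, scheduler, and RNG state saved above.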