rizkyjun committed
Commit
8d7020a
1 Parent(s): af0af83

Training in progress, step 800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c041f80349a07291cb4478dd734bf2f05c648266cd7c02cf08754d0dcee6c36b
+oid sha256:6219c6f96c04c63ddbffee5fad2f0dbd62e9616ce460b4ec6c10b49932d48d46
 size 9444296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0d7c52871c5724a8c0d883cf69c5d2adb8d72ef4da19bdfa2acc8471b0743ec5
+oid sha256:2d08da7901afe79c41eb68eb63c0e7b7ea08a6cf21789ab0b9527eb44731bb92
 size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ed490e04c6dbbc7419c7365a93701b5d2a1915c9651cc1654bfd578cd611575f
+oid sha256:41eb3cc90529708ec3050e7a2a3e956faee68b39027a1409168255234e541c3d
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:30423cc69a1c7d18caef4643847588d5884db012263cbe7b7224fa4719ca4156
+oid sha256:de7840bcb72f2f480fd301578d289cdfa174589e831b0d33e5772f3956b6beae
 size 627
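
Each of the binary checkpoint files above is stored as a Git LFS pointer, so the commit only swaps the sha256 object ID while the reported size stays the same. As a quick local sanity check (not part of this commit), the pulled object can be compared against the oid in its pointer; the sketch below assumes the repository has been cloned with LFS objects resolved, and the path is illustrative.

```python
# Minimal sketch (not part of this commit): verify that a locally pulled
# LFS object matches the "oid sha256:<hash>" line of its pointer file.
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file in 1 MiB chunks to avoid loading it fully into memory.
    h = hashlib.sha256()
    with path.open("rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

# Expected digest taken from the new adapter_model.safetensors pointer above.
expected = "6219c6f96c04c63ddbffee5fad2f0dbd62e9616ce460b4ec6c10b49932d48d46"
actual = sha256_of(Path("last-checkpoint/adapter_model.safetensors"))
print("match" if actual == expected else f"mismatch: {actual}")
```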
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 2.4212682247161865,
-  "best_model_checkpoint": "./outputs/checkpoint-700",
-  "epoch": 0.5100182149362478,
+  "best_metric": 2.4029204845428467,
+  "best_model_checkpoint": "./outputs/checkpoint-800",
+  "epoch": 0.5828779599271403,
   "eval_steps": 100,
-  "global_step": 700,
+  "global_step": 800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -105,13 +105,27 @@
       "eval_samples_per_second": 30.618,
       "eval_steps_per_second": 3.831,
       "step": 700
+    },
+    {
+      "epoch": 0.58,
+      "learning_rate": 0.0002,
+      "loss": 2.4062,
+      "step": 800
+    },
+    {
+      "epoch": 0.58,
+      "eval_loss": 2.4029204845428467,
+      "eval_runtime": 205.0316,
+      "eval_samples_per_second": 30.6,
+      "eval_steps_per_second": 3.829,
+      "step": 800
     }
   ],
   "logging_steps": 100,
   "max_steps": 4116,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 2.055883980423168e+16,
+  "total_flos": 2.349160133935104e+16,
   "trial_name": null,
   "trial_params": null
 }
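
The fields updated in trainer_state.json (best_metric, best_model_checkpoint, epoch, global_step, total_flos, and the two new log_history entries for step 800) are the state the Hugging Face Trainer reads back when training is resumed. A minimal sketch, assuming the repository has been cloned locally and a transformers.Trainer has already been configured with the same TrainingArguments:

```python
# Minimal sketch (not part of this commit): inspect the updated trainer state
# and resume training from this checkpoint directory.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])            # 800
print(state["best_metric"])            # 2.4029204845428467
print(state["best_model_checkpoint"])  # ./outputs/checkpoint-800

# Resuming restores global_step together with the optimizer, LR scheduler and
# RNG state from the files updated in this commit (optimizer.pt, scheduler.pt,
# rng_state.pth). `trainer` is assumed to be an already-built transformers.Trainer.
# trainer.train(resume_from_checkpoint="last-checkpoint")
```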