rizkyjun committed
Commit 92dab1a
1 Parent(s): 51c435b

Training in progress, step 800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b48d88d84faf01890db2155447ac959a5885868161d2be46584cc6de411e88dc
+ oid sha256:2288a6d7dd0d9e6860ac8e0acee99e2cf343cec35c24fdf3cd5be8ce74b722e0
  size 9444296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6b9a6ce0cdf232b7504a6caca88c93917f9db12c54afd8275db3c49218493c0b
+ oid sha256:23f4516aa318f0574d42b4b412aa4e98184e2b85a2b7f79dd8cdd1a764d084b9
  size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4933eabcc84eee1438f9d6f8169b5cecd886b53e4de0afe6e5bf4f525371b890
+ oid sha256:6691656f59836e679b0b16a9e5fb6259737616134b8601994c32a922f156d9a2
  size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:30423cc69a1c7d18caef4643847588d5884db012263cbe7b7224fa4719ca4156
+ oid sha256:de7840bcb72f2f480fd301578d289cdfa174589e831b0d33e5772f3956b6beae
  size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 2.527393341064453,
- "best_model_checkpoint": "./outputs/checkpoint-700",
- "epoch": 0.5100182149362478,
+ "best_metric": 2.506849527359009,
+ "best_model_checkpoint": "./outputs/checkpoint-800",
+ "epoch": 0.5828779599271403,
  "eval_steps": 100,
- "global_step": 700,
+ "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -105,13 +105,27 @@
  "eval_samples_per_second": 30.633,
  "eval_steps_per_second": 3.833,
  "step": 700
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.0002,
+ "loss": 2.5124,
+ "step": 800
+ },
+ {
+ "epoch": 0.58,
+ "eval_loss": 2.506849527359009,
+ "eval_runtime": 204.0454,
+ "eval_samples_per_second": 30.748,
+ "eval_steps_per_second": 3.847,
+ "step": 800
  }
  ],
  "logging_steps": 100,
  "max_steps": 4116,
  "num_train_epochs": 3,
  "save_steps": 100,
- "total_flos": 1.993348357668864e+16,
+ "total_flos": 2.277701378039808e+16,
  "trial_name": null,
  "trial_params": null
  }