Hanzalwi committed on
Commit 80b80f9
1 parent: 822f98d

Training in progress, step 600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:94448be3ee9e159274fc92284b623d57bd482afd85eeb548d798acccdcaef741
+oid sha256:e529a33a2e67a7a487f373be6de76cbfec062dc66fafef1000196a7d36357a1a
 size 9444296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cffe7b5300bc6f8f72c53f60b3086ca10c8a391d36a26e319c9f6350b4cf69f3
+oid sha256:fd7e63e4f8f17892b3724afc5649b94c0ba4c33bc05e3c617f78d1b06efc703e
 size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ec69c1f9db00275f2bf788ed50aaee351133623fcb893d2824213ebb8d0cd795
+oid sha256:54edf57dba3a9923bec7bf6c66a78fd0774a6a561f9a15bd2287923ae584c11c
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5efc416a6883409dd7ab6f5c779e107c7c2baa7af6e12ed9fbd9dd73b8b20784
+oid sha256:9c1a2ac1c11599601eeac95feb1dbfd49ec5c625e61dcce18b3f094491f9cf2d
 size 627
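The four files above are Git LFS pointer files: only the `oid sha256:` line changes between checkpoints, while the reported payload size stays the same. As a rough illustration (not part of this commit), the sketch below checks a locally downloaded payload against a pointer's hash and size; both paths are hypothetical.

```python
import hashlib
from pathlib import Path

# Hypothetical local paths: the pointer text lives in the repo,
# the resolved binary is what `git lfs pull` (or a Hub download) produces.
POINTER_PATH = Path("last-checkpoint/adapter_model.safetensors")  # LFS pointer text
PAYLOAD_PATH = Path("downloads/adapter_model.safetensors")        # actual binary

def parse_lfs_pointer(text: str) -> dict:
    """Parse the three-line Git LFS pointer format (version / oid / size)."""
    fields = {}
    for line in text.splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = parse_lfs_pointer(POINTER_PATH.read_text())
expected_oid = pointer["oid"].removeprefix("sha256:")
expected_size = int(pointer["size"])

digest = hashlib.sha256(PAYLOAD_PATH.read_bytes()).hexdigest()
actual_size = PAYLOAD_PATH.stat().st_size

print("sha256 matches:", digest == expected_oid)
print("size matches:  ", actual_size == expected_size)
```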
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.4804445505142212,
-  "best_model_checkpoint": "./outputs/checkpoint-500",
-  "epoch": 0.6666666666666666,
+  "best_metric": 1.4673335552215576,
+  "best_model_checkpoint": "./outputs/checkpoint-600",
+  "epoch": 0.8,
   "eval_steps": 100,
-  "global_step": 500,
+  "global_step": 600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -77,6 +77,20 @@
       "eval_samples_per_second": 15.541,
       "eval_steps_per_second": 1.949,
       "step": 500
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 0.0002,
+      "loss": 1.2327,
+      "step": 600
+    },
+    {
+      "epoch": 0.8,
+      "eval_loss": 1.4673335552215576,
+      "eval_runtime": 93.4173,
+      "eval_samples_per_second": 15.532,
+      "eval_steps_per_second": 1.948,
+      "step": 600
     }
   ],
   "logging_steps": 100,
@@ -84,7 +98,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 2.927413222981632e+16,
+  "total_flos": 3.510926195245056e+16,
   "trial_name": null,
   "trial_params": null
 }
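The trainer_state.json change records that step 600 is now the best checkpoint (eval_loss 1.4673 vs. 1.4804 at step 500), with the epoch advancing from 0.667 to 0.8. As a small convenience sketch (not part of this commit), the snippet below reads a checkpoint's trainer_state.json and summarizes where training stands; the path is an assumption.

```python
import json
from pathlib import Path

# Hypothetical path to the checkpoint directory saved by this commit.
STATE_PATH = Path("last-checkpoint/trainer_state.json")

state = json.loads(STATE_PATH.read_text())

# Fields updated in this commit: best metric/checkpoint, epoch, global step.
print(f"global_step:           {state['global_step']}")
print(f"epoch:                 {state['epoch']}")
print(f"best_metric:           {state['best_metric']}")
print(f"best_model_checkpoint: {state['best_model_checkpoint']}")

# The most recent log_history entry with an eval_loss is the latest evaluation.
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]
if eval_logs:
    latest = eval_logs[-1]
    print(f"latest eval_loss at step {latest['step']}: {latest['eval_loss']}")
```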