Hanzalwi committed
Commit f6e7d76
1 Parent(s): 7e588ae

Training in progress, step 400, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:376b5eddbde07923e78c8d3161769842db2a582947a1c1c862f0d652245ffb1d
+ oid sha256:9f4bb689bd65373ef6717dc634345f6cc7c0c2626a9a5a94183aae7778d8c17f
  size 9444296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:92c96ba56c1327aa7721235dc72edfeaf5d7bf11f2d9bcbff8e2f326610b9b39
+ oid sha256:34dd07745789cd80f6a5b25be9100fc1110ebc44c2ab224f9dc2b548036856c9
  size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9eda86b469533e86a34682e0068384ac45b349522c1991400b8806ee812a1bf1
+ oid sha256:028655e510a909aa7d6085e69d23b85f281b9fdd6d7a66b5175d2c3d8b80ca35
  size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c3e02e4b8a74e841ee5a1ce8c453a9becb15f5be6cbcdd100b814e93235dab36
+ oid sha256:13276f15dd2b6acc19b970176aa2db4ac9b58241843e72c89b50e3094e903b19
  size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 1.4164679050445557,
- "best_model_checkpoint": "./outputs/checkpoint-300",
- "epoch": 0.4,
+ "best_metric": 1.4017506837844849,
+ "best_model_checkpoint": "./outputs/checkpoint-400",
+ "epoch": 0.5333333333333333,
  "eval_steps": 100,
- "global_step": 300,
+ "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -49,6 +49,20 @@
  "eval_samples_per_second": 12.903,
  "eval_steps_per_second": 1.618,
  "step": 300
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 0.0002,
+ "loss": 1.1691,
+ "step": 400
+ },
+ {
+ "epoch": 0.53,
+ "eval_loss": 1.4017506837844849,
+ "eval_runtime": 112.3429,
+ "eval_samples_per_second": 12.916,
+ "eval_steps_per_second": 1.62,
+ "step": 400
  }
  ],
  "logging_steps": 100,
@@ -56,7 +70,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
- "total_flos": 1.909463700824064e+16,
+ "total_flos": 2.549292795076608e+16,
  "trial_name": null,
  "trial_params": null
  }