shibajustfor committed
Commit 07f4a11 · verified · 1 Parent(s): a204a2b

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d55bf6da0644cb0d2142e588ebee24edec7c8fbaa1101999298c5f16307d184f
+oid sha256:a29bcb1557cb9c58b1031d6c0fd1c859354d3c0a0e37e00fcd17b17a77ae7e4a
 size 6804608
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66cc0efe7eada77580833e1b65a2f2bb2bf75cfa8182684aa3f7c47a6bd94512
+oid sha256:056a9dbeb4a3d35fb6ec99c11f7a9bf8e2515d1236ff0059d99214e344006681
 size 3633530
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:abee12ff943f61ea027547de22fd9946ae7c7d29fc3926528191d84051846b55
+oid sha256:ea8e8e7375809d41b9b83cdd82d35d8d3f92c8074bf6e7c4d4b9b7c21650ce53
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:216e1a9a1c53e7de93f56319b4855910155cb98dd7ebb5fa08b3717fd303059f
+oid sha256:e69e2b49ea642509f0c688c16fb190b7cf27dac0a18903a5e2d1467d0343d8b8
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.04441913439635535,
+  "epoch": 0.05694760820045558,
   "eval_steps": 13,
-  "global_step": 39,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -60,6 +60,20 @@
       "eval_samples_per_second": 114.01,
       "eval_steps_per_second": 57.005,
       "step": 39
+    },
+    {
+      "epoch": 0.04555808656036447,
+      "grad_norm": 0.9612581133842468,
+      "learning_rate": 2.339555568810221e-05,
+      "loss": 3.2128,
+      "step": 40
+    },
+    {
+      "epoch": 0.05694760820045558,
+      "grad_norm": 0.9653323292732239,
+      "learning_rate": 0.0,
+      "loss": 3.1291,
+      "step": 50
     }
   ],
   "logging_steps": 10,
@@ -74,12 +88,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 259766827352064.0,
+  "total_flos": 331781393350656.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null