Aivesa committed (verified)
Commit 353ceac · 1 Parent(s): c3664af

Training in progress, step 6, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:23cde452e312748315ff7ab84d7bb0fe9bc026445498d1658ab5e3901e6c4a06
+oid sha256:dcc65a9a05b8f8e987f618b224c4a8358b7bec2c3597f1cb561b75210003824e
 size 80013120
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:052e8b6bb9f65d7f707c04cd1bdb30cb7527b7255e26767bc19e6a796abd0762
+oid sha256:386f9089a6f13a309e5dab5131673c5ff2bbdf64b769a707ecb00302669f632b
 size 41119636
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b173ac9ab56406e53f02ac343bf22d13dc84cb35b451121151594928dcc2f4cd
+oid sha256:09acce823b858db1a079a05549bd8528f2fec468928dba778f7e8bdddcd10eea
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ffd8c58e5d02492554dbaa495f8cf80dff41fabc0e1288cb2fd18a103d402219
+oid sha256:0099cb7287625b29b67c4fcf42ff20fae623b429bfb10f5ac695bc54f2be54fd
 size 1064
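
The four binary checkpoint files above are tracked with Git LFS, so the diff only shows their three-line pointer text (spec version, sha256 oid, byte size), not the weights themselves. As a minimal sketch (not part of this commit), the snippet below parses that pointer format and checks a locally resolved copy of a file against it; the paths in the usage comment are illustrative assumptions.

```python
import hashlib
import re
from pathlib import Path

# Git LFS pointer format, exactly as shown in the diffs above:
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<64 hex chars>
#   size <bytes>
POINTER_RE = re.compile(
    r"version https://git-lfs\.github\.com/spec/v1\n"
    r"oid sha256:(?P<oid>[0-9a-f]{64})\n"
    r"size (?P<size>\d+)\n?"
)

def parse_pointer(pointer_path: str) -> dict:
    """Read a pointer file and return its oid and size."""
    match = POINTER_RE.match(Path(pointer_path).read_text())
    if match is None:
        raise ValueError(f"{pointer_path} is not a Git LFS pointer")
    return {"oid": match["oid"], "size": int(match["size"])}

def matches_pointer(blob_path: str, pointer: dict) -> bool:
    """Check that a resolved file has the size and sha256 the pointer records."""
    data = Path(blob_path).read_bytes()
    return (len(data) == pointer["size"]
            and hashlib.sha256(data).hexdigest() == pointer["oid"])

# Hypothetical usage: compare a downloaded checkpoint file against the pointer
# committed here (paths are examples, not guaranteed to exist locally).
# ptr = parse_pointer("last-checkpoint/adapter_model.safetensors")  # pointer text
# print(matches_pointer("/tmp/adapter_model.safetensors", ptr))     # resolved blob
```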
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.010771992818671455,
+  "epoch": 0.02154398563734291,
   "eval_steps": 3,
-  "global_step": 3,
+  "global_step": 6,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -36,6 +36,35 @@
       "eval_samples_per_second": 23.234,
       "eval_steps_per_second": 11.617,
       "step": 3
+    },
+    {
+      "epoch": 0.01436265709156194,
+      "grad_norm": 3.353957176208496,
+      "learning_rate": 8e-05,
+      "loss": 3.8607,
+      "step": 4
+    },
+    {
+      "epoch": 0.017953321364452424,
+      "grad_norm": 3.642730951309204,
+      "learning_rate": 0.0001,
+      "loss": 3.4281,
+      "step": 5
+    },
+    {
+      "epoch": 0.02154398563734291,
+      "grad_norm": 4.439634799957275,
+      "learning_rate": 0.00012,
+      "loss": 2.9842,
+      "step": 6
+    },
+    {
+      "epoch": 0.02154398563734291,
+      "eval_loss": 2.8470635414123535,
+      "eval_runtime": 5.7073,
+      "eval_samples_per_second": 20.675,
+      "eval_steps_per_second": 10.338,
+      "step": 6
     }
   ],
   "logging_steps": 1,
@@ -55,7 +84,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 494346877009920.0,
+  "total_flos": 988693754019840.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
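
As a quick sanity check of the updated trainer_state.json (a sketch, assuming epoch and total_flos grow linearly with global_step in this run): the epoch value exactly doubles between step 3 and step 6, total_flos doubles as well, and the per-step epoch fraction implies roughly 278–279 optimizer steps per epoch.

```python
# Consistency check of the values changed in this commit, assuming linear
# scaling with global_step (no resumed or partial steps).
old = {"global_step": 3, "epoch": 0.010771992818671455, "total_flos": 494346877009920.0}
new = {"global_step": 6, "epoch": 0.02154398563734291, "total_flos": 988693754019840.0}

ratio = new["global_step"] / old["global_step"]            # 2.0
assert abs(new["epoch"] - old["epoch"] * ratio) < 1e-12    # epoch doubled with steps
assert new["total_flos"] == old["total_flos"] * ratio      # FLOPs doubled with steps

# The per-step epoch fraction implies how many optimizer steps make one epoch.
steps_per_epoch = new["global_step"] / new["epoch"]
print(f"~{steps_per_epoch:.1f} optimizer steps per epoch")  # ~278.5
```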