Hanzalwi committed
Commit 4a72fee
Parent: cf6f255

Training in progress, step 100, checkpoint

last-checkpoint/README.md CHANGED
@@ -217,4 +217,23 @@ The following `bitsandbytes` quantization config was used during training:
 ### Framework versions
 
 
+- PEFT 0.6.3.dev0
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: True
+- load_in_4bit: False
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float32
+
+### Framework versions
+
+
 - PEFT 0.6.3.dev0
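
The README section added here records the 8-bit `bitsandbytes` setup used for this run. As a minimal sketch (the base model name below is a placeholder, since the diff does not state which base model the adapter was trained on), the same config can be rebuilt with `transformers.BitsAndBytesConfig` and the adapter in `last-checkpoint/` attached via PEFT:

```python
# Sketch: re-create the quantization config listed in the README and attach
# the saved PEFT adapter. "base-model-id" is a placeholder, not taken from
# this repository.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,                      # load_in_8bit: True
    llm_int8_threshold=6.0,                 # llm_int8_threshold: 6.0
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
)

base = AutoModelForCausalLM.from_pretrained(
    "base-model-id",                        # placeholder base model
    quantization_config=bnb_config,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "last-checkpoint")
```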
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:065d6cdfd25a3592de63b9ea57d49cc10abe0782a56656abb658855f78661c42
+oid sha256:a8f074ee0142264d42ff6ac14277529268f6ceb94c56d620fbaa623f9e5cfd77
 size 9444296
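
This file, like the optimizer, RNG, scheduler, and training-args files below, is stored as a Git LFS pointer: the repo only holds the spec version, the SHA-256 of the real file, and its size in bytes. A small sketch of verifying a downloaded copy against its pointer (the local path is illustrative):

```python
# Sketch: check a downloaded checkpoint file against its Git LFS pointer.
# The path is illustrative; adjust to wherever the file was fetched.
import hashlib
from pathlib import Path

path = Path("last-checkpoint/adapter_model.safetensors")
digest = hashlib.sha256(path.read_bytes()).hexdigest()

expected_oid = "a8f074ee0142264d42ff6ac14277529268f6ceb94c56d620fbaa623f9e5cfd77"
expected_size = 9444296

assert path.stat().st_size == expected_size, "size mismatch with LFS pointer"
assert digest == expected_oid, "sha256 mismatch with LFS pointer"
print("file matches its LFS pointer")
```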
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6ab06dac26f59d076051a357403d2a8514e3f20a26dcde2e2c380d7e94ee1a84
+oid sha256:9f610b9589325d7175171087c87fcde851d9e63322240c2f555b520195b099b0
 size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1ec0da87f5671851cc04bcc5ea52b139af3d8380509986e058dbd0a4f2510f2
+oid sha256:41f270cc5b1e61ed3af1f62839766416b6f4a38dc682e2def2352932fbe9d6d9
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9c1a2ac1c11599601eeac95feb1dbfd49ec5c625e61dcce18b3f094491f9cf2d
+oid sha256:216f76b8039f833c337db298c81f13b12082d5fd4f9d866cecd34b2ca7550b37
 size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.0246661901474,
-  "best_model_checkpoint": "./outputs/checkpoint-600",
-  "epoch": 0.8522727272727273,
+  "best_metric": 1.0926024913787842,
+  "best_model_checkpoint": "./outputs/checkpoint-100",
+  "epoch": 0.14204545454545456,
   "eval_steps": 100,
-  "global_step": 600,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -11,86 +11,16 @@
     {
       "epoch": 0.14,
       "learning_rate": 0.0002,
-      "loss": 1.5807,
+      "loss": 1.5836,
       "step": 100
     },
     {
       "epoch": 0.14,
-      "eval_loss": 1.0911378860473633,
-      "eval_runtime": 126.8708,
-      "eval_samples_per_second": 11.76,
-      "eval_steps_per_second": 1.474,
+      "eval_loss": 1.0926024913787842,
+      "eval_runtime": 92.4288,
+      "eval_samples_per_second": 16.142,
+      "eval_steps_per_second": 2.023,
       "step": 100
-    },
-    {
-      "epoch": 0.28,
-      "learning_rate": 0.0002,
-      "loss": 1.2278,
-      "step": 200
-    },
-    {
-      "epoch": 0.28,
-      "eval_loss": 1.0710176229476929,
-      "eval_runtime": 123.7755,
-      "eval_samples_per_second": 12.054,
-      "eval_steps_per_second": 1.511,
-      "step": 200
-    },
-    {
-      "epoch": 0.43,
-      "learning_rate": 0.0002,
-      "loss": 1.2143,
-      "step": 300
-    },
-    {
-      "epoch": 0.43,
-      "eval_loss": 1.0573608875274658,
-      "eval_runtime": 123.8307,
-      "eval_samples_per_second": 12.049,
-      "eval_steps_per_second": 1.51,
-      "step": 300
-    },
-    {
-      "epoch": 0.57,
-      "learning_rate": 0.0002,
-      "loss": 1.1968,
-      "step": 400
-    },
-    {
-      "epoch": 0.57,
-      "eval_loss": 1.0461841821670532,
-      "eval_runtime": 123.7514,
-      "eval_samples_per_second": 12.056,
-      "eval_steps_per_second": 1.511,
-      "step": 400
-    },
-    {
-      "epoch": 0.71,
-      "learning_rate": 0.0002,
-      "loss": 1.1868,
-      "step": 500
-    },
-    {
-      "epoch": 0.71,
-      "eval_loss": 1.035237431526184,
-      "eval_runtime": 123.8134,
-      "eval_samples_per_second": 12.05,
-      "eval_steps_per_second": 1.51,
-      "step": 500
-    },
-    {
-      "epoch": 0.85,
-      "learning_rate": 0.0002,
-      "loss": 1.1746,
-      "step": 600
-    },
-    {
-      "epoch": 0.85,
-      "eval_loss": 1.0246661901474,
-      "eval_runtime": 123.5637,
-      "eval_samples_per_second": 12.075,
-      "eval_steps_per_second": 1.513,
-      "step": 600
     }
   ],
   "logging_steps": 100,
@@ -98,7 +28,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 3.333198802526208e+16,
+  "total_flos": 5530295581655040.0,
   "trial_name": null,
   "trial_params": null
 }
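
trainer_state.json is the file `transformers.Trainer` reads when resuming; at this commit it records step 100 as the current best checkpoint (eval_loss ≈ 1.0926). A hedged sketch of inspecting it and resuming (the surrounding training script is an assumption, not part of this commit):

```python
# Sketch: inspect the saved trainer state and resume from this checkpoint.
# Assumes the repo has been downloaded so "last-checkpoint/" exists locally;
# the Trainer itself must be rebuilt with the original training script.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])   # 100
print(state["best_metric"])   # 1.0926024913787842 (eval_loss at step 100)

# With a Trainer configured as in the original run:
# trainer.train(resume_from_checkpoint="last-checkpoint")
```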
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fea25dd0b62f2743404b9270b914baaf658731e1acbc936ce8fd0c2017ee8535
+oid sha256:773fc843ab8e8578c19dea3b7f4669d81e0fdb743a2d8dc54ea40860d6fb2fab
 size 4283
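
training_args.bin is the pickled `TrainingArguments` object saved alongside the checkpoint. A small sketch of inspecting it locally (on recent PyTorch, `weights_only=False` is needed because the file stores a full Python object rather than plain tensors):

```python
# Sketch: inspect the TrainingArguments pickled in training_args.bin.
import torch

args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.save_steps)
```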