cwaud committed on
Commit 4c7bb18
1 Parent(s): 2a15afa

End of training

Files changed (2)
  1. README.md +34 -59
  2. adapter_model.bin +2 -2
README.md CHANGED
@@ -21,17 +21,13 @@ axolotl version: `0.4.1`
 adapter: lora
 base_model: unsloth/Meta-Llama-3.1-8B
 bf16: auto
-bnb_config_kwargs:
-  bnb_4bit_quant_type: nf4
-  bnb_4bit_use_double_quant: true
 chat_template: llama3
-cosine_min_lr_ratio: 0.1
-data_processes: 16
 dataset_prepared_path: null
 datasets:
 - data_files:
   - 296bd32b0a7d0eae_train_data.json
   ds_type: json
+  format: custom
   path: /workspace/input_data/296bd32b0a7d0eae_train_data.json
   type:
     field_input: level
@@ -41,79 +37,60 @@ datasets:
     system_prompt: ''
 debug: null
 deepspeed: null
-device_map: '{'''':torch.cuda.current_device()}'
-do_eval: true
-early_stopping_patience: 1
-eval_batch_size: 1
-eval_sample_packing: false
-eval_steps: 25
-evaluation_strategy: steps
-flash_attention: false
+early_stopping_patience: null
+eval_max_new_tokens: 128
+eval_table_size: null
+evals_per_epoch: 4
+flash_attention: true
 fp16: null
 fsdp: null
 fsdp_config: null
-gradient_accumulation_steps: 32
-gradient_checkpointing: true
-group_by_length: true
+gradient_accumulation_steps: 4
+gradient_checkpointing: false
+group_by_length: false
 hub_model_id: cwaud/0f6de495-2b3e-4109-828d-b91842a9e39d
 hub_repo: cwaud
 hub_strategy: checkpoint
 hub_token: null
-learning_rate: 0.0001
+learning_rate: 0.002
 load_in_4bit: false
-load_in_8bit: false
+load_in_8bit: true
 local_rank: null
 logging_steps: 1
-lora_alpha: 64
+lora_alpha: 16
 lora_dropout: 0.05
 lora_fan_in_fan_out: null
 lora_model_dir: null
-lora_r: 32
+lora_r: 8
 lora_target_linear: true
-lora_target_modules:
-- q_proj
-- v_proj
 lr_scheduler: cosine
-max_grad_norm: 1.0
-max_memory:
-  0: 70GiB
-  1: 70GiB
-  2: 70GiB
-  3: 70GiB
-max_steps: 83
+max_steps: 100
 micro_batch_size: 1
 mlflow_experiment_name: /tmp/296bd32b0a7d0eae_train_data.json
 model_type: AutoModelForCausalLM
-num_epochs: 3
-optim_args:
-  adam_beta1: 0.9
-  adam_beta2: 0.95
-  adam_epsilon: 1e-5
-optimizer: adamw_torch
+num_epochs: 1
+optimizer: adamw_bnb_8bit
 output_dir: miner_id_24
 pad_to_sequence_len: true
 resume_from_checkpoint: null
 s2_attention: null
 sample_packing: false
-save_steps: 50
+save_steps: 5
 save_strategy: steps
 sequence_len: 2048
 strict: false
 tf32: false
 tokenizer_type: AutoTokenizer
-torch_compile: false
 train_on_inputs: false
-trust_remote_code: true
-val_set_size: 50
+val_set_size: 0.05
 wandb_entity: rayonlabs-rayon-labs
 wandb_mode: online
 wandb_name: 0f6de495-2b3e-4109-828d-b91842a9e39d
 wandb_project: Public_TuningSN
 wandb_run: miner_id_24
 wandb_runid: 0f6de495-2b3e-4109-828d-b91842a9e39d
-warmup_raio: 0.03
-warmup_ratio: 0.04
-weight_decay: 0.01
+warmup_steps: 10
+weight_decay: 0.0
 xformers_attention: null

 ```
@@ -124,7 +101,7 @@ xformers_attention: null

 This model is a fine-tuned version of [unsloth/Meta-Llama-3.1-8B](https://huggingface.co/unsloth/Meta-Llama-3.1-8B) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6324
+- Loss: 0.8763

 ## Model description

@@ -143,34 +120,32 @@ More information needed
 ### Training hyperparameters

 The following hyperparameters were used during training:
-- learning_rate: 0.0001
+- learning_rate: 0.002
 - train_batch_size: 1
 - eval_batch_size: 1
 - seed: 42
-- distributed_type: multi-GPU
-- num_devices: 4
-- gradient_accumulation_steps: 32
-- total_train_batch_size: 128
-- total_eval_batch_size: 4
-- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 4
+- optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: cosine
-- lr_scheduler_warmup_steps: 3
-- training_steps: 83
+- lr_scheduler_warmup_steps: 10
+- training_steps: 100

 ### Training results

 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| 0.7046 | 0.0002 | 1 | 0.9375 |
-| 0.6112 | 0.0043 | 25 | 0.6832 |
-| 0.6283 | 0.0085 | 50 | 0.6453 |
-| 0.5699 | 0.0128 | 75 | 0.6324 |
+| 1.0718 | 0.0000 | 1 | 1.0175 |
+| 1.1737 | 0.0001 | 25 | 1.0244 |
+| 1.1765 | 0.0003 | 50 | 1.0543 |
+| 0.9795 | 0.0004 | 75 | 0.9441 |
+| 0.56 | 0.0006 | 100 | 0.8763 |


 ### Framework versions

 - PEFT 0.13.2
-- Transformers 4.45.2
-- Pytorch 2.4.1+cu124
+- Transformers 4.46.0
+- Pytorch 2.5.0+cu124
 - Datasets 3.0.1
 - Tokenizers 0.20.1
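With `micro_batch_size: 1` and `gradient_accumulation_steps: 4`, the effective batch size is 1 × 4 = 4, which matches the `total_train_batch_size: 4` reported in the hyperparameters. For context, a minimal sketch of how the adapter produced by this run could be loaded on top of its base model: the repository ids come straight from `base_model` and `hub_model_id` above, the 8-bit loading mirrors the new `load_in_8bit: true` setting, and a standard transformers + peft + bitsandbytes environment is assumed.

```python
# Sketch: load the base model in 8-bit and attach this LoRA adapter with PEFT.
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "unsloth/Meta-Llama-3.1-8B"                       # base_model in the config
adapter_id = "cwaud/0f6de495-2b3e-4109-828d-b91842a9e39d"   # hub_model_id in the config

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(
    base_id,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),  # mirrors load_in_8bit: true
    device_map="auto",
)
model = PeftModel.from_pretrained(base, adapter_id)

inputs = tokenizer("Hello", return_tensors="pt").to(base.device)
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```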
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8e588185b00e7e24e3ed2dccded1dccbd19b3679def86b1dd3efc2992586c9dc
-size 335706186
+oid sha256:ab68b37f5bace056be3f4f6814a6d913b009dfe9afc630df8d111a4a7752b76e
+size 84047370
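The `adapter_model.bin` entry is a Git LFS pointer rather than the weights themselves; the new binary is about 84 MB, down from roughly 336 MB, a ~4× reduction that lines up with `lora_r` dropping from 32 to 8. As a small sketch, assuming the binary has already been fetched locally (for example via `git lfs pull`), the pointer's `oid` and `size` fields can be checked against the downloaded file:

```python
# Sketch: verify a downloaded adapter_model.bin against its Git LFS pointer.
# The expected oid and size are taken from the pointer shown in this commit.
import hashlib
from pathlib import Path

expected_oid = "ab68b37f5bace056be3f4f6814a6d913b009dfe9afc630df8d111a4a7752b76e"
expected_size = 84047370

path = Path("adapter_model.bin")  # assumes the LFS object has replaced the pointer
digest = hashlib.sha256(path.read_bytes()).hexdigest()

assert path.stat().st_size == expected_size, "size mismatch"
assert digest == expected_oid, "sha256 mismatch"
print("adapter_model.bin matches the LFS pointer")
```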