dmcooller/neural-matia-phi-ft-2

dmcooller committed
Commit 056f0c1 (1 parent: f0fe376)
README.md CHANGED
@@ -1,9 +1,9 @@
 ---
-license: apache-2.0
+license: mit
 library_name: peft
 tags:
 - generated_from_trainer
-base_model: TheBloke/Mistral-7B-Instruct-v0.2-GPTQ
+base_model: microsoft/phi-2
 model-index:
 - name: neural-matia-ft
   results: []
@@ -14,9 +14,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # neural-matia-ft
 
-This model is a fine-tuned version of [TheBloke/Mistral-7B-Instruct-v0.2-GPTQ](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GPTQ) on an unknown dataset.
+This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.3807
+- Loss: 0.3535
 
 ## Model description
 
@@ -36,33 +36,25 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0002
-- train_batch_size: 20
-- eval_batch_size: 20
+- train_batch_size: 16
+- eval_batch_size: 16
 - seed: 42
 - gradient_accumulation_steps: 4
-- total_train_batch_size: 80
+- total_train_batch_size: 64
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
 - lr_scheduler_warmup_steps: 2
-- num_epochs: 12
-- mixed_precision_training: Native AMP
+- num_epochs: 5
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 2.9513        | 1.0   | 8    | 2.1823          |
-| 1.6734        | 2.0   | 16   | 1.0195          |
-| 0.7782        | 3.0   | 24   | 0.5509          |
-| 0.5323        | 4.0   | 32   | 0.4753          |
-| 0.4823        | 5.0   | 40   | 0.4507          |
-| 0.4596        | 6.0   | 48   | 0.4289          |
-| 0.4391        | 7.0   | 56   | 0.4094          |
-| 0.4203        | 8.0   | 64   | 0.3926          |
-| 0.4075        | 9.0   | 72   | 0.3864          |
-| 0.4028        | 10.0  | 80   | 0.3833          |
-| 0.3998        | 11.0  | 88   | 0.3815          |
-| 0.3989        | 12.0  | 96   | 0.3807          |
+| 2.8818        | 1.0   | 9    | 2.1682          |
+| 1.6144        | 2.0   | 18   | 0.7967          |
+| 0.6399        | 3.0   | 27   | 0.4156          |
+| 0.4238        | 4.0   | 36   | 0.3653          |
+| 0.381         | 5.0   | 45   | 0.3535          |
 
 
 ### Framework versions
@@ -70,5 +62,5 @@ The following hyperparameters were used during training:
 - PEFT 0.10.0
 - Transformers 4.38.2
 - Pytorch 2.1.2
-- Datasets 2.1.0
+- Datasets 2.16.0
 - Tokenizers 0.15.2
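For anyone reproducing this setup, the updated hyperparameter list maps directly onto a Transformers `TrainingArguments` object. A minimal sketch follows; the `output_dir` name and `evaluation_strategy` are assumptions (the card states neither), and the optimizer needs no explicit setting since the Trainer's default AdamW already uses betas=(0.9, 0.999) and epsilon=1e-08.

```python
from transformers import TrainingArguments

# Hyperparameters taken from the updated model card (Transformers 4.38.2).
# output_dir and evaluation_strategy are assumptions, not from the card.
training_args = TrainingArguments(
    output_dir="neural-matia-ft",    # hypothetical directory name
    learning_rate=2e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=4,   # effective train batch size: 16 * 4 = 64
    num_train_epochs=5,
    lr_scheduler_type="linear",
    warmup_steps=2,
    seed=42,
    evaluation_strategy="epoch",     # assumed: the table logs one eval per epoch
)
```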
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": null,
+  "base_model_name_or_path": "microsoft/phi-2",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -16,11 +16,14 @@
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 11,
+  "r": 32,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj"
+    "k_proj",
+    "v_proj",
+    "q_proj",
+    "dense"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b6ad59b20026e05d98cab8510d4f07c0207f9e723255bf72ff6f990f3559aa02
-size 11546136
+oid sha256:d04b4222c5ae058f1ddcfb6bcbb463f465b094275333e19a086317d1811ce166
+size 83920464
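The adapter is now roughly seven times larger (11,546,136 → 83,920,464 bytes), which lines up with the config change above: rank 32 across four target modules of phi-2 stores far more fp32 adapter weight than a single rank-11 `q_proj` adapter on the previous Mistral base did.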
runs/Apr08_06-10-27_41f0330e0e1d/events.out.tfevents.1712556682.41f0330e0e1d.74.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5e57e7d1aa934072485608cbb39461a1915c2f60f4854e56bb7b459b116794b
+size 7990
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:575b892310e4b825a6f29200242100fba562fbf97f41a108fc25a4b717805eaf
+oid sha256:fd98c683b8b0de88b0664fd61391e8c33281c3d5d2229e5f96cb4353bc1e83e2
 size 4920
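The committed adapter can be tried straight from the Hub via PEFT's auto class. A minimal sketch, assuming fp16 weights and an accelerate-managed device map (the card specifies no inference setup), using phi-2's usual "Instruct/Output" prompt style:

```python
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Repo id from the commit header; dtype and device_map are assumptions.
model = AutoPeftModelForCausalLM.from_pretrained(
    "dmcooller/neural-matia-phi-ft-2",
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")

prompt = "Instruct: Summarize what a LoRA adapter is.\nOutput:"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```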