haomingx committed on
Commit 1a9c3ae · verified · 1 Parent(s): 2afd41b

Upload 5 files

adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "haomingx/llama2-7b_lora_kud_privacy",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "o_proj",
+     "q_proj",
+     "v_proj",
+     "up_proj",
+     "down_proj",
+     "gate_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
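
For reference, the same adapter setup could be reconstructed in code. A minimal sketch, assuming a recent `peft` release; every value is copied from adapter_config.json above:

```python
# Sketch: the LoRA configuration above, rebuilt with the peft library.
# All hyperparameters (r=32, alpha=32, dropout=0.05, target modules) are
# copied verbatim from adapter_config.json; nothing here is new.
from peft import LoraConfig

lora_config = LoraConfig(
    r=32,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "k_proj", "o_proj", "q_proj", "v_proj",
        "up_proj", "down_proj", "gate_proj",
    ],
)
```

With `r` equal to `lora_alpha`, the effective LoRA scaling factor `alpha / r` is 1, and the adapter covers all attention and MLP projections of the Llama architecture.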
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
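
The map reuses `</s>` as the padding token, a common choice since the Llama-2 tokenizer ships without a dedicated pad token. A minimal sketch of checking this after loading; the repo path is a hypothetical placeholder:

```python
# Sketch: inspecting the special tokens after loading this tokenizer.
# "path/to/this/repo" is a hypothetical placeholder for the files uploaded here.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/repo")
assert tok.pad_token == tok.eos_token == "</s>"  # pad reuses </s>, per the map above
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
```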
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
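
The `chat_template` field encodes the standard Llama-2 `[INST]`/`<<SYS>>` prompt format. A minimal sketch of rendering a conversation with it, again assuming a placeholder path for this repo:

```python
# Sketch: applying the Llama-2-style chat_template defined above.
# "path/to/this/repo" is a hypothetical placeholder.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/repo")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)
# <s>[INST] <<SYS>>
# You are a helpful assistant.
# <</SYS>>
#
# Hello! [/INST]
```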
trainer_state.json ADDED
@@ -0,0 +1,145 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.9987257088244665,
+   "eval_steps": 1569,
+   "global_step": 6276,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.24976107040458745,
+       "grad_norm": 3.5836572647094727,
+       "learning_rate": 2.4984066284257492e-06,
+       "loss": 3.2128,
+       "step": 392
+     },
+     {
+       "epoch": 0.4995221408091749,
+       "grad_norm": 3.5956788063049316,
+       "learning_rate": 4.9968132568514985e-06,
+       "loss": 2.232,
+       "step": 784
+     },
+     {
+       "epoch": 0.7492832112137623,
+       "grad_norm": 5.461869239807129,
+       "learning_rate": 7.495219885277247e-06,
+       "loss": 1.8585,
+       "step": 1176
+     },
+     {
+       "epoch": 0.9990442816183498,
+       "grad_norm": 4.789157390594482,
+       "learning_rate": 9.993626513702997e-06,
+       "loss": 1.663,
+       "step": 1568
+     },
+     {
+       "epoch": 1.2488053520229372,
+       "grad_norm": 3.923671245574951,
+       "learning_rate": 9.37719018795795e-06,
+       "loss": 1.5195,
+       "step": 1960
+     },
+     {
+       "epoch": 1.4985664224275248,
+       "grad_norm": 4.947878360748291,
+       "learning_rate": 8.75278751194648e-06,
+       "loss": 1.481,
+       "step": 2352
+     },
+     {
+       "epoch": 1.7483274928321122,
+       "grad_norm": 3.5249428749084473,
+       "learning_rate": 8.128384835935011e-06,
+       "loss": 1.4036,
+       "step": 2744
+     },
+     {
+       "epoch": 1.9980885632366996,
+       "grad_norm": 1.927209734916687,
+       "learning_rate": 7.503982159923543e-06,
+       "loss": 1.3675,
+       "step": 3136
+     },
+     {
+       "epoch": 2.247849633641287,
+       "grad_norm": 5.133701324462891,
+       "learning_rate": 6.8795794839120745e-06,
+       "loss": 1.2484,
+       "step": 3528
+     },
+     {
+       "epoch": 2.4976107040458744,
+       "grad_norm": 5.654458045959473,
+       "learning_rate": 6.255176807900606e-06,
+       "loss": 1.1997,
+       "step": 3920
+     },
+     {
+       "epoch": 2.7473717744504618,
+       "grad_norm": 7.116914749145508,
+       "learning_rate": 5.630774131889137e-06,
+       "loss": 1.2026,
+       "step": 4312
+     },
+     {
+       "epoch": 2.9971328448550496,
+       "grad_norm": 5.798983573913574,
+       "learning_rate": 5.006371455877669e-06,
+       "loss": 1.1859,
+       "step": 4704
+     },
+     {
+       "epoch": 3.246893915259637,
+       "grad_norm": 5.535274505615234,
+       "learning_rate": 4.3819687798662e-06,
+       "loss": 1.0458,
+       "step": 5096
+     },
+     {
+       "epoch": 3.4966549856642244,
+       "grad_norm": 5.662832736968994,
+       "learning_rate": 3.757566103854731e-06,
+       "loss": 1.0485,
+       "step": 5488
+     },
+     {
+       "epoch": 3.746416056068812,
+       "grad_norm": 5.849664211273193,
+       "learning_rate": 3.1331634278432627e-06,
+       "loss": 1.0564,
+       "step": 5880
+     },
+     {
+       "epoch": 3.996177126473399,
+       "grad_norm": 6.500792026519775,
+       "learning_rate": 2.5087607518317936e-06,
+       "loss": 1.0403,
+       "step": 6272
+     }
+   ],
+   "logging_steps": 392,
+   "max_steps": 7847,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 6,
+   "save_steps": 1569,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
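
The log shows training loss falling from 3.21 to 1.04 over roughly four epochs, with the learning rate warming up to about 1e-5 by the end of epoch 1 and decaying afterwards. A minimal sketch of printing that trajectory; the field names come directly from trainer_state.json above:

```python
# Sketch: summarizing the training trajectory stored in trainer_state.json.
# Field names (log_history, step, epoch, loss, learning_rate) come from the file above.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for rec in state["log_history"]:
    print(f"step {rec['step']:>5} | epoch {rec['epoch']:5.2f} | "
          f"loss {rec['loss']:.4f} | lr {rec['learning_rate']:.2e}")
```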