jeffwan committed
Commit 9179234
1 Parent(s): 1878956

Add alpaca lora weights

adapter_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "base_model_name_or_path": "/home/ubuntu/models/llama_7B/",
+   "bias": "none",
+   "enable_lora": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "merge_weights": false,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
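
For context, a minimal sketch of how an adapter with this config might be attached to the base model with 🤗 PEFT (not the author's script): the base model path is taken from adapter_config.json above, while ADAPTER_PATH is a placeholder for wherever this repo was downloaded.

```python
# Minimal sketch: load the base LLaMA weights and attach this LoRA adapter
# (r=8, lora_alpha=16, targeting q_proj/v_proj). ADAPTER_PATH is a placeholder.
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer
from peft import PeftModel

BASE_MODEL = "/home/ubuntu/models/llama_7B/"  # base_model_name_or_path from the config
ADAPTER_PATH = "./alpaca-lora"                # assumption: local clone of this repo

model = LlamaForCausalLM.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)

# The config sets inference_mode=true, so the adapter is wrapped for inference only.
model = PeftModel.from_pretrained(model, ADAPTER_PATH)
model.eval()
```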
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a825d8cbde28524b285f1c1715d2582fd57f2c67bf5f19cfa41efbb0cad0915
+ size 16822989
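
The binary files are stored as Git LFS pointers (version / oid / size), so a plain clone contains only these small stubs; `git lfs pull` or the Hub's download tooling fetches the real payloads. As a sanity check, a downloaded file can be hashed against the pointer's oid, a small sketch assuming the file sits in the working directory:

```python
# Sketch: verify a downloaded LFS object against the sha256 oid from its pointer.
# The local filename is an assumption about where the file was saved.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "2a825d8cbde28524b285f1c1715d2582fd57f2c67bf5f19cfa41efbb0cad0915"
print(sha256_of("adapter_model.bin") == expected)
```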
checkpoint-200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ecc364b7d27de6764568949f678a3cbf3e783792f1cb2889c4a3389be1dd35c
+ size 33661637
checkpoint-200/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78ee9f238fc019bac3e6d601c53840791e5579a86cfaac0b302061a21c814015
+ size 16822989
checkpoint-200/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5968c09fc183ec7c9819d009d4e56d63e8e7db3959e90c2e83d3bd9605aca9c
+ size 14575
checkpoint-200/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:577d21ec99291b14ec22312d34683ff2133f37672ec277c6f85957f98d6f2f80
+ size 557
checkpoint-200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f2cca9b236dcac259574c23e23fc7fce56c88f4f36864b866da31ca5c4faccb
+ size 627
checkpoint-200/trainer_state.json ADDED
@@ -0,0 +1,144 @@
+ {
+   "best_metric": 0.8030890822410583,
+   "best_model_checkpoint": "./lora-alpaca/checkpoint-200",
+   "epoch": 0.5144694533762058,
+   "global_step": 200,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03,
+       "learning_rate": 2.9999999999999997e-05,
+       "loss": 2.2847,
+       "step": 10
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 5.9999999999999995e-05,
+       "loss": 2.2109,
+       "step": 20
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 8.699999999999999e-05,
+       "loss": 1.9736,
+       "step": 30
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 0.000117,
+       "loss": 1.5818,
+       "step": 40
+     },
+     {
+       "epoch": 0.13,
+       "learning_rate": 0.000147,
+       "loss": 1.2586,
+       "step": 50
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 0.00017699999999999997,
+       "loss": 1.1373,
+       "step": 60
+     },
+     {
+       "epoch": 0.18,
+       "learning_rate": 0.00020699999999999996,
+       "loss": 0.9888,
+       "step": 70
+     },
+     {
+       "epoch": 0.21,
+       "learning_rate": 0.000237,
+       "loss": 0.8617,
+       "step": 80
+     },
+     {
+       "epoch": 0.23,
+       "learning_rate": 0.000267,
+       "loss": 0.8454,
+       "step": 90
+     },
+     {
+       "epoch": 0.26,
+       "learning_rate": 0.00029699999999999996,
+       "loss": 0.8458,
+       "step": 100
+     },
+     {
+       "epoch": 0.28,
+       "learning_rate": 0.000290625,
+       "loss": 0.8341,
+       "step": 110
+     },
+     {
+       "epoch": 0.31,
+       "learning_rate": 0.0002802083333333333,
+       "loss": 0.8218,
+       "step": 120
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 0.00026979166666666666,
+       "loss": 0.8304,
+       "step": 130
+     },
+     {
+       "epoch": 0.36,
+       "learning_rate": 0.000259375,
+       "loss": 0.8161,
+       "step": 140
+     },
+     {
+       "epoch": 0.39,
+       "learning_rate": 0.00024895833333333334,
+       "loss": 0.8097,
+       "step": 150
+     },
+     {
+       "epoch": 0.41,
+       "learning_rate": 0.00023854166666666663,
+       "loss": 0.8134,
+       "step": 160
+     },
+     {
+       "epoch": 0.44,
+       "learning_rate": 0.00022812499999999997,
+       "loss": 0.8214,
+       "step": 170
+     },
+     {
+       "epoch": 0.46,
+       "learning_rate": 0.00021770833333333332,
+       "loss": 0.8095,
+       "step": 180
+     },
+     {
+       "epoch": 0.49,
+       "learning_rate": 0.00020729166666666663,
+       "loss": 0.8028,
+       "step": 190
+     },
+     {
+       "epoch": 0.51,
+       "learning_rate": 0.00019687499999999997,
+       "loss": 0.8014,
+       "step": 200
+     },
+     {
+       "epoch": 0.51,
+       "eval_loss": 0.8030890822410583,
+       "eval_runtime": 134.5586,
+       "eval_samples_per_second": 14.863,
+       "eval_steps_per_second": 1.858,
+       "step": 200
+     }
+   ],
+   "max_steps": 388,
+   "num_train_epochs": 1,
+   "total_flos": 2.5486812238577664e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
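
The trainer state records a warmup-then-linear-decay learning-rate schedule and the training loss up to step 200 of 388, with the best eval_loss (0.8031) reached at this checkpoint. A small sketch for inspecting it, assuming the checkpoint directory has been downloaded locally:

```python
# Sketch: print the loss / learning-rate trajectory from trainer_state.json.
# The relative path is an assumption about where the checkpoint was saved.
import json

with open("checkpoint-200/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # training logs; the final entry holds eval metrics instead
        print(f"step {entry['step']:>3}  lr {entry['learning_rate']:.2e}  loss {entry['loss']:.4f}")

print("best eval_loss:", state["best_metric"], "at", state["best_model_checkpoint"])
```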
checkpoint-200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:caf61cff2e80d88998fcc6b744a0d1808f23975fbc1c2fc421f2ce06178efb6c
+ size 3579
runs/Mar27_21-39-54_192-9-155-93/1679953194.7849967/events.out.tfevents.1679953194.192-9-155-93.76817.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6316f7f77e43cb7e07dbee9a6837ef7f5d3b0aa4b9848fc2557527aca10d3d0
+ size 5773
runs/Mar27_21-39-54_192-9-155-93/events.out.tfevents.1679953194.192-9-155-93.76817.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b509688dbf98a825afdeb51f60a3e629bd2d5857f1f3093fdabc138388e66467
+ size 10346