Training in progress, step 10, checkpoint
last-checkpoint/adapter_config.json CHANGED
@@ -21,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "k_proj",
-    "gate_proj",
-    "down_proj",
-    "v_proj",
     "up_proj",
+    "gate_proj",
     "q_proj",
-    "
+    "v_proj",
+    "o_proj",
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
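For context, the new target_modules list attaches the LoRA adapter to every attention and MLP projection of a LLaMA-style model. A minimal sketch of a peft.LoraConfig that would serialize to this part of adapter_config.json is shown below; the rank and alpha values are assumptions, since the diff only shows target_modules, task_type, and use_dora.

# Minimal sketch (not the repo's actual training script): a peft.LoraConfig
# whose saved adapter_config.json would contain the fields shown above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,            # assumed rank; not visible in this diff
    lora_alpha=16,  # assumed scaling; not visible in this diff
    target_modules=[
        "k_proj", "up_proj", "gate_proj", "q_proj",
        "v_proj", "o_proj", "down_proj",
    ],
    task_type="CAUSAL_LM",  # from the diff
    use_dora=False,         # from the diff
)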
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d95098d63373bb83b8f55cc9f33fe4602b0d75bf930545ec1e8aae3b194d886a
 size 27024
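The safetensors, optimizer, and training_args files are stored as Git LFS pointers, so only the sha256 oid (and, if it changes, the byte size) is versioned in the repository itself. A quick way to confirm that a locally downloaded file matches its pointer is to hash it; the local path below is an assumption.

# Sketch: compare a downloaded checkpoint file against the sha256 oid recorded
# in its Git LFS pointer. The local path is illustrative.
import hashlib

def sha256_of(path):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

print(sha256_of("last-checkpoint/adapter_model.safetensors"))
# expected: d95098d63373bb83b8f55cc9f33fe4602b0d75bf930545ec1e8aae3b194d886a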
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:719cbdeae92fe8707cec988c306b1a04a61b4cddb4f92b5f539886c6519996d8
 size 63910
last-checkpoint/trainer_state.json CHANGED
@@ -10,7 +10,7 @@
   "log_history": [
     {
       "epoch": 0.005479452054794521,
-      "grad_norm": 0.
+      "grad_norm": 0.023885909467935562,
       "learning_rate": 2e-05,
       "loss": 10.3806,
       "step": 1
@@ -18,94 +18,94 @@
     {
       "epoch": 0.005479452054794521,
       "eval_loss": 10.376846313476562,
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_runtime": 0.3174,
+      "eval_samples_per_second": 122.881,
+      "eval_steps_per_second": 122.881,
       "step": 1
     },
     {
       "epoch": 0.010958904109589041,
-      "grad_norm": 0.
+      "grad_norm": 0.02708149142563343,
       "learning_rate": 4e-05,
       "loss": 10.3795,
       "step": 2
     },
     {
       "epoch": 0.01643835616438356,
-      "grad_norm": 0.
+      "grad_norm": 0.016956843435764313,
       "learning_rate": 6e-05,
       "loss": 10.3782,
       "step": 3
     },
     {
       "epoch": 0.01643835616438356,
-      "eval_loss": 10.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 10.376829147338867,
+      "eval_runtime": 0.2781,
+      "eval_samples_per_second": 140.23,
+      "eval_steps_per_second": 140.23,
       "step": 3
     },
     {
       "epoch": 0.021917808219178082,
-      "grad_norm": 0.
+      "grad_norm": 0.022694140672683716,
       "learning_rate": 8e-05,
       "loss": 10.3785,
       "step": 4
     },
     {
       "epoch": 0.0273972602739726,
-      "grad_norm": 0.
+      "grad_norm": 0.023907892405986786,
       "learning_rate": 0.0001,
       "loss": 10.3697,
       "step": 5
     },
     {
       "epoch": 0.03287671232876712,
-      "grad_norm": 0.
+      "grad_norm": 0.019220907241106033,
       "learning_rate": 0.00012,
       "loss": 10.3829,
       "step": 6
     },
     {
       "epoch": 0.03287671232876712,
-      "eval_loss": 10.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 10.376757621765137,
+      "eval_runtime": 0.2992,
+      "eval_samples_per_second": 130.333,
+      "eval_steps_per_second": 130.333,
       "step": 6
     },
     {
       "epoch": 0.038356164383561646,
-      "grad_norm": 0.
+      "grad_norm": 0.02152101695537567,
       "learning_rate": 0.00014,
       "loss": 10.3818,
       "step": 7
     },
     {
       "epoch": 0.043835616438356165,
-      "grad_norm": 0.
+      "grad_norm": 0.07344566285610199,
       "learning_rate": 0.00016,
       "loss": 10.38,
       "step": 8
     },
     {
       "epoch": 0.049315068493150684,
-      "grad_norm": 0.
+      "grad_norm": 0.03503725305199623,
       "learning_rate": 0.00018,
-      "loss": 10.
+      "loss": 10.3829,
       "step": 9
     },
     {
       "epoch": 0.049315068493150684,
-      "eval_loss": 10.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 10.376622200012207,
+      "eval_runtime": 0.3928,
+      "eval_samples_per_second": 99.297,
+      "eval_steps_per_second": 99.297,
       "step": 9
     },
     {
       "epoch": 0.0547945205479452,
-      "grad_norm": 0.
+      "grad_norm": 0.03340437635779381,
       "learning_rate": 0.0002,
       "loss": 10.3759,
       "step": 10
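The log entries above come straight from the checkpoint's trainer_state.json, so the same numbers can be inspected offline once the checkpoint is downloaded; the local path below is an assumption.

# Sketch: print step, loss, and grad_norm from the checkpoint's log_history.
# Keys match the diff above; the local path is illustrative.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:          # training log entries
        print(entry["step"], entry["loss"], entry.get("grad_norm"))
    elif "eval_loss" in entry:   # evaluation log entries
        print(entry["step"], "eval", entry["eval_loss"])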
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f9e7b71b1ce7772469f28700f63d8a31b72c32cf43950aa8f0fc2a8925ca4941
 size 6776
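Unlike trainer_state.json, training_args.bin is a pickled transformers.TrainingArguments object rather than JSON. A hedged sketch of inspecting it is below; since it is an arbitrary pickle, only load it from a source you trust, and the path is again an assumption.

# Sketch: inspect the pickled TrainingArguments saved with the checkpoint.
# weights_only=False is required because this file is a general pickle, not tensors.
import torch

args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size)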