AmberYifan
committed on
Model save
- all_results.json +4 -4
- train_results.json +4 -4
- trainer_state.json +63 -63
all_results.json
CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 0.992,
     "total_flos": 0.0,
-    "train_loss": 0.
+    "train_loss": 0.8421394844208995,
-    "train_runtime":
+    "train_runtime": 736.5998,
     "train_samples": 2000,
-    "train_samples_per_second": 2.
+    "train_samples_per_second": 2.715,
-    "train_steps_per_second": 0.
+    "train_steps_per_second": 0.084
 }
train_results.json
CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 0.992,
     "total_flos": 0.0,
-    "train_loss": 0.
+    "train_loss": 0.8421394844208995,
-    "train_runtime":
+    "train_runtime": 736.5998,
     "train_samples": 2000,
-    "train_samples_per_second": 2.
+    "train_samples_per_second": 2.715,
-    "train_steps_per_second": 0.
+    "train_steps_per_second": 0.084
 }
trainer_state.json
CHANGED
@@ -10,7 +10,7 @@
     "log_history": [
         {
             "epoch": 0.016,
-            "grad_norm": 22.
+            "grad_norm": 22.116731061155882,
             "learning_rate": 7.142857142857142e-08,
             "logits/generated": -0.9303410053253174,
             "logits/real": -0.6472625732421875,
@@ -25,102 +25,102 @@
         },
         {
             "epoch": 0.16,
-            "grad_norm": 19.
+            "grad_norm": 19.00686173642431,
             "learning_rate": 4.727272727272727e-07,
-            "logits/generated": -0.
+            "logits/generated": -0.7783747315406799,
-            "logits/real": -0.
+            "logits/real": -0.7638665437698364,
-            "logps/generated": -174.
+            "logps/generated": -174.0995330810547,
-            "logps/real": -182.
+            "logps/real": -182.80723571777344,
-            "loss": 0.
+            "loss": 0.8563,
-            "rewards/accuracies": 0.
+            "rewards/accuracies": 0.5277777910232544,
-            "rewards/generated": 0.
+            "rewards/generated": 0.09232226759195328,
-            "rewards/margins": 0.
+            "rewards/margins": 0.01775742694735527,
-            "rewards/real": 0.
+            "rewards/real": 0.11007969081401825,
             "step": 10
         },
         {
             "epoch": 0.32,
-            "grad_norm": 20.
+            "grad_norm": 20.37082961992304,
             "learning_rate": 3.818181818181818e-07,
-            "logits/generated": -0.
+            "logits/generated": -0.7393258213996887,
-            "logits/real": -0.
+            "logits/real": -0.7510851621627808,
-            "logps/generated": -163.
+            "logps/generated": -163.16494750976562,
-            "logps/real": -164.
+            "logps/real": -164.861572265625,
-            "loss": 0.
+            "loss": 0.8527,
-            "rewards/accuracies": 0.
+            "rewards/accuracies": 0.48750001192092896,
-            "rewards/generated": 0.
+            "rewards/generated": 0.45944079756736755,
-            "rewards/margins": -0.
+            "rewards/margins": -0.011271441355347633,
-            "rewards/real": 0.
+            "rewards/real": 0.44816938042640686,
             "step": 20
         },
         {
             "epoch": 0.48,
-            "grad_norm": 19.
+            "grad_norm": 19.828947810570654,
             "learning_rate": 2.909090909090909e-07,
-            "logits/generated": -0.
+            "logits/generated": -0.8125241994857788,
-            "logits/real": -0.
+            "logits/real": -0.6840296983718872,
-            "logps/generated": -
+            "logps/generated": -145.97573852539062,
-            "logps/real": -154.
+            "logps/real": -154.54061889648438,
-            "loss": 0.
+            "loss": 0.8316,
-            "rewards/accuracies": 0.
+            "rewards/accuracies": 0.625,
-            "rewards/generated": 0.
+            "rewards/generated": 0.6635308265686035,
-            "rewards/margins": 0.
+            "rewards/margins": 0.1064867228269577,
-            "rewards/real": 0.
+            "rewards/real": 0.7700175046920776,
             "step": 30
         },
         {
             "epoch": 0.64,
-            "grad_norm":
+            "grad_norm": 20.449526730384434,
             "learning_rate": 2e-07,
-            "logits/generated": -0.
+            "logits/generated": -0.7401232719421387,
-            "logits/real": -0.
+            "logits/real": -0.6730000972747803,
-            "logps/generated": -165.
+            "logps/generated": -165.48190307617188,
-            "logps/real": -177.
+            "logps/real": -177.96424865722656,
-            "loss": 0.
+            "loss": 0.8405,
-            "rewards/accuracies": 0.
+            "rewards/accuracies": 0.5625,
-            "rewards/generated": 0.
+            "rewards/generated": 0.9031688570976257,
-            "rewards/margins": 0.
+            "rewards/margins": 0.03490729257464409,
-            "rewards/real": 0.
+            "rewards/real": 0.9380761384963989,
             "step": 40
         },
         {
             "epoch": 0.8,
-            "grad_norm": 20.
+            "grad_norm": 20.801102601242686,
             "learning_rate": 1.0909090909090908e-07,
-            "logits/generated": -0.
+            "logits/generated": -0.7878767251968384,
-            "logits/real": -0.
+            "logits/real": -0.6102726459503174,
-            "logps/generated": -163.
+            "logps/generated": -163.09925842285156,
-            "logps/real": -180.
+            "logps/real": -180.8894500732422,
             "loss": 0.8413,
-            "rewards/accuracies": 0.
+            "rewards/accuracies": 0.5625,
-            "rewards/generated": 1.
+            "rewards/generated": 1.0153449773788452,
-            "rewards/margins": 0.
+            "rewards/margins": 0.08370596170425415,
-            "rewards/real": 1.
+            "rewards/real": 1.0990509986877441,
             "step": 50
         },
         {
             "epoch": 0.96,
-            "grad_norm": 20.
+            "grad_norm": 20.460813305727022,
             "learning_rate": 1.818181818181818e-08,
-            "logits/generated": -0.
+            "logits/generated": -0.652385413646698,
-            "logits/real": -0.
+            "logits/real": -0.5340145230293274,
-            "logps/generated": -169.
+            "logps/generated": -169.20877075195312,
-            "logps/real": -
+            "logps/real": -176.0380859375,
-            "loss": 0.
+            "loss": 0.8323,
             "rewards/accuracies": 0.5625,
-            "rewards/generated": 1.
+            "rewards/generated": 1.1103785037994385,
-            "rewards/margins": 0.
+            "rewards/margins": 0.07993375509977341,
-            "rewards/real": 1.
+            "rewards/real": 1.190312385559082,
             "step": 60
         },
         {
             "epoch": 0.992,
             "step": 62,
             "total_flos": 0.0,
-            "train_loss": 0.
+            "train_loss": 0.8421394844208995,
-            "train_runtime":
+            "train_runtime": 736.5998,
-            "train_samples_per_second": 2.
+            "train_samples_per_second": 2.715,
-            "train_steps_per_second": 0.
+            "train_steps_per_second": 0.084
         }
     ],
     "logging_steps": 10,
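For reference, a minimal sketch of how the metrics touched by this commit can be inspected locally. It only uses keys visible in the diff above ("log_history", "step", "loss", "rewards/margins", "train_loss", "train_runtime"); the file path is an assumption and should point at the saved checkpoint.

```python
import json

# Assumed path to the file updated in this commit; adjust as needed.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        # Intermediate logging entries: per-step loss and reward margin.
        print(f"step {entry.get('step')}: loss={entry['loss']}, "
              f"rewards/margins={entry.get('rewards/margins')}")
    elif "train_loss" in entry:
        # Final entry: aggregate training statistics.
        print(f"final: train_loss={entry['train_loss']}, "
              f"train_runtime={entry.get('train_runtime')}s")
```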