arynkiewicz committed on
Commit 94af9e4 · verified · 1 Parent(s): d093f64

Model save
README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ license: apache-2.0
+ base_model: mistralai/Mistral-7B-v0.1
+ tags:
+ - trl
+ - orpo
+ - generated_from_trainer
+ model-index:
+ - name: anydef-orpo-v2
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # anydef-orpo-v2
+
+ This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 64
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: inverse_sqrt
+ - lr_scheduler_warmup_steps: 100
+ - num_epochs: 3
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.43.3
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.18.0
+ - Tokenizers 0.19.1
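
The card above records the ORPO hyperparameters but not the training script. A minimal sketch of what such a run could look like with TRL's `ORPOTrainer` (the dataset name and its preference-pair columns are assumptions; only the hyperparameters come from the card):

```python
# Minimal sketch of reproducing the run described in the model card.
# Dataset name/columns are hypothetical; hyperparameters mirror the card above.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import ORPOConfig, ORPOTrainer

model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
tokenizer.pad_token = tokenizer.eos_token  # mirrors special_tokens_map.json (pad = "</s>")

config = ORPOConfig(
    output_dir="anydef-orpo-v2",
    learning_rate=5e-6,
    per_device_train_batch_size=8,   # x 8 GPUs = total_train_batch_size 64
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="inverse_sqrt",
    warmup_steps=100,
    num_train_epochs=3,
)

# ORPO expects preference pairs: each row needs "prompt", "chosen", "rejected".
dataset = load_dataset("my-org/anydef-preferences", split="train")  # hypothetical name

trainer = ORPOTrainer(
    model=model,
    args=config,
    train_dataset=dataset,
    tokenizer=tokenizer,
)
trainer.train()
```

With 8 devices at a per-device batch of 8, the effective batch size of 64 in the card falls out directly; launching under `accelerate` or `torchrun` would handle the multi-GPU part.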
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 3.0,
+   "total_flos": 0.0,
+   "train_loss": 0.16268803322004982,
+   "train_runtime": 7294.4356,
+   "train_samples": 19245,
+   "train_samples_per_second": 7.915,
+   "train_steps_per_second": 0.124
+ }
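
These aggregates are internally consistent with the hyperparameters in the README; a quick check:

```python
# Sanity check: derive the throughput figures above from the raw counts.
import math

train_samples = 19245
total_train_batch_size = 64      # 8 per device x 8 GPUs (see README)
num_epochs = 3
train_runtime = 7294.4356        # seconds

steps = math.ceil(train_samples / total_train_batch_size) * num_epochs
print(steps)                                                  # 903, matching trainer_state.json
print(round(train_samples * num_epochs / train_runtime, 3))   # 7.915 samples/s
print(round(steps / train_runtime, 3))                        # 0.124 steps/s
```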
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.43.3",
+   "use_cache": false,
+   "vocab_size": 32000
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.43.3"
+ }
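
The generation config only pins the BOS/EOS token ids, so decoding settings are left to the caller. A minimal inference sketch (the local checkpoint path and prompt are illustrative, not from the repo):

```python
# Sketch: load the saved checkpoint and generate with default settings.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "anydef-orpo-v2",            # assumed local path to this checkpoint
    torch_dtype=torch.bfloat16,  # matches torch_dtype in config.json
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("anydef-orpo-v2")

inputs = tokenizer("Define 'perihelion':", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```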
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c722f3309f8baa50abc2ce1353973ac34246fb78946d3bba22313639b2799b8
+ size 4943162336
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:810a00e16834e2c924831fe991d8d995826f401f0e8a27138be4551ec26ca349
+ size 4999819336
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a78bd4b5861d69f2ffc1646024509e603fe53ba6f01603f0d3015f5b7fa6f119
+ size 4540516344
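
These are Git LFS pointers rather than the weights themselves: the `oid` is the SHA-256 of the real shard. A sketch of checking a downloaded shard against its pointer (the local filename is an assumption about where the shard was saved):

```python
# Sketch: verify a downloaded shard against the sha256 recorded in its LFS pointer.
import hashlib

# oid/size copied from the model-00003-of-00003.safetensors pointer above
expected_oid = "a78bd4b5861d69f2ffc1646024509e603fe53ba6f01603f0d3015f5b7fa6f119"
expected_size = 4540516344

h = hashlib.sha256()
n = 0
with open("model-00003-of-00003.safetensors", "rb") as f:  # assumed local download
    for chunk in iter(lambda: f.read(1 << 20), b""):       # hash in 1 MiB chunks
        h.update(chunk)
        n += len(chunk)

assert n == expected_size and h.hexdigest() == expected_oid, "shard corrupt or incomplete"
```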
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+ {
+   "metadata": {
+     "total_size": 14483464192
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00003-of-00003.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.norm.weight": "model-00003-of-00003.safetensors"
+   }
+ }
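
The `weight_map` is what `from_pretrained` consults to locate each tensor's shard; note that layer 22's attention projections sit in shard 2 while its layernorms and MLP spill over into shard 3. Resolving a tensor by hand is straightforward:

```python
# Sketch: look up which shard holds a given tensor using the index above.
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])  # 14483464192 bytes (~14.5 GB of parameters)
print(index["weight_map"]["model.layers.22.self_attn.k_proj.weight"])
# model-00002-of-00003.safetensors -- layer 22 straddles the shard boundary
```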
runs/Aug20_08-37-28_hgx1/events.out.tfevents.1724136396.hgx1.154369.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d7cee71c80e7a5a96510e02ff8fee5d41ae0c0880b6f89b7295abb5e5820a83
+ size 82866
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 2048,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
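
The `chat_template` above wraps each turn in `<|user|>` / `<|system|>` / `<|assistant|>` markers terminated by `</s>`. A sketch of rendering a prompt through it (checkpoint path and message content are illustrative):

```python
# Sketch: render a conversation through the chat template defined above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("anydef-orpo-v2")  # assumed local path

messages = [{"role": "user", "content": "Define 'perihelion'."}]
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,  # appends the trailing '<|assistant|>' marker
)
print(prompt)  # <|user|> ... </s> followed by <|assistant|>
```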
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 3.0,
+   "total_flos": 0.0,
+   "train_loss": 0.16268803322004982,
+   "train_runtime": 7294.4356,
+   "train_samples": 19245,
+   "train_samples_per_second": 7.915,
+   "train_steps_per_second": 0.124
+ }
trainer_state.json ADDED
@@ -0,0 +1,1662 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 903,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03322259136212625,
+       "grad_norm": 516.0,
+       "learning_rate": 5.000000000000001e-07,
+       "log_odds_chosen": 0.16222620010375977,
+       "log_odds_ratio": -0.6306548118591309,
+       "logits/chosen": -2.278585433959961,
+       "logits/rejected": -2.279832124710083,
+       "logps/chosen": -2.096900463104248,
+       "logps/rejected": -2.239978551864624,
+       "loss": 4.4301,
+       "nll_loss": 4.511023998260498,
+       "rewards/accuracies": 0.699999988079071,
+       "rewards/chosen": -0.10484502464532852,
+       "rewards/margins": 0.007153891958296299,
+       "rewards/rejected": -0.11199891567230225,
+       "step": 10
+     },
+     {
+       "epoch": 0.0664451827242525,
+       "grad_norm": 51.25,
+       "learning_rate": 1.0000000000000002e-06,
+       "log_odds_chosen": 0.2909570336341858,
+       "log_odds_ratio": -0.5764315724372864,
+       "logits/chosen": -2.9006943702697754,
+       "logits/rejected": -2.899392604827881,
+       "logps/chosen": -1.8797166347503662,
+       "logps/rejected": -2.132899045944214,
+       "loss": 2.0537,
+       "nll_loss": 1.9548499584197998,
+       "rewards/accuracies": 0.8125,
+       "rewards/chosen": -0.09398583322763443,
+       "rewards/margins": 0.012659117579460144,
+       "rewards/rejected": -0.10664495080709457,
+       "step": 20
+     },
+     {
+       "epoch": 0.09966777408637874,
+       "grad_norm": 21.5,
+       "learning_rate": 1.5e-06,
+       "log_odds_chosen": 0.4692462384700775,
+       "log_odds_ratio": -0.510283350944519,
+       "logits/chosen": -2.651613235473633,
+       "logits/rejected": -2.651846170425415,
+       "logps/chosen": -1.1686553955078125,
+       "logps/rejected": -1.4823462963104248,
+       "loss": 0.8489,
+       "nll_loss": 0.7852751612663269,
+       "rewards/accuracies": 0.875,
+       "rewards/chosen": -0.058432769030332565,
+       "rewards/margins": 0.015684548765420914,
+       "rewards/rejected": -0.07411732524633408,
+       "step": 30
+     },
+     {
+       "epoch": 0.132890365448505,
+       "grad_norm": 5.4375,
+       "learning_rate": 2.0000000000000003e-06,
+       "log_odds_chosen": 1.7947794198989868,
+       "log_odds_ratio": -0.26858600974082947,
+       "logits/chosen": -2.2309794425964355,
+       "logits/rejected": -2.229917526245117,
+       "logps/chosen": -0.3180321156978607,
+       "logps/rejected": -1.0096584558486938,
+       "loss": 0.682,
+       "nll_loss": 0.6571449041366577,
+       "rewards/accuracies": 0.887499988079071,
+       "rewards/chosen": -0.015901606529951096,
+       "rewards/margins": 0.03458131104707718,
+       "rewards/rejected": -0.05048292130231857,
+       "step": 40
+     },
+     {
+       "epoch": 0.16611295681063123,
+       "grad_norm": 3.921875,
+       "learning_rate": 2.5e-06,
+       "log_odds_chosen": 3.033090114593506,
+       "log_odds_ratio": -0.14658799767494202,
+       "logits/chosen": -2.118901014328003,
+       "logits/rejected": -2.1173033714294434,
+       "logps/chosen": -0.25798267126083374,
+       "logps/rejected": -1.627856969833374,
+       "loss": 0.6248,
+       "nll_loss": 0.600904107093811,
+       "rewards/accuracies": 0.9624999761581421,
+       "rewards/chosen": -0.012899133376777172,
+       "rewards/margins": 0.06849371641874313,
+       "rewards/rejected": -0.08139285445213318,
+       "step": 50
+     },
+     {
+       "epoch": 0.19933554817275748,
+       "grad_norm": 6.03125,
+       "learning_rate": 3e-06,
+       "log_odds_chosen": 3.421691417694092,
+       "log_odds_ratio": -0.14014041423797607,
+       "logits/chosen": -2.077253580093384,
+       "logits/rejected": -2.0778613090515137,
+       "logps/chosen": -0.22530755400657654,
+       "logps/rejected": -1.8401321172714233,
+       "loss": 0.6023,
+       "nll_loss": 0.6070769429206848,
+       "rewards/accuracies": 0.9624999761581421,
+       "rewards/chosen": -0.011265376582741737,
+       "rewards/margins": 0.08074121922254562,
+       "rewards/rejected": -0.0920066088438034,
+       "step": 60
+     },
+     {
+       "epoch": 0.23255813953488372,
+       "grad_norm": 4.40625,
+       "learning_rate": 3.5e-06,
+       "log_odds_chosen": 4.030662536621094,
+       "log_odds_ratio": -0.07589299231767654,
+       "logits/chosen": -2.0758180618286133,
+       "logits/rejected": -2.0744235515594482,
+       "logps/chosen": -0.2279697209596634,
+       "logps/rejected": -2.004603385925293,
+       "loss": 0.5389,
+       "nll_loss": 0.534622073173523,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.011398485861718655,
+       "rewards/margins": 0.08883167803287506,
+       "rewards/rejected": -0.10023017227649689,
+       "step": 70
+     },
+     {
+       "epoch": 0.26578073089701,
+       "grad_norm": 3.609375,
+       "learning_rate": 4.000000000000001e-06,
+       "log_odds_chosen": 4.552683353424072,
+       "log_odds_ratio": -0.056131958961486816,
+       "logits/chosen": -1.9744676351547241,
+       "logits/rejected": -1.9743177890777588,
+       "logps/chosen": -0.18155953288078308,
+       "logps/rejected": -2.2489826679229736,
+       "loss": 0.5203,
+       "nll_loss": 0.4936625063419342,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.009077977389097214,
+       "rewards/margins": 0.10337115824222565,
+       "rewards/rejected": -0.11244914680719376,
+       "step": 80
+     },
+     {
+       "epoch": 0.29900332225913623,
+       "grad_norm": 4.09375,
+       "learning_rate": 4.5e-06,
+       "log_odds_chosen": 4.036518096923828,
+       "log_odds_ratio": -0.11566118150949478,
+       "logits/chosen": -2.0107204914093018,
+       "logits/rejected": -2.009970188140869,
+       "logps/chosen": -0.2278076857328415,
+       "logps/rejected": -2.3444762229919434,
+       "loss": 0.4353,
+       "nll_loss": 0.42819660902023315,
+       "rewards/accuracies": 0.9624999761581421,
+       "rewards/chosen": -0.01139038521796465,
+       "rewards/margins": 0.10583342611789703,
+       "rewards/rejected": -0.11722382158041,
+       "step": 90
+     },
+     {
+       "epoch": 0.33222591362126247,
+       "grad_norm": 5.59375,
+       "learning_rate": 5e-06,
+       "log_odds_chosen": 5.044631004333496,
+       "log_odds_ratio": -0.04541964456439018,
+       "logits/chosen": -1.9699828624725342,
+       "logits/rejected": -1.973362922668457,
+       "logps/chosen": -0.17292837798595428,
+       "logps/rejected": -2.476628065109253,
+       "loss": 0.378,
+       "nll_loss": 0.4115411639213562,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.008646419271826744,
+       "rewards/margins": 0.11518500000238419,
+       "rewards/rejected": -0.12383142858743668,
+       "step": 100
+     },
+     {
+       "epoch": 0.3654485049833887,
+       "grad_norm": 5.15625,
+       "learning_rate": 4.767312946227961e-06,
+       "log_odds_chosen": 5.11702823638916,
+       "log_odds_ratio": -0.06080981343984604,
+       "logits/chosen": -1.883387565612793,
+       "logits/rejected": -1.8867937326431274,
+       "logps/chosen": -0.2399381399154663,
+       "logps/rejected": -2.785667896270752,
+       "loss": 0.3143,
+       "nll_loss": 0.25671663880348206,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.011996905319392681,
+       "rewards/margins": 0.12728647887706757,
+       "rewards/rejected": -0.13928338885307312,
+       "step": 110
+     },
+     {
+       "epoch": 0.39867109634551495,
+       "grad_norm": 5.0625,
+       "learning_rate": 4.564354645876385e-06,
+       "log_odds_chosen": 5.193495273590088,
+       "log_odds_ratio": -0.06189825385808945,
+       "logits/chosen": -2.042405605316162,
+       "logits/rejected": -2.0465760231018066,
+       "logps/chosen": -0.1781654804944992,
+       "logps/rejected": -2.7364754676818848,
+       "loss": 0.2883,
+       "nll_loss": 0.31883668899536133,
+       "rewards/accuracies": 0.9750000238418579,
+       "rewards/chosen": -0.008908274583518505,
+       "rewards/margins": 0.12791548669338226,
+       "rewards/rejected": -0.13682377338409424,
+       "step": 120
+     },
+     {
+       "epoch": 0.4318936877076412,
+       "grad_norm": 6.40625,
+       "learning_rate": 4.385290096535147e-06,
+       "log_odds_chosen": 5.065199375152588,
+       "log_odds_ratio": -0.08915611356496811,
+       "logits/chosen": -1.965550184249878,
+       "logits/rejected": -1.9655630588531494,
+       "logps/chosen": -0.2024417221546173,
+       "logps/rejected": -2.5124077796936035,
+       "loss": 0.2346,
+       "nll_loss": 0.2726798355579376,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.01012208592146635,
+       "rewards/margins": 0.11549830436706543,
+       "rewards/rejected": -0.12562039494514465,
+       "step": 130
+     },
+     {
+       "epoch": 0.46511627906976744,
+       "grad_norm": 5.65625,
+       "learning_rate": 4.2257712736425835e-06,
+       "log_odds_chosen": 6.224053859710693,
+       "log_odds_ratio": -0.03768063336610794,
+       "logits/chosen": -1.9541261196136475,
+       "logits/rejected": -1.9546234607696533,
+       "logps/chosen": -0.18753428757190704,
+       "logps/rejected": -3.1887362003326416,
+       "loss": 0.2075,
+       "nll_loss": 0.1721208095550537,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.009376714006066322,
+       "rewards/margins": 0.15006008744239807,
+       "rewards/rejected": -0.15943679213523865,
+       "step": 140
+     },
+     {
+       "epoch": 0.4983388704318937,
+       "grad_norm": 5.53125,
+       "learning_rate": 4.082482904638631e-06,
+       "log_odds_chosen": 5.947408199310303,
+       "log_odds_ratio": -0.03485158830881119,
+       "logits/chosen": -1.928086280822754,
+       "logits/rejected": -1.9305970668792725,
+       "logps/chosen": -0.16427160799503326,
+       "logps/rejected": -3.250744581222534,
+       "loss": 0.1935,
+       "nll_loss": 0.1781485378742218,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.008213580586016178,
+       "rewards/margins": 0.15432362258434296,
+       "rewards/rejected": -0.16253721714019775,
+       "step": 150
+     },
+     {
+       "epoch": 0.53156146179402,
+       "grad_norm": 5.0625,
+       "learning_rate": 3.952847075210474e-06,
+       "log_odds_chosen": 5.442956924438477,
+       "log_odds_ratio": -0.0749388113617897,
+       "logits/chosen": -2.025440216064453,
+       "logits/rejected": -2.0270285606384277,
+       "logps/chosen": -0.18180342018604279,
+       "logps/rejected": -2.965075731277466,
+       "loss": 0.1607,
+       "nll_loss": 0.18244585394859314,
+       "rewards/accuracies": 0.9750000238418579,
+       "rewards/chosen": -0.00909017026424408,
+       "rewards/margins": 0.13916362822055817,
+       "rewards/rejected": -0.14825379848480225,
+       "step": 160
+     },
+     {
+       "epoch": 0.5647840531561462,
+       "grad_norm": 4.8125,
+       "learning_rate": 3.834824944236852e-06,
+       "log_odds_chosen": 6.501151084899902,
+       "log_odds_ratio": -0.04770870879292488,
+       "logits/chosen": -1.9193477630615234,
+       "logits/rejected": -1.9223600625991821,
+       "logps/chosen": -0.16726627945899963,
+       "logps/rejected": -3.4573769569396973,
+       "loss": 0.1339,
+       "nll_loss": 0.16945432126522064,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.008363312110304832,
+       "rewards/margins": 0.16450552642345428,
+       "rewards/rejected": -0.17286884784698486,
+       "step": 170
+     },
+     {
+       "epoch": 0.5980066445182725,
+       "grad_norm": 4.8125,
+       "learning_rate": 3.72677996249965e-06,
+       "log_odds_chosen": 6.667401313781738,
+       "log_odds_ratio": -0.049022819846868515,
+       "logits/chosen": -1.849381685256958,
+       "logits/rejected": -1.8515217304229736,
+       "logps/chosen": -0.135690376162529,
+       "logps/rejected": -3.4136643409729004,
+       "loss": 0.12,
+       "nll_loss": 0.0964335948228836,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.006784519646316767,
+       "rewards/margins": 0.16389869153499603,
+       "rewards/rejected": -0.17068320512771606,
+       "step": 180
+     },
+     {
+       "epoch": 0.6312292358803987,
+       "grad_norm": 4.0625,
+       "learning_rate": 3.6273812505500587e-06,
+       "log_odds_chosen": 6.884723663330078,
+       "log_odds_ratio": -0.028747648000717163,
+       "logits/chosen": -1.9872407913208008,
+       "logits/rejected": -1.9903675317764282,
+       "logps/chosen": -0.1553266942501068,
+       "logps/rejected": -3.7033779621124268,
+       "loss": 0.1064,
+       "nll_loss": 0.1674821376800537,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.007766333874315023,
+       "rewards/margins": 0.1774025708436966,
+       "rewards/rejected": -0.18516890704631805,
+       "step": 190
+     },
+     {
+       "epoch": 0.6644518272425249,
+       "grad_norm": 4.40625,
+       "learning_rate": 3.5355339059327378e-06,
+       "log_odds_chosen": 6.214459419250488,
+       "log_odds_ratio": -0.04790915921330452,
+       "logits/chosen": -1.8185112476348877,
+       "logits/rejected": -1.820067048072815,
+       "logps/chosen": -0.15120986104011536,
+       "logps/rejected": -3.4781315326690674,
+       "loss": 0.0925,
+       "nll_loss": 0.06244741007685661,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.007560492493212223,
+       "rewards/margins": 0.16634607315063477,
+       "rewards/rejected": -0.17390656471252441,
+       "step": 200
+     },
+     {
+       "epoch": 0.6976744186046512,
+       "grad_norm": 5.46875,
+       "learning_rate": 3.450327796711771e-06,
+       "log_odds_chosen": 7.0073442459106445,
+       "log_odds_ratio": -0.021652357652783394,
+       "logits/chosen": -1.8007183074951172,
+       "logits/rejected": -1.8030471801757812,
+       "logps/chosen": -0.15487684309482574,
+       "logps/rejected": -3.859619140625,
+       "loss": 0.0897,
+       "nll_loss": 0.07238463312387466,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.007743841968476772,
+       "rewards/margins": 0.1852371096611023,
+       "rewards/rejected": -0.19298096001148224,
+       "step": 210
+     },
+     {
+       "epoch": 0.7308970099667774,
+       "grad_norm": 3.484375,
+       "learning_rate": 3.3709993123162106e-06,
+       "log_odds_chosen": 6.155528545379639,
+       "log_odds_ratio": -0.03304092958569527,
+       "logits/chosen": -1.827543020248413,
+       "logits/rejected": -1.8281749486923218,
+       "logps/chosen": -0.13896045088768005,
+       "logps/rejected": -3.173088312149048,
+       "loss": 0.0762,
+       "nll_loss": 0.08422436565160751,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.0069480217061936855,
+       "rewards/margins": 0.15170639753341675,
+       "rewards/rejected": -0.15865442156791687,
+       "step": 220
+     },
+     {
+       "epoch": 0.7641196013289037,
+       "grad_norm": 5.03125,
+       "learning_rate": 3.296902366978936e-06,
+       "log_odds_chosen": 7.247349739074707,
+       "log_odds_ratio": -0.028882017359137535,
+       "logits/chosen": -1.8549703359603882,
+       "logits/rejected": -1.854103446006775,
+       "logps/chosen": -0.13769736886024475,
+       "logps/rejected": -3.765294313430786,
+       "loss": 0.0634,
+       "nll_loss": 0.058101166039705276,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.006884869188070297,
+       "rewards/margins": 0.18137982487678528,
+       "rewards/rejected": -0.18826469779014587,
+       "step": 230
+     },
+     {
+       "epoch": 0.7973421926910299,
+       "grad_norm": 3.875,
+       "learning_rate": 3.2274861218395142e-06,
+       "log_odds_chosen": 7.331563472747803,
+       "log_odds_ratio": -0.05921437218785286,
+       "logits/chosen": -1.9100215435028076,
+       "logits/rejected": -1.9127085208892822,
+       "logps/chosen": -0.12960004806518555,
+       "logps/rejected": -3.6168124675750732,
+       "loss": 0.0571,
+       "nll_loss": 0.05991581082344055,
+       "rewards/accuracies": 0.9624999761581421,
+       "rewards/chosen": -0.00648000231012702,
+       "rewards/margins": 0.17436063289642334,
+       "rewards/rejected": -0.1808406263589859,
+       "step": 240
+     },
+     {
+       "epoch": 0.8305647840531561,
+       "grad_norm": 3.53125,
+       "learning_rate": 3.1622776601683796e-06,
+       "log_odds_chosen": 7.729872703552246,
+       "log_odds_ratio": -0.01583888754248619,
+       "logits/chosen": -1.9329345226287842,
+       "logits/rejected": -1.9311659336090088,
+       "logps/chosen": -0.16400082409381866,
+       "logps/rejected": -4.614955902099609,
+       "loss": 0.0623,
+       "nll_loss": 0.045454978942871094,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.008200041949748993,
+       "rewards/margins": 0.2225477695465088,
+       "rewards/rejected": -0.230747789144516,
+       "step": 250
+     },
+     {
+       "epoch": 0.8637873754152824,
+       "grad_norm": 3.15625,
+       "learning_rate": 3.1008683647302113e-06,
+       "log_odds_chosen": 7.562008857727051,
+       "log_odds_ratio": -0.06357506662607193,
+       "logits/chosen": -1.8379156589508057,
+       "logits/rejected": -1.8368641138076782,
+       "logps/chosen": -0.15229454636573792,
+       "logps/rejected": -4.107020854949951,
+       "loss": 0.0485,
+       "nll_loss": 0.033093374222517014,
+       "rewards/accuracies": 0.9750000238418579,
+       "rewards/chosen": -0.0076147266663610935,
+       "rewards/margins": 0.19773633778095245,
+       "rewards/rejected": -0.2053510695695877,
+       "step": 260
+     },
+     {
+       "epoch": 0.8970099667774086,
+       "grad_norm": 3.15625,
+       "learning_rate": 3.0429030972509227e-06,
+       "log_odds_chosen": 7.212728023529053,
+       "log_odds_ratio": -0.07752545177936554,
+       "logits/chosen": -1.7939443588256836,
+       "logits/rejected": -1.7951726913452148,
+       "logps/chosen": -0.16109412908554077,
+       "logps/rejected": -3.8484885692596436,
+       "loss": 0.0557,
+       "nll_loss": 0.05208224803209305,
+       "rewards/accuracies": 0.9624999761581421,
+       "rewards/chosen": -0.008054706268012524,
+       "rewards/margins": 0.18436971306800842,
+       "rewards/rejected": -0.19242441654205322,
+       "step": 270
+     },
+     {
+       "epoch": 0.9302325581395349,
+       "grad_norm": 9.25,
+       "learning_rate": 2.988071523335984e-06,
+       "log_odds_chosen": 7.317690372467041,
+       "log_odds_ratio": -0.03297095373272896,
+       "logits/chosen": -1.7949488162994385,
+       "logits/rejected": -1.7990652322769165,
+       "logps/chosen": -0.13610824942588806,
+       "logps/rejected": -3.9614219665527344,
+       "loss": 0.0511,
+       "nll_loss": 0.05410151928663254,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.0068054115399718285,
+       "rewards/margins": 0.19126567244529724,
+       "rewards/rejected": -0.19807109236717224,
+       "step": 280
+     },
+     {
+       "epoch": 0.9634551495016611,
+       "grad_norm": 3.796875,
+       "learning_rate": 2.9361010975735177e-06,
+       "log_odds_chosen": 7.124808311462402,
+       "log_odds_ratio": -0.059395015239715576,
+       "logits/chosen": -1.7850925922393799,
+       "logits/rejected": -1.7865415811538696,
+       "logps/chosen": -0.12057201564311981,
+       "logps/rejected": -3.576596736907959,
+       "loss": 0.0428,
+       "nll_loss": 0.03806814178824425,
+       "rewards/accuracies": 0.9750000238418579,
+       "rewards/chosen": -0.0060286009684205055,
+       "rewards/margins": 0.17280122637748718,
+       "rewards/rejected": -0.1788298487663269,
+       "step": 290
+     },
+     {
+       "epoch": 0.9966777408637874,
+       "grad_norm": 3.65625,
+       "learning_rate": 2.8867513459481293e-06,
+       "log_odds_chosen": 8.637829780578613,
+       "log_odds_ratio": -0.03425002470612526,
+       "logits/chosen": -1.8364009857177734,
+       "logits/rejected": -1.8421306610107422,
+       "logps/chosen": -0.09547251462936401,
+       "logps/rejected": -4.495790481567383,
+       "loss": 0.0425,
+       "nll_loss": 0.04902663081884384,
+       "rewards/accuracies": 0.9750000238418579,
+       "rewards/chosen": -0.004773625638335943,
+       "rewards/margins": 0.22001588344573975,
+       "rewards/rejected": -0.22478953003883362,
+       "step": 300
+     },
+     {
+       "epoch": 1.0299003322259137,
+       "grad_norm": 2.8125,
+       "learning_rate": 2.839809171235324e-06,
+       "log_odds_chosen": 7.561132907867432,
+       "log_odds_ratio": -0.03642101213335991,
+       "logits/chosen": -1.7316315174102783,
+       "logits/rejected": -1.7338483333587646,
+       "logps/chosen": -0.0959225445985794,
+       "logps/rejected": -4.059569358825684,
+       "loss": 0.0292,
+       "nll_loss": 0.028634298592805862,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.0047961268573999405,
+       "rewards/margins": 0.1981823742389679,
+       "rewards/rejected": -0.2029784917831421,
+       "step": 310
+     },
+     {
+       "epoch": 1.06312292358804,
+       "grad_norm": 2.53125,
+       "learning_rate": 2.7950849718747376e-06,
+       "log_odds_chosen": 9.18364429473877,
+       "log_odds_ratio": -0.014110135845839977,
+       "logits/chosen": -1.7149658203125,
+       "logits/rejected": -1.7151539325714111,
+       "logps/chosen": -0.12401266396045685,
+       "logps/rejected": -5.025017738342285,
+       "loss": 0.0279,
+       "nll_loss": 0.019647331908345222,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.0062006330117583275,
+       "rewards/margins": 0.24505026638507843,
+       "rewards/rejected": -0.25125089287757874,
+       "step": 320
+     },
+     {
+       "epoch": 1.0963455149501662,
+       "grad_norm": 3.78125,
+       "learning_rate": 2.752409412815902e-06,
+       "log_odds_chosen": 8.793200492858887,
+       "log_odds_ratio": -0.009492707438766956,
+       "logits/chosen": -1.7698380947113037,
+       "logits/rejected": -1.770939588546753,
+       "logps/chosen": -0.11295183002948761,
+       "logps/rejected": -4.82761812210083,
+       "loss": 0.0332,
+       "nll_loss": 0.02587791346013546,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.005647591315209866,
+       "rewards/margins": 0.23573331534862518,
+       "rewards/rejected": -0.24138090014457703,
+       "step": 330
+     },
+     {
+       "epoch": 1.1295681063122924,
+       "grad_norm": 1.78125,
+       "learning_rate": 2.711630722733202e-06,
+       "log_odds_chosen": 8.388921737670898,
+       "log_odds_ratio": -0.009843870997428894,
+       "logits/chosen": -1.7839330434799194,
+       "logits/rejected": -1.7847379446029663,
+       "logps/chosen": -0.0786074846982956,
+       "logps/rejected": -4.0679216384887695,
+       "loss": 0.0277,
+       "nll_loss": 0.022286545485258102,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.003930374514311552,
+       "rewards/margins": 0.19946573674678802,
+       "rewards/rejected": -0.20339611172676086,
+       "step": 340
+     },
+     {
+       "epoch": 1.1627906976744187,
+       "grad_norm": 2.28125,
+       "learning_rate": 2.6726124191242444e-06,
+       "log_odds_chosen": 8.371394157409668,
+       "log_odds_ratio": -0.016807865351438522,
+       "logits/chosen": -1.829673171043396,
+       "logits/rejected": -1.8303911685943604,
+       "logps/chosen": -0.10171355307102203,
+       "logps/rejected": -4.102365016937256,
+       "loss": 0.0254,
+       "nll_loss": 0.019215276464819908,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.005085677839815617,
+       "rewards/margins": 0.20003259181976318,
+       "rewards/rejected": -0.20511826872825623,
+       "step": 350
+     },
+     {
+       "epoch": 1.196013289036545,
+       "grad_norm": 3.484375,
+       "learning_rate": 2.6352313834736496e-06,
+       "log_odds_chosen": 8.600504875183105,
+       "log_odds_ratio": -0.01216288935393095,
+       "logits/chosen": -1.7985435724258423,
+       "logits/rejected": -1.8070056438446045,
+       "logps/chosen": -0.06962008774280548,
+       "logps/rejected": -4.320959568023682,
+       "loss": 0.0267,
+       "nll_loss": 0.029016951099038124,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.0034810048528015614,
+       "rewards/margins": 0.21256697177886963,
+       "rewards/rejected": -0.2160479724407196,
+       "step": 360
+     },
+     {
+       "epoch": 1.2292358803986712,
+       "grad_norm": 3.1875,
+       "learning_rate": 2.599376224550182e-06,
+       "log_odds_chosen": 9.181499481201172,
+       "log_odds_ratio": -0.009369775652885437,
+       "logits/chosen": -1.7924983501434326,
+       "logits/rejected": -1.7953475713729858,
+       "logps/chosen": -0.11782409995794296,
+       "logps/rejected": -4.833601474761963,
+       "loss": 0.0282,
+       "nll_loss": 0.02526969090104103,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.0058912052772939205,
+       "rewards/margins": 0.23578886687755585,
+       "rewards/rejected": -0.2416801005601883,
+       "step": 370
+     },
+     {
+       "epoch": 1.2624584717607974,
+       "grad_norm": 2.796875,
+       "learning_rate": 2.564945880212886e-06,
+       "log_odds_chosen": 9.619462966918945,
+       "log_odds_ratio": -0.039833612740039825,
+       "logits/chosen": -1.8319047689437866,
+       "logits/rejected": -1.8300600051879883,
+       "logps/chosen": -0.10069389641284943,
+       "logps/rejected": -5.014215469360352,
+       "loss": 0.0266,
+       "nll_loss": 0.028005924075841904,
+       "rewards/accuracies": 0.9624999761581421,
+       "rewards/chosen": -0.005034694913774729,
+       "rewards/margins": 0.24567607045173645,
+       "rewards/rejected": -0.25071078538894653,
+       "step": 380
+     },
+     {
+       "epoch": 1.2956810631229236,
+       "grad_norm": 2.59375,
+       "learning_rate": 2.5318484177091667e-06,
+       "log_odds_chosen": 8.534521102905273,
+       "log_odds_ratio": -0.01909024640917778,
+       "logits/chosen": -1.8544292449951172,
+       "logits/rejected": -1.8542238473892212,
+       "logps/chosen": -0.08696131408214569,
+       "logps/rejected": -4.430135250091553,
705
+ "loss": 0.0232,
706
+ "nll_loss": 0.02147216536104679,
707
+ "rewards/accuracies": 0.987500011920929,
708
+ "rewards/chosen": -0.004348065238445997,
709
+ "rewards/margins": 0.217158704996109,
710
+ "rewards/rejected": -0.2215067595243454,
711
+ "step": 390
712
+ },
713
+ {
714
+ "epoch": 1.3289036544850499,
715
+ "grad_norm": 1.640625,
716
+ "learning_rate": 2.5e-06,
717
+ "log_odds_chosen": 8.600648880004883,
718
+ "log_odds_ratio": -0.012287040241062641,
719
+ "logits/chosen": -1.8202216625213623,
720
+ "logits/rejected": -1.8218141794204712,
721
+ "logps/chosen": -0.09084287285804749,
722
+ "logps/rejected": -4.572846412658691,
723
+ "loss": 0.0247,
724
+ "nll_loss": 0.02059631608426571,
725
+ "rewards/accuracies": 1.0,
726
+ "rewards/chosen": -0.004542144015431404,
727
+ "rewards/margins": 0.22410018742084503,
728
+ "rewards/rejected": -0.22864234447479248,
729
+ "step": 400
730
+ },
731
+ {
732
+ "epoch": 1.3621262458471761,
733
+ "grad_norm": 3.640625,
734
+ "learning_rate": 2.4693239916239746e-06,
735
+ "log_odds_chosen": 9.651135444641113,
736
+ "log_odds_ratio": -0.00363975390791893,
737
+ "logits/chosen": -1.7791026830673218,
738
+ "logits/rejected": -1.7837848663330078,
739
+ "logps/chosen": -0.06081225723028183,
740
+ "logps/rejected": -4.943568706512451,
741
+ "loss": 0.024,
742
+ "nll_loss": 0.021141935139894485,
743
+ "rewards/accuracies": 1.0,
744
+ "rewards/chosen": -0.0030406129080802202,
745
+ "rewards/margins": 0.24413780868053436,
746
+ "rewards/rejected": -0.24717843532562256,
747
+ "step": 410
748
+ },
749
+ {
750
+ "epoch": 1.3953488372093024,
751
+ "grad_norm": 1.875,
752
+ "learning_rate": 2.4397501823713327e-06,
753
+ "log_odds_chosen": 8.751879692077637,
754
+ "log_odds_ratio": -0.010004991665482521,
755
+ "logits/chosen": -1.8006718158721924,
756
+ "logits/rejected": -1.804359793663025,
757
+ "logps/chosen": -0.0964532420039177,
758
+ "logps/rejected": -4.5417680740356445,
759
+ "loss": 0.0242,
760
+ "nll_loss": 0.025043126195669174,
761
+ "rewards/accuracies": 1.0,
762
+ "rewards/chosen": -0.004822662565857172,
763
+ "rewards/margins": 0.22226576507091522,
764
+ "rewards/rejected": -0.22708842158317566,
765
+ "step": 420
766
+ },
767
+ {
768
+ "epoch": 1.4285714285714286,
769
+ "grad_norm": 1.984375,
770
+ "learning_rate": 2.411214110852061e-06,
771
+ "log_odds_chosen": 9.289121627807617,
772
+ "log_odds_ratio": -0.015525879338383675,
773
+ "logits/chosen": -1.7658954858779907,
774
+ "logits/rejected": -1.7667725086212158,
775
+ "logps/chosen": -0.08038794249296188,
776
+ "logps/rejected": -4.9636030197143555,
777
+ "loss": 0.0201,
778
+ "nll_loss": 0.020869722589850426,
779
+ "rewards/accuracies": 1.0,
780
+ "rewards/chosen": -0.004019397310912609,
781
+ "rewards/margins": 0.24416080117225647,
782
+ "rewards/rejected": -0.24818019568920135,
783
+ "step": 430
784
+ },
785
+ {
786
+ "epoch": 1.4617940199335548,
787
+ "grad_norm": 2.09375,
788
+ "learning_rate": 2.3836564731139807e-06,
789
+ "log_odds_chosen": 8.703948974609375,
790
+ "log_odds_ratio": -0.03381601721048355,
791
+ "logits/chosen": -1.8417913913726807,
792
+ "logits/rejected": -1.845391869544983,
793
+ "logps/chosen": -0.09374421089887619,
794
+ "logps/rejected": -4.612320899963379,
795
+ "loss": 0.0229,
796
+ "nll_loss": 0.021974634379148483,
797
+ "rewards/accuracies": 0.9750000238418579,
798
+ "rewards/chosen": -0.004687210079282522,
799
+ "rewards/margins": 0.22592882812023163,
800
+ "rewards/rejected": -0.23061604797840118,
801
+ "step": 440
802
+ },
803
+ {
804
+ "epoch": 1.495016611295681,
805
+ "grad_norm": 1.890625,
806
+ "learning_rate": 2.357022603955159e-06,
807
+ "log_odds_chosen": 9.317599296569824,
808
+ "log_odds_ratio": -0.008442175574600697,
809
+ "logits/chosen": -1.9028446674346924,
810
+ "logits/rejected": -1.9078031778335571,
811
+ "logps/chosen": -0.0840989276766777,
812
+ "logps/rejected": -4.985965251922607,
813
+ "loss": 0.0277,
814
+ "nll_loss": 0.019282350316643715,
815
+ "rewards/accuracies": 1.0,
816
+ "rewards/chosen": -0.0042049465700984,
817
+ "rewards/margins": 0.24509334564208984,
818
+ "rewards/rejected": -0.24929828941822052,
819
+ "step": 450
820
+ },
821
+ {
822
+ "epoch": 1.5282392026578073,
823
+ "grad_norm": 1.109375,
824
+ "learning_rate": 2.3312620206007847e-06,
825
+ "log_odds_chosen": 8.135089874267578,
826
+ "log_odds_ratio": -0.014614465646445751,
827
+ "logits/chosen": -1.80266535282135,
828
+ "logits/rejected": -1.8127800226211548,
829
+ "logps/chosen": -0.08102138340473175,
830
+ "logps/rejected": -4.199796676635742,
831
+ "loss": 0.0208,
832
+ "nll_loss": 0.02294105850160122,
833
+ "rewards/accuracies": 1.0,
834
+ "rewards/chosen": -0.004051069263368845,
835
+ "rewards/margins": 0.20593877136707306,
836
+ "rewards/rejected": -0.20998983085155487,
837
+ "step": 460
838
+ },
839
+ {
840
+ "epoch": 1.5614617940199336,
841
+ "grad_norm": 2.40625,
842
+ "learning_rate": 2.3063280200722128e-06,
843
+ "log_odds_chosen": 9.386737823486328,
844
+ "log_odds_ratio": -0.008776369504630566,
845
+ "logits/chosen": -1.7513538599014282,
846
+ "logits/rejected": -1.7554657459259033,
847
+ "logps/chosen": -0.06230410188436508,
848
+ "logps/rejected": -4.882990837097168,
849
+ "loss": 0.0227,
850
+ "nll_loss": 0.01421122532337904,
851
+ "rewards/accuracies": 1.0,
852
+ "rewards/chosen": -0.003115205094218254,
853
+ "rewards/margins": 0.24103431403636932,
854
+ "rewards/rejected": -0.24414952099323273,
855
+ "step": 470
856
+ },
857
+ {
858
+ "epoch": 1.5946843853820598,
859
+ "grad_norm": 1.625,
860
+ "learning_rate": 2.2821773229381924e-06,
861
+ "log_odds_chosen": 8.983893394470215,
862
+ "log_odds_ratio": -0.024142052978277206,
863
+ "logits/chosen": -1.744749665260315,
864
+ "logits/rejected": -1.7481634616851807,
865
+ "logps/chosen": -0.06949851661920547,
866
+ "logps/rejected": -4.313258171081543,
867
+ "loss": 0.0222,
868
+ "nll_loss": 0.014674236066639423,
869
+ "rewards/accuracies": 0.987500011920929,
870
+ "rewards/chosen": -0.0034749258775264025,
871
+ "rewards/margins": 0.2121879756450653,
872
+ "rewards/rejected": -0.2156629115343094,
873
+ "step": 480
874
+ },
875
+ {
876
+ "epoch": 1.627906976744186,
877
+ "grad_norm": 2.0,
878
+ "learning_rate": 2.2587697572631284e-06,
879
+ "log_odds_chosen": 9.338783264160156,
880
+ "log_odds_ratio": -0.003996879793703556,
881
+ "logits/chosen": -1.791486382484436,
882
+ "logits/rejected": -1.795069694519043,
883
+ "logps/chosen": -0.06457408517599106,
884
+ "logps/rejected": -4.7583208084106445,
885
+ "loss": 0.0177,
886
+ "nll_loss": 0.015971561893820763,
887
+ "rewards/accuracies": 1.0,
888
+ "rewards/chosen": -0.0032287046778947115,
889
+ "rewards/margins": 0.23468737304210663,
890
+ "rewards/rejected": -0.23791606724262238,
891
+ "step": 490
892
+ },
893
+ {
894
+ "epoch": 1.6611295681063123,
895
+ "grad_norm": 1.6015625,
896
+ "learning_rate": 2.23606797749979e-06,
897
+ "log_odds_chosen": 9.166845321655273,
898
+ "log_odds_ratio": -0.019674357026815414,
899
+ "logits/chosen": -1.7548977136611938,
900
+ "logits/rejected": -1.7554121017456055,
901
+ "logps/chosen": -0.10164159536361694,
902
+ "logps/rejected": -4.969311714172363,
903
+ "loss": 0.0212,
904
+ "nll_loss": 0.01509636640548706,
905
+ "rewards/accuracies": 0.987500011920929,
906
+ "rewards/chosen": -0.00508207967504859,
907
+ "rewards/margins": 0.243383526802063,
908
+ "rewards/rejected": -0.24846558272838593,
909
+ "step": 500
910
+ },
911
+ {
912
+ "epoch": 1.6943521594684385,
913
+ "grad_norm": 1.28125,
914
+ "learning_rate": 2.2140372138502386e-06,
915
+ "log_odds_chosen": 8.379947662353516,
916
+ "log_odds_ratio": -0.03198238089680672,
917
+ "logits/chosen": -1.8583186864852905,
918
+ "logits/rejected": -1.8610032796859741,
919
+ "logps/chosen": -0.08590197563171387,
920
+ "logps/rejected": -4.655932426452637,
921
+ "loss": 0.0195,
922
+ "nll_loss": 0.017975686118006706,
923
+ "rewards/accuracies": 0.987500011920929,
924
+ "rewards/chosen": -0.0042950985953211784,
925
+ "rewards/margins": 0.22850151360034943,
926
+ "rewards/rejected": -0.23279662430286407,
927
+ "step": 510
928
+ },
929
+ {
930
+ "epoch": 1.7275747508305648,
931
+ "grad_norm": 3.125,
932
+ "learning_rate": 2.1926450482675734e-06,
933
+ "log_odds_chosen": 8.935117721557617,
934
+ "log_odds_ratio": -0.014949078671634197,
935
+ "logits/chosen": -1.7289230823516846,
936
+ "logits/rejected": -1.73250412940979,
937
+ "logps/chosen": -0.07164986431598663,
938
+ "logps/rejected": -4.507022857666016,
939
+ "loss": 0.0178,
940
+ "nll_loss": 0.014084184542298317,
941
+ "rewards/accuracies": 1.0,
942
+ "rewards/chosen": -0.003582493169233203,
943
+ "rewards/margins": 0.22176864743232727,
944
+ "rewards/rejected": -0.22535113990306854,
945
+ "step": 520
946
+ },
947
+ {
948
+ "epoch": 1.760797342192691,
949
+ "grad_norm": 1.71875,
950
+ "learning_rate": 2.1718612138153473e-06,
951
+ "log_odds_chosen": 10.034872055053711,
952
+ "log_odds_ratio": -0.006512313149869442,
953
+ "logits/chosen": -1.7052526473999023,
954
+ "logits/rejected": -1.7084852457046509,
955
+ "logps/chosen": -0.07591713964939117,
956
+ "logps/rejected": -5.4831976890563965,
957
+ "loss": 0.0223,
958
+ "nll_loss": 0.020342020317912102,
959
+ "rewards/accuracies": 1.0,
960
+ "rewards/chosen": -0.003795857075601816,
961
+ "rewards/margins": 0.27036404609680176,
962
+ "rewards/rejected": -0.27415987849235535,
963
+ "step": 530
964
+ },
965
+ {
966
+ "epoch": 1.7940199335548173,
967
+ "grad_norm": 4.46875,
968
+ "learning_rate": 2.151657414559676e-06,
969
+ "log_odds_chosen": 8.771195411682129,
970
+ "log_odds_ratio": -0.020360399037599564,
971
+ "logits/chosen": -1.7692371606826782,
972
+ "logits/rejected": -1.772956132888794,
973
+ "logps/chosen": -0.07401047646999359,
974
+ "logps/rejected": -4.339105129241943,
975
+ "loss": 0.0207,
976
+ "nll_loss": 0.02204059436917305,
977
+ "rewards/accuracies": 0.987500011920929,
978
+ "rewards/chosen": -0.0037005238700658083,
979
+ "rewards/margins": 0.21325473487377167,
980
+ "rewards/rejected": -0.2169552594423294,
981
+ "step": 540
982
+ },
983
+ {
984
+ "epoch": 1.8272425249169435,
985
+ "grad_norm": 2.4375,
986
+ "learning_rate": 2.132007163556104e-06,
987
+ "log_odds_chosen": 8.97265625,
988
+ "log_odds_ratio": -0.010379938408732414,
989
+ "logits/chosen": -1.761279821395874,
990
+ "logits/rejected": -1.7625354528427124,
991
+ "logps/chosen": -0.08797116577625275,
992
+ "logps/rejected": -4.506954669952393,
993
+ "loss": 0.0191,
994
+ "nll_loss": 0.015043037012219429,
995
+ "rewards/accuracies": 1.0,
996
+ "rewards/chosen": -0.004398558288812637,
997
+ "rewards/margins": 0.2209491729736328,
998
+ "rewards/rejected": -0.22534772753715515,
999
+ "step": 550
1000
+ },
1001
+ {
1002
+ "epoch": 1.8604651162790697,
1003
+ "grad_norm": 1.96875,
1004
+ "learning_rate": 2.1128856368212917e-06,
1005
+ "log_odds_chosen": 9.888033866882324,
1006
+ "log_odds_ratio": -0.00335489958524704,
1007
+ "logits/chosen": -1.7607628107070923,
1008
+ "logits/rejected": -1.7624956369400024,
1009
+ "logps/chosen": -0.06476293504238129,
1010
+ "logps/rejected": -5.111817359924316,
1011
+ "loss": 0.0194,
1012
+ "nll_loss": 0.013030583038926125,
1013
+ "rewards/accuracies": 1.0,
1014
+ "rewards/chosen": -0.003238147124648094,
1015
+ "rewards/margins": 0.2523527443408966,
1016
+ "rewards/rejected": -0.25559088587760925,
1017
+ "step": 560
1018
+ },
1019
+ {
1020
+ "epoch": 1.893687707641196,
1021
+ "grad_norm": 1.6328125,
1022
+ "learning_rate": 2.0942695414584777e-06,
1023
+ "log_odds_chosen": 8.265462875366211,
1024
+ "log_odds_ratio": -0.017327692359685898,
1025
+ "logits/chosen": -1.7851202487945557,
1026
+ "logits/rejected": -1.7864612340927124,
1027
+ "logps/chosen": -0.10817401111125946,
1028
+ "logps/rejected": -4.388330459594727,
1029
+ "loss": 0.0191,
1030
+ "nll_loss": 0.01878109760582447,
1031
+ "rewards/accuracies": 1.0,
1032
+ "rewards/chosen": -0.005408700555562973,
1033
+ "rewards/margins": 0.21400780975818634,
1034
+ "rewards/rejected": -0.2194165289402008,
1035
+ "step": 570
1036
+ },
1037
+ {
1038
+ "epoch": 1.9269102990033222,
1039
+ "grad_norm": 1.2265625,
1040
+ "learning_rate": 2.0761369963434992e-06,
1041
+ "log_odds_chosen": 8.885993003845215,
1042
+ "log_odds_ratio": -0.027743179351091385,
1043
+ "logits/chosen": -1.7333558797836304,
1044
+ "logits/rejected": -1.7339531183242798,
1045
+ "logps/chosen": -0.1336405724287033,
1046
+ "logps/rejected": -4.482719421386719,
1047
+ "loss": 0.0201,
1048
+ "nll_loss": 0.015746701508760452,
1049
+ "rewards/accuracies": 0.987500011920929,
1050
+ "rewards/chosen": -0.0066820294596254826,
1051
+ "rewards/margins": 0.21745392680168152,
1052
+ "rewards/rejected": -0.22413596510887146,
1053
+ "step": 580
1054
+ },
1055
+ {
1056
+ "epoch": 1.9601328903654485,
1057
+ "grad_norm": 2.171875,
1058
+ "learning_rate": 2.058467423981546e-06,
1059
+ "log_odds_chosen": 9.54238224029541,
1060
+ "log_odds_ratio": -0.018449265509843826,
1061
+ "logits/chosen": -1.7866191864013672,
1062
+ "logits/rejected": -1.7889735698699951,
1063
+ "logps/chosen": -0.10259035974740982,
1064
+ "logps/rejected": -4.987481117248535,
1065
+ "loss": 0.0187,
1066
+ "nll_loss": 0.0172494538128376,
1067
+ "rewards/accuracies": 1.0,
1068
+ "rewards/chosen": -0.005129518453031778,
1069
+ "rewards/margins": 0.2442445456981659,
1070
+ "rewards/rejected": -0.24937407672405243,
1071
+ "step": 590
1072
+ },
1073
+ {
1074
+ "epoch": 1.9933554817275747,
1075
+ "grad_norm": 0.890625,
1076
+ "learning_rate": 2.0412414523193154e-06,
1077
+ "log_odds_chosen": 9.205097198486328,
1078
+ "log_odds_ratio": -0.005630264058709145,
1079
+ "logits/chosen": -1.7373039722442627,
1080
+ "logits/rejected": -1.7389856576919556,
1081
+ "logps/chosen": -0.09646005928516388,
1082
+ "logps/rejected": -4.7054619789123535,
1083
+ "loss": 0.0182,
1084
+ "nll_loss": 0.02860497497022152,
1085
+ "rewards/accuracies": 1.0,
1086
+ "rewards/chosen": -0.004823002498596907,
1087
+ "rewards/margins": 0.2304501086473465,
1088
+ "rewards/rejected": -0.235273078083992,
1089
+ "step": 600
1090
+ },
1091
+ {
1092
+ "epoch": 2.026578073089701,
1093
+ "grad_norm": 1.4921875,
1094
+ "learning_rate": 2.0244408254472904e-06,
1095
+ "log_odds_chosen": 10.055734634399414,
1096
+ "log_odds_ratio": -0.011299138888716698,
1097
+ "logits/chosen": -1.7703691720962524,
1098
+ "logits/rejected": -1.771695852279663,
1099
+ "logps/chosen": -0.06641928851604462,
1100
+ "logps/rejected": -5.32825231552124,
1101
+ "loss": 0.017,
1102
+ "nll_loss": 0.013218941166996956,
1103
+ "rewards/accuracies": 1.0,
1104
+ "rewards/chosen": -0.0033209645189344883,
1105
+ "rewards/margins": 0.26309165358543396,
1106
+ "rewards/rejected": -0.2664126455783844,
1107
+ "step": 610
1108
+ },
1109
+ {
1110
+ "epoch": 2.0598006644518274,
1111
+ "grad_norm": 1.3671875,
1112
+ "learning_rate": 2.0080483222562476e-06,
1113
+ "log_odds_chosen": 10.432465553283691,
1114
+ "log_odds_ratio": -0.016602743417024612,
1115
+ "logits/chosen": -1.8167240619659424,
1116
+ "logits/rejected": -1.8178882598876953,
1117
+ "logps/chosen": -0.05541493743658066,
1118
+ "logps/rejected": -5.0302910804748535,
1119
+ "loss": 0.0145,
1120
+ "nll_loss": 0.015641603618860245,
1121
+ "rewards/accuracies": 0.987500011920929,
1122
+ "rewards/chosen": -0.0027707472909241915,
1123
+ "rewards/margins": 0.24874380230903625,
1124
+ "rewards/rejected": -0.25151461362838745,
1125
+ "step": 620
1126
+ },
1127
+ {
1128
+ "epoch": 2.0930232558139537,
1129
+ "grad_norm": 0.7421875,
1130
+ "learning_rate": 1.9920476822239895e-06,
1131
+ "log_odds_chosen": 10.32500171661377,
1132
+ "log_odds_ratio": -0.0108437929302454,
1133
+ "logits/chosen": -1.6814196109771729,
1134
+ "logits/rejected": -1.6834462881088257,
1135
+ "logps/chosen": -0.07114370167255402,
1136
+ "logps/rejected": -5.47824764251709,
1137
+ "loss": 0.0146,
1138
+ "nll_loss": 0.012577347457408905,
1139
+ "rewards/accuracies": 0.987500011920929,
1140
+ "rewards/chosen": -0.003557185409590602,
1141
+ "rewards/margins": 0.270355224609375,
1142
+ "rewards/rejected": -0.27391237020492554,
1143
+ "step": 630
1144
+ },
1145
+ {
1146
+ "epoch": 2.12624584717608,
1147
+ "grad_norm": 1.7734375,
1148
+ "learning_rate": 1.976423537605237e-06,
1149
+ "log_odds_chosen": 9.84516429901123,
1150
+ "log_odds_ratio": -0.007125245872884989,
1151
+ "logits/chosen": -1.823743224143982,
1152
+ "logits/rejected": -1.8290736675262451,
1153
+ "logps/chosen": -0.06525563448667526,
1154
+ "logps/rejected": -5.315881729125977,
1155
+ "loss": 0.0157,
1156
+ "nll_loss": 0.014731844887137413,
1157
+ "rewards/accuracies": 1.0,
1158
+ "rewards/chosen": -0.0032627820037305355,
1159
+ "rewards/margins": 0.2625313103199005,
1160
+ "rewards/rejected": -0.2657940983772278,
1161
+ "step": 640
1162
+ },
1163
+ {
1164
+ "epoch": 2.159468438538206,
1165
+ "grad_norm": 1.140625,
1166
+ "learning_rate": 1.961161351381841e-06,
1167
+ "log_odds_chosen": 11.63329029083252,
1168
+ "log_odds_ratio": -0.0004948956775479019,
1169
+ "logits/chosen": -1.805872917175293,
1170
+ "logits/rejected": -1.8104311227798462,
1171
+ "logps/chosen": -0.04712063446640968,
1172
+ "logps/rejected": -6.125610828399658,
1173
+ "loss": 0.0142,
1174
+ "nll_loss": 0.013872918672859669,
1175
+ "rewards/accuracies": 1.0,
1176
+ "rewards/chosen": -0.0023560314439237118,
1177
+ "rewards/margins": 0.3039245009422302,
1178
+ "rewards/rejected": -0.30628055334091187,
1179
+ "step": 650
1180
+ },
1181
+ {
1182
+ "epoch": 2.1926910299003324,
1183
+ "grad_norm": 1.640625,
1184
+ "learning_rate": 1.9462473604038077e-06,
1185
+ "log_odds_chosen": 10.331804275512695,
1186
+ "log_odds_ratio": -0.015263216570019722,
1187
+ "logits/chosen": -1.8137140274047852,
1188
+ "logits/rejected": -1.8163013458251953,
1189
+ "logps/chosen": -0.05831971764564514,
1190
+ "logps/rejected": -5.453424453735352,
1191
+ "loss": 0.0144,
1192
+ "nll_loss": 0.014606691896915436,
1193
+ "rewards/accuracies": 0.987500011920929,
1194
+ "rewards/chosen": -0.002915985882282257,
1195
+ "rewards/margins": 0.26975521445274353,
1196
+ "rewards/rejected": -0.2726712226867676,
1197
+ "step": 660
1198
+ },
1199
+ {
1200
+ "epoch": 2.2259136212624586,
1201
+ "grad_norm": 2.703125,
1202
+ "learning_rate": 1.9316685232156397e-06,
1203
+ "log_odds_chosen": 10.702049255371094,
1204
+ "log_odds_ratio": -0.018240805715322495,
1205
+ "logits/chosen": -1.913975715637207,
1206
+ "logits/rejected": -1.9187599420547485,
1207
+ "logps/chosen": -0.07275418192148209,
1208
+ "logps/rejected": -5.977799892425537,
1209
+ "loss": 0.0142,
1210
+ "nll_loss": 0.015033453702926636,
1211
+ "rewards/accuracies": 1.0,
1212
+ "rewards/chosen": -0.0036377091892063618,
1213
+ "rewards/margins": 0.2952522933483124,
1214
+ "rewards/rejected": -0.29889002442359924,
1215
+ "step": 670
1216
+ },
1217
+ {
1218
+ "epoch": 2.259136212624585,
1219
+ "grad_norm": 1.0546875,
1220
+ "learning_rate": 1.917412472118426e-06,
1221
+ "log_odds_chosen": 11.560079574584961,
1222
+ "log_odds_ratio": -0.0027265329845249653,
1223
+ "logits/chosen": -1.849691390991211,
1224
+ "logits/rejected": -1.8556410074234009,
1225
+ "logps/chosen": -0.05384901165962219,
1226
+ "logps/rejected": -6.353396415710449,
1227
+ "loss": 0.0171,
1228
+ "nll_loss": 0.02180541306734085,
1229
+ "rewards/accuracies": 1.0,
1230
+ "rewards/chosen": -0.0026924503035843372,
1231
+ "rewards/margins": 0.31497737765312195,
1232
+ "rewards/rejected": -0.3176698088645935,
1233
+ "step": 680
1234
+ },
1235
+ {
1236
+ "epoch": 2.292358803986711,
1237
+ "grad_norm": 1.1875,
1238
+ "learning_rate": 1.9034674690672024e-06,
1239
+ "log_odds_chosen": 11.148561477661133,
1240
+ "log_odds_ratio": -0.00223861588165164,
1241
+ "logits/chosen": -1.8589222431182861,
1242
+ "logits/rejected": -1.8631727695465088,
1243
+ "logps/chosen": -0.06614092737436295,
1244
+ "logps/rejected": -5.854241371154785,
1245
+ "loss": 0.0151,
1246
+ "nll_loss": 0.01388646848499775,
1247
+ "rewards/accuracies": 1.0,
1248
+ "rewards/chosen": -0.0033070463687181473,
1249
+ "rewards/margins": 0.28940504789352417,
1250
+ "rewards/rejected": -0.2927120625972748,
1251
+ "step": 690
1252
+ },
1253
+ {
1254
+ "epoch": 2.3255813953488373,
1255
+ "grad_norm": 0.72265625,
1256
+ "learning_rate": 1.8898223650461362e-06,
1257
+ "log_odds_chosen": 10.924173355102539,
1258
+ "log_odds_ratio": -0.006120534148067236,
1259
+ "logits/chosen": -1.833099365234375,
1260
+ "logits/rejected": -1.841202974319458,
1261
+ "logps/chosen": -0.045662157237529755,
1262
+ "logps/rejected": -5.663559913635254,
1263
+ "loss": 0.0141,
1264
+ "nll_loss": 0.014723509550094604,
1265
+ "rewards/accuracies": 1.0,
1266
+ "rewards/chosen": -0.0022831077221781015,
1267
+ "rewards/margins": 0.2808949053287506,
1268
+ "rewards/rejected": -0.2831780016422272,
1269
+ "step": 700
1270
+ },
1271
+ {
1272
+ "epoch": 2.3588039867109636,
1273
+ "grad_norm": 1.546875,
1274
+ "learning_rate": 1.876466562602004e-06,
1275
+ "log_odds_chosen": 11.659400939941406,
1276
+ "log_odds_ratio": -0.007115496788173914,
1277
+ "logits/chosen": -1.8497368097305298,
1278
+ "logits/rejected": -1.8600342273712158,
1279
+ "logps/chosen": -0.043242715299129486,
1280
+ "logps/rejected": -5.790225028991699,
1281
+ "loss": 0.014,
1282
+ "nll_loss": 0.014149373397231102,
1283
+ "rewards/accuracies": 1.0,
1284
+ "rewards/chosen": -0.002162135671824217,
1285
+ "rewards/margins": 0.28734907507896423,
1286
+ "rewards/rejected": -0.28951120376586914,
1287
+ "step": 710
1288
+ },
1289
+ {
1290
+ "epoch": 2.39202657807309,
1291
+ "grad_norm": 1.703125,
1292
+ "learning_rate": 1.863389981249825e-06,
1293
+ "log_odds_chosen": 11.449972152709961,
1294
+ "log_odds_ratio": -0.005292683839797974,
1295
+ "logits/chosen": -1.9128930568695068,
1296
+ "logits/rejected": -1.9192262887954712,
1297
+ "logps/chosen": -0.043063901364803314,
1298
+ "logps/rejected": -5.712512016296387,
1299
+ "loss": 0.0141,
1300
+ "nll_loss": 0.014921635389328003,
1301
+ "rewards/accuracies": 1.0,
1302
+ "rewards/chosen": -0.0021531949751079082,
1303
+ "rewards/margins": 0.2834724187850952,
1304
+ "rewards/rejected": -0.2856256365776062,
1305
+ "step": 720
1306
+ },
1307
+ {
1308
+ "epoch": 2.425249169435216,
1309
+ "grad_norm": 3.125,
1310
+ "learning_rate": 1.8505830254940132e-06,
1311
+ "log_odds_chosen": 10.498836517333984,
1312
+ "log_odds_ratio": -0.004357654135674238,
1313
+ "logits/chosen": -1.881967306137085,
1314
+ "logits/rejected": -1.8853543996810913,
1315
+ "logps/chosen": -0.03361859172582626,
1316
+ "logps/rejected": -5.354216575622559,
1317
+ "loss": 0.0141,
1318
+ "nll_loss": 0.011369029060006142,
1319
+ "rewards/accuracies": 1.0,
1320
+ "rewards/chosen": -0.0016809297958388925,
1321
+ "rewards/margins": 0.26602986454963684,
1322
+ "rewards/rejected": -0.2677108347415924,
1323
+ "step": 730
1324
+ },
1325
+ {
1326
+ "epoch": 2.4584717607973423,
1327
+ "grad_norm": 1.5625,
1328
+ "learning_rate": 1.8380365552345197e-06,
1329
+ "log_odds_chosen": 10.925847053527832,
1330
+ "log_odds_ratio": -0.003954787738621235,
1331
+ "logits/chosen": -1.8275858163833618,
1332
+ "logits/rejected": -1.8305232524871826,
1333
+ "logps/chosen": -0.06278284639120102,
1334
+ "logps/rejected": -5.769686698913574,
1335
+ "loss": 0.0141,
1336
+ "nll_loss": 0.013375637121498585,
1337
+ "rewards/accuracies": 1.0,
1338
+ "rewards/chosen": -0.003139142645522952,
1339
+ "rewards/margins": 0.285345196723938,
1340
+ "rewards/rejected": -0.2884843945503235,
1341
+ "step": 740
1342
+ },
1343
+ {
1344
+ "epoch": 2.4916943521594686,
1345
+ "grad_norm": 0.640625,
1346
+ "learning_rate": 1.8257418583505536e-06,
1347
+ "log_odds_chosen": 10.625173568725586,
1348
+ "log_odds_ratio": -0.0035576275549829006,
1349
+ "logits/chosen": -1.7775007486343384,
1350
+ "logits/rejected": -1.7832441329956055,
1351
+ "logps/chosen": -0.04924372583627701,
1352
+ "logps/rejected": -5.4554243087768555,
1353
+ "loss": 0.0151,
1354
+ "nll_loss": 0.015531172044575214,
1355
+ "rewards/accuracies": 1.0,
1356
+ "rewards/chosen": -0.002462186384946108,
1357
+ "rewards/margins": 0.2703090310096741,
1358
+ "rewards/rejected": -0.2727712094783783,
1359
+ "step": 750
1360
+ },
1361
+ {
1362
+ "epoch": 2.524916943521595,
1363
+ "grad_norm": 0.72265625,
1364
+ "learning_rate": 1.8136906252750293e-06,
1365
+ "log_odds_chosen": 11.038446426391602,
1366
+ "log_odds_ratio": -0.0013516563922166824,
1367
+ "logits/chosen": -1.81307053565979,
1368
+ "logits/rejected": -1.8172311782836914,
1369
+ "logps/chosen": -0.037691373378038406,
1370
+ "logps/rejected": -5.5027947425842285,
1371
+ "loss": 0.0155,
1372
+ "nll_loss": 0.011486930772662163,
1373
+ "rewards/accuracies": 1.0,
1374
+ "rewards/chosen": -0.0018845684826374054,
1375
+ "rewards/margins": 0.2732551693916321,
1376
+ "rewards/rejected": -0.275139719247818,
1377
+ "step": 760
1378
+ },
1379
+ {
1380
+ "epoch": 2.558139534883721,
1381
+ "grad_norm": 1.1015625,
1382
+ "learning_rate": 1.801874925391118e-06,
1383
+ "log_odds_chosen": 11.234697341918945,
1384
+ "log_odds_ratio": -0.005816595163196325,
1385
+ "logits/chosen": -1.8077905178070068,
1386
+ "logits/rejected": -1.8116910457611084,
1387
+ "logps/chosen": -0.06007402017712593,
1388
+ "logps/rejected": -5.977658748626709,
1389
+ "loss": 0.0135,
1390
+ "nll_loss": 0.013816078193485737,
1391
+ "rewards/accuracies": 1.0,
1392
+ "rewards/chosen": -0.0030037013348191977,
1393
+ "rewards/margins": 0.2958792448043823,
1394
+ "rewards/rejected": -0.29888293147087097,
1395
+ "step": 770
1396
+ },
1397
+ {
1398
+ "epoch": 2.5913621262458473,
1399
+ "grad_norm": 0.65234375,
1400
+ "learning_rate": 1.7902871850985824e-06,
1401
+ "log_odds_chosen": 11.535958290100098,
1402
+ "log_odds_ratio": -0.009717768058180809,
1403
+ "logits/chosen": -1.8791577816009521,
1404
+ "logits/rejected": -1.883548378944397,
1405
+ "logps/chosen": -0.051692645996809006,
1406
+ "logps/rejected": -5.989034652709961,
1407
+ "loss": 0.014,
1408
+ "nll_loss": 0.014420375227928162,
1409
+ "rewards/accuracies": 0.987500011920929,
1410
+ "rewards/chosen": -0.0025846322532743216,
1411
+ "rewards/margins": 0.29686713218688965,
1412
+ "rewards/rejected": -0.2994517385959625,
1413
+ "step": 780
1414
+ },
1415
+ {
1416
+ "epoch": 2.6245847176079735,
1417
+ "grad_norm": 0.76171875,
1418
+ "learning_rate": 1.7789201674120502e-06,
1419
+ "log_odds_chosen": 10.751108169555664,
1420
+ "log_odds_ratio": -0.01122227031737566,
1421
+ "logits/chosen": -1.8293044567108154,
1422
+ "logits/rejected": -1.8323638439178467,
1423
+ "logps/chosen": -0.05979070067405701,
1424
+ "logps/rejected": -5.497213363647461,
1425
+ "loss": 0.0157,
1426
+ "nll_loss": 0.014203609898686409,
1427
+ "rewards/accuracies": 0.987500011920929,
1428
+ "rewards/chosen": -0.0029895349871367216,
1429
+ "rewards/margins": 0.2718711495399475,
1430
+ "rewards/rejected": -0.274860680103302,
1431
+ "step": 790
1432
+ },
1433
+ {
1434
+ "epoch": 2.6578073089700998,
1435
+ "grad_norm": 0.5859375,
1436
+ "learning_rate": 1.7677669529663689e-06,
1437
+ "log_odds_chosen": 10.65892219543457,
1438
+ "log_odds_ratio": -0.006628723349422216,
1439
+ "logits/chosen": -1.8738857507705688,
1440
+ "logits/rejected": -1.877375602722168,
1441
+ "logps/chosen": -0.06150083988904953,
1442
+ "logps/rejected": -5.296011924743652,
1443
+ "loss": 0.0149,
1444
+ "nll_loss": 0.013364692218601704,
1445
+ "rewards/accuracies": 1.0,
1446
+ "rewards/chosen": -0.0030750418081879616,
1447
+ "rewards/margins": 0.2617255747318268,
1448
+ "rewards/rejected": -0.2648006081581116,
1449
+ "step": 800
1450
+ },
1451
+ {
1452
+ "epoch": 2.691029900332226,
1453
+ "grad_norm": 1.6640625,
1454
+ "learning_rate": 1.7568209223157664e-06,
1455
+ "log_odds_chosen": 11.236889839172363,
1456
+ "log_odds_ratio": -0.004805346950888634,
1457
+ "logits/chosen": -1.9045976400375366,
1458
+ "logits/rejected": -1.9087021350860596,
1459
+ "logps/chosen": -0.047368817031383514,
1460
+ "logps/rejected": -5.490727424621582,
1461
+ "loss": 0.0147,
1462
+ "nll_loss": 0.0136332456022501,
1463
+ "rewards/accuracies": 1.0,
1464
+ "rewards/chosen": -0.0023684408515691757,
1465
+ "rewards/margins": 0.2721679210662842,
1466
+ "rewards/rejected": -0.2745364010334015,
1467
+ "step": 810
1468
+ },
1469
+ {
1470
+ "epoch": 2.7242524916943522,
1471
+ "grad_norm": 0.76953125,
1472
+ "learning_rate": 1.7460757394239458e-06,
1473
+ "log_odds_chosen": 11.060879707336426,
1474
+ "log_odds_ratio": -0.0016804604092612863,
1475
+ "logits/chosen": -1.8651930093765259,
1476
+ "logits/rejected": -1.8689038753509521,
1477
+ "logps/chosen": -0.036452341824769974,
1478
+ "logps/rejected": -5.505632400512695,
1479
+ "loss": 0.013,
1480
+ "nll_loss": 0.011724123731255531,
1481
+ "rewards/accuracies": 1.0,
1482
+ "rewards/chosen": -0.0018226171378046274,
1483
+ "rewards/margins": 0.2734590172767639,
1484
+ "rewards/rejected": -0.2752816081047058,
1485
+ "step": 820
1486
+ },
1487
+ {
1488
+ "epoch": 2.7574750830564785,
1489
+ "grad_norm": 1.3828125,
1490
+ "learning_rate": 1.7355253362515584e-06,
1491
+ "log_odds_chosen": 12.019643783569336,
1492
+ "log_odds_ratio": -0.0025712151546031237,
1493
+ "logits/chosen": -1.9404821395874023,
1494
+ "logits/rejected": -1.9456230401992798,
1495
+ "logps/chosen": -0.04571037366986275,
1496
+ "logps/rejected": -6.3564581871032715,
1497
+ "loss": 0.0144,
1498
+ "nll_loss": 0.01494914572685957,
1499
+ "rewards/accuracies": 1.0,
1500
+ "rewards/chosen": -0.0022855184506624937,
1501
+ "rewards/margins": 0.31553739309310913,
1502
+ "rewards/rejected": -0.3178229033946991,
1503
+ "step": 830
1504
+ },
1505
+ {
1506
+ "epoch": 2.7906976744186047,
1507
+ "grad_norm": 1.1484375,
1508
+ "learning_rate": 1.7251638983558855e-06,
1509
+ "log_odds_chosen": 10.963711738586426,
1510
+ "log_odds_ratio": -0.004456724040210247,
1511
+ "logits/chosen": -1.8980438709259033,
1512
+ "logits/rejected": -1.9045253992080688,
1513
+ "logps/chosen": -0.04593021795153618,
1514
+ "logps/rejected": -5.473552703857422,
1515
+ "loss": 0.0145,
1516
+ "nll_loss": 0.017903735861182213,
1517
+ "rewards/accuracies": 1.0,
1518
+ "rewards/chosen": -0.002296511083841324,
1519
+ "rewards/margins": 0.27138110995292664,
1520
+ "rewards/rejected": -0.27367764711380005,
1521
+ "step": 840
1522
+ },
1523
+ {
1524
+ "epoch": 2.823920265780731,
1525
+ "grad_norm": 0.95703125,
1526
+ "learning_rate": 1.7149858514250883e-06,
1527
+ "log_odds_chosen": 10.605644226074219,
1528
+ "log_odds_ratio": -0.004892362747341394,
1529
+ "logits/chosen": -1.8568840026855469,
1530
+ "logits/rejected": -1.867110013961792,
1531
+ "logps/chosen": -0.0643405169248581,
1532
+ "logps/rejected": -5.792882919311523,
1533
+ "loss": 0.0137,
1534
+ "nll_loss": 0.01375966053456068,
1535
+ "rewards/accuracies": 1.0,
1536
+ "rewards/chosen": -0.0032170258928090334,
1537
+ "rewards/margins": 0.2864271104335785,
1538
+ "rewards/rejected": -0.28964415192604065,
1539
+ "step": 850
1540
+ },
1541
+ {
1542
+ "epoch": 2.857142857142857,
1543
+ "grad_norm": 0.578125,
1544
+ "learning_rate": 1.704985848676184e-06,
1545
+ "log_odds_chosen": 10.766576766967773,
1546
+ "log_odds_ratio": -0.01473341602832079,
1547
+ "logits/chosen": -1.8542922735214233,
1548
+ "logits/rejected": -1.8589084148406982,
1549
+ "logps/chosen": -0.07984187453985214,
1550
+ "logps/rejected": -5.777710914611816,
1551
+ "loss": 0.0152,
1552
+ "nll_loss": 0.020643722265958786,
1553
+ "rewards/accuracies": 0.987500011920929,
1554
+ "rewards/chosen": -0.00399209326133132,
1555
+ "rewards/margins": 0.2848934531211853,
1556
+ "rewards/rejected": -0.28888556361198425,
1557
+ "step": 860
1558
+ },
1559
+ {
1560
+ "epoch": 2.8903654485049834,
1561
+ "grad_norm": 0.4921875,
1562
+ "learning_rate": 1.6951587590520263e-06,
1563
+ "log_odds_chosen": 11.57789421081543,
1564
+ "log_odds_ratio": -0.0173480324447155,
1565
+ "logits/chosen": -1.7418874502182007,
1566
+ "logits/rejected": -1.7448689937591553,
1567
+ "logps/chosen": -0.054336708039045334,
1568
+ "logps/rejected": -5.907016754150391,
1569
+ "loss": 0.0129,
1570
+ "nll_loss": 0.010191375389695168,
1571
+ "rewards/accuracies": 0.987500011920929,
1572
+ "rewards/chosen": -0.002716835355386138,
1573
+ "rewards/margins": 0.2926340401172638,
1574
+ "rewards/rejected": -0.2953508496284485,
1575
+ "step": 870
1576
+ },
1577
+ {
1578
+ "epoch": 2.9235880398671097,
1579
+ "grad_norm": 0.6875,
1580
+ "learning_rate": 1.6854996561581053e-06,
1581
+ "log_odds_chosen": 11.569136619567871,
1582
+ "log_odds_ratio": -0.011355452239513397,
1583
+ "logits/chosen": -1.959159255027771,
1584
+ "logits/rejected": -1.9617881774902344,
1585
+ "logps/chosen": -0.06317956745624542,
1586
+ "logps/rejected": -6.309741020202637,
1587
+ "loss": 0.0135,
1588
+ "nll_loss": 0.014484817162156105,
1589
+ "rewards/accuracies": 0.987500011920929,
1590
+ "rewards/chosen": -0.0031589786522090435,
1591
+ "rewards/margins": 0.3123281002044678,
1592
+ "rewards/rejected": -0.3154870867729187,
1593
+ "step": 880
1594
+ },
1595
+ {
1596
+ "epoch": 2.956810631229236,
1597
+ "grad_norm": 0.5625,
1598
+ "learning_rate": 1.6760038078849776e-06,
1599
+ "log_odds_chosen": 11.66430377960205,
1600
+ "log_odds_ratio": -0.001645472482778132,
1601
+ "logits/chosen": -1.8734182119369507,
1602
+ "logits/rejected": -1.8795799016952515,
1603
+ "logps/chosen": -0.04230424761772156,
1604
+ "logps/rejected": -5.725351333618164,
1605
+ "loss": 0.0143,
1606
+ "nll_loss": 0.013855007477104664,
1607
+ "rewards/accuracies": 1.0,
1608
+ "rewards/chosen": -0.002115212380886078,
1609
+ "rewards/margins": 0.28415238857269287,
1610
+ "rewards/rejected": -0.28626757860183716,
1611
+ "step": 890
1612
+ },
1613
+ {
1614
+ "epoch": 2.990033222591362,
1615
+ "grad_norm": 0.9140625,
1616
+ "learning_rate": 1.6666666666666667e-06,
1617
+ "log_odds_chosen": 11.357660293579102,
1618
+ "log_odds_ratio": -0.0027510782238096,
1619
+ "logits/chosen": -1.856702446937561,
1620
+ "logits/rejected": -1.8652187585830688,
1621
+ "logps/chosen": -0.05658254772424698,
1622
+ "logps/rejected": -5.779760360717773,
1623
+ "loss": 0.0168,
1624
+ "nll_loss": 0.03194582462310791,
1625
+ "rewards/accuracies": 1.0,
1626
+ "rewards/chosen": -0.0028291274793446064,
1627
+ "rewards/margins": 0.2861589193344116,
1628
+ "rewards/rejected": -0.28898805379867554,
1629
+ "step": 900
1630
+ },
1631
+ {
1632
+ "epoch": 3.0,
1633
+ "step": 903,
1634
+ "total_flos": 0.0,
1635
+ "train_loss": 0.16268803322004982,
1636
+ "train_runtime": 7294.4356,
1637
+ "train_samples_per_second": 7.915,
1638
+ "train_steps_per_second": 0.124
1639
+ }
1640
+ ],
1641
+ "logging_steps": 10,
1642
+ "max_steps": 903,
1643
+ "num_input_tokens_seen": 0,
1644
+ "num_train_epochs": 3,
1645
+ "save_steps": 500,
1646
+ "stateful_callbacks": {
1647
+ "TrainerControl": {
1648
+ "args": {
1649
+ "should_epoch_stop": false,
1650
+ "should_evaluate": false,
1651
+ "should_log": false,
1652
+ "should_save": false,
1653
+ "should_training_stop": false
1654
+ },
1655
+ "attributes": {}
1656
+ }
1657
+ },
1658
+ "total_flos": 0.0,
1659
+ "train_batch_size": 8,
1660
+ "trial_name": null,
1661
+ "trial_params": null
1662
+ }
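The file above is the Trainer's state log (by its `log_history` / `logging_steps` / `TrainerControl` layout this is almost certainly `trainer_state.json`, though the filename itself is outside this hunk, so treat that as an assumption). Each entry records the TRL ORPO metrics every 10 optimizer steps; note that the `rewards/*` columns are just the `logps/*` columns scaled by the ORPO beta — the ratio is exactly 0.05 throughout. A minimal sketch for inspecting the curves offline, assuming the file has been downloaded locally as `trainer_state.json`:

```python
import json

import matplotlib.pyplot as plt

# Load the state file; log_history holds one dict per logged step,
# plus a final summary entry (step 903) that lacks the per-step keys.
with open("trainer_state.json") as f:
    state = json.load(f)

logs = [e for e in state["log_history"] if "loss" in e]  # drop the summary
steps = [e["step"] for e in logs]

plt.plot(steps, [e["loss"] for e in logs], label="loss")
plt.plot(steps, [e["rewards/margins"] for e in logs], label="rewards/margins")
plt.plot(steps, [e["nll_loss"] for e in logs], label="nll_loss")
plt.xlabel("step")
plt.legend()
plt.savefig("orpo_training_curves.png")
```

Over the range shown here the reward margin grows from roughly 0.17 to 0.30 while `nll_loss` stays in the 0.01–0.03 band, i.e. the odds-ratio term keeps pushing rejected completions down without degrading likelihood on the chosen ones.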
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:916af5787c083179b647536627641ffd6ab24535fc3a7ea8f8625f7ac46dc852
3
+ size 5496
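`training_args.bin` is the pickled arguments object the Trainer writes next to its checkpoints; at 5496 bytes it is configuration rather than weights, which is why it ships as a tiny LFS pointer. A sketch for recovering the run configuration from it, assuming the same training stack is importable in the environment (given the `log_odds_ratio` / `nll_loss` metrics above, the pickled object is presumably TRL's ORPO config, a `TrainingArguments` subclass):

```python
import torch

# Full unpickling is required (this is an arguments object, not tensors),
# so pass weights_only=False explicitly on PyTorch builds that default
# to the safe weights-only loader.
args = torch.load("training_args.bin", weights_only=False)

# Cross-check against the trainer state shown above.
print(args.num_train_epochs)             # 3
print(args.per_device_train_batch_size)  # 8
print(args.logging_steps)                # 10
print(args.save_steps)                   # 500
```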