just1nseo committed (verified)
Commit 6d917ec · 1 parent: 06a2b7d

Model save

README.md ADDED
@@ -0,0 +1,86 @@
---
license: apache-2.0
library_name: peft
tags:
- trl
- dpo
- generated_from_trainer
base_model: alignment-handbook/zephyr-7b-sft-full
model-index:
- name: zephyr-dpo-qlora-gpt4-5e-7-epoch3
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# zephyr-dpo-qlora-gpt4-5e-7-epoch3

This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on an unspecified dataset (the Trainer did not record a dataset name).
It achieves the following results on the evaluation set (the reward columns are explained after the list):
- Loss: 0.6607
- Rewards/chosen: -0.3036
- Rewards/rejected: -0.4008
- Rewards/accuracies: 0.6032
- Rewards/margins: 0.0972
- Rewards/margins Max: 0.5193
- Rewards/margins Min: -0.3338
- Rewards/margins Std: 0.3807
- Logps/rejected: -299.2639
- Logps/chosen: -315.5808
- Logits/rejected: -2.6356
- Logits/chosen: -2.6727

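The reward metrics above are DPO implicit rewards: the β-scaled gap between the fine-tuned policy's and the reference model's log-probability of a completion, with the margin being the chosen-minus-rejected difference. The snippet below is a minimal sketch of that definition only; the `beta` value and the log-probabilities are illustrative assumptions, not values recorded for this run.

```python
import torch

def dpo_implicit_reward(policy_logps: torch.Tensor, ref_logps: torch.Tensor, beta: float = 0.1) -> torch.Tensor:
    """DPO implicit reward: beta * (log pi_policy(y|x) - log pi_ref(y|x)). beta here is an assumed value."""
    return beta * (policy_logps - ref_logps)

# Illustrative sequence log-probabilities for two preference pairs (not taken from this run).
chosen = dpo_implicit_reward(torch.tensor([-315.6, -310.2]), torch.tensor([-312.5, -308.0]))
rejected = dpo_implicit_reward(torch.tensor([-299.3, -294.1]), torch.tensor([-295.2, -291.0]))

margins = chosen - rejected
print(margins.mean().item(), margins.max().item(), margins.min().item(), margins.std().item())
# Averaged over the evaluation set, these statistics correspond to the Rewards/margins columns above.
```
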
## Model description

This repository contains a LoRA (QLoRA) adapter for [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full), trained with DPO via TRL and PEFT. Only the adapter weights (`adapter_model.safetensors`, ≈671 MB) are stored here; the base model is loaded separately.

## Intended uses & limitations

More information needed

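As a starting point for inference, the DPO-trained LoRA adapter can be applied to the base model with PEFT. The sketch below assumes the adapter repository id is `just1nseo/zephyr-dpo-qlora-gpt4-5e-7-epoch3`; dtype, device placement, and generation settings are illustrative.

```python
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_id = "just1nseo/zephyr-dpo-qlora-gpt4-5e-7-epoch3"  # assumed repository id

# Loads the base model referenced in the adapter config and applies the LoRA weights on top.
model = AutoPeftModelForCausalLM.from_pretrained(
    adapter_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("alignment-handbook/zephyr-7b-sft-full")

messages = [{"role": "user", "content": "Explain DPO in one sentence."}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
output_ids = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```
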
## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (a configuration sketch follows the list):
- learning_rate: 5e-07
- train_batch_size: 2
- eval_batch_size: 4
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- total_train_batch_size: 16
- total_eval_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 3

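Per the `trl`, `dpo`, and `peft` tags, this run was produced with TRL's DPO trainer on top of a QLoRA adapter. The sketch below shows how a comparable run might be configured with the hyperparameters listed above; the preference dataset, the DPO `beta`, and the LoRA settings are illustrative assumptions, and the exact `DPOTrainer` signature varies across TRL versions.

```python
import torch
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

base_id = "alignment-handbook/zephyr-7b-sft-full"
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(base_id)

# Placeholder dataset name: a preference dataset with "prompt", "chosen", and "rejected" columns.
train_dataset = load_dataset("your/preference-dataset", split="train")
eval_dataset = load_dataset("your/preference-dataset", split="test")

# Assumed LoRA settings (the adapter's actual config is not shown in this diff).
peft_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, task_type="CAUSAL_LM")

training_args = TrainingArguments(
    output_dir="zephyr-dpo-qlora-gpt4-5e-7-epoch3",
    learning_rate=5e-7,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=4,
    num_train_epochs=3,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    bf16=True,
    logging_steps=10,
    evaluation_strategy="steps",
    eval_steps=100,
)

trainer = DPOTrainer(
    model,
    ref_model=None,          # with a PEFT adapter, the frozen base model serves as the reference
    args=training_args,
    beta=0.1,                # assumed; the beta used for this run is not recorded in the card
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    tokenizer=tokenizer,
    peft_config=peft_config,
)
trainer.train()
```

With 8 devices as listed above, the per-device batch size of 2 yields the reported total train batch size of 16.
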
### Training results

| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Rewards/margins Max | Rewards/margins Min | Rewards/margins Std | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
|:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:-------------------:|:-------------------:|:-------------------:|:--------------:|:------------:|:---------------:|:-------------:|
| 0.6815 | 0.28 | 100 | 0.6918 | -0.0019 | -0.0055 | 0.5516 | 0.0037 | 0.0181 | -0.0087 | 0.0120 | -259.7351 | -285.4075 | -2.8079 | -2.8531 |
| 0.6235 | 0.56 | 200 | 0.6873 | -0.0383 | -0.0542 | 0.5873 | 0.0160 | 0.0859 | -0.0499 | 0.0601 | -264.6065 | -289.0478 | -2.7712 | -2.8159 |
| 0.5521 | 0.85 | 300 | 0.6808 | -0.1327 | -0.1683 | 0.5952 | 0.0356 | 0.1823 | -0.1064 | 0.1266 | -276.0095 | -298.4897 | -2.7261 | -2.7701 |
| 0.4853 | 1.13 | 400 | 0.6749 | -0.2053 | -0.2614 | 0.6032 | 0.0561 | 0.2952 | -0.1704 | 0.2056 | -285.3263 | -305.7520 | -2.6873 | -2.7295 |
| 0.4561 | 1.41 | 500 | 0.6651 | -0.1807 | -0.2628 | 0.5913 | 0.0821 | 0.4091 | -0.2388 | 0.2874 | -285.4612 | -303.2937 | -2.6622 | -2.7037 |
| 0.4337 | 1.69 | 600 | 0.6630 | -0.2648 | -0.3479 | 0.6111 | 0.0831 | 0.4556 | -0.2917 | 0.3299 | -293.9761 | -311.7008 | -2.6522 | -2.6912 |
| 0.4052 | 1.97 | 700 | 0.6606 | -0.2499 | -0.3494 | 0.6151 | 0.0995 | 0.5023 | -0.3041 | 0.3604 | -294.1273 | -310.2143 | -2.6437 | -2.6819 |
| 0.3797 | 2.25 | 800 | 0.6601 | -0.2711 | -0.3716 | 0.6151 | 0.1005 | 0.5194 | -0.3194 | 0.3750 | -296.3420 | -312.3301 | -2.6373 | -2.6750 |
| 0.3692 | 2.54 | 900 | 0.6601 | -0.2914 | -0.3911 | 0.6032 | 0.0997 | 0.5207 | -0.3303 | 0.3804 | -298.2907 | -314.3626 | -2.6357 | -2.6730 |
| 0.3953 | 2.82 | 1000 | 0.6607 | -0.3036 | -0.4008 | 0.6032 | 0.0972 | 0.5193 | -0.3338 | 0.3807 | -299.2639 | -315.5808 | -2.6356 | -2.6727 |

### Framework versions

- PEFT 0.7.1
- Transformers 4.39.0.dev0
- Pytorch 2.1.2+cu121
- Datasets 2.14.6
- Tokenizers 0.15.2
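
If a standalone checkpoint is preferred over loading the base model plus adapter, PEFT can merge the LoRA weights into the base model. A minimal sketch; the repository id and output directory are assumptions:

```python
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained("just1nseo/zephyr-dpo-qlora-gpt4-5e-7-epoch3")  # assumed repo id
merged = model.merge_and_unload()  # folds the LoRA deltas into the base weights
merged.save_pretrained("zephyr-dpo-qlora-gpt4-5e-7-epoch3-merged")  # illustrative output directory
```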
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3fbf96d7d55be61b40cdd77310b48d43fc9eed3c8b8d3b061532900818cf3b31
+oid sha256:672d819f59c2f9d7b0f029319e07dc5b663d8ed780ef17314147f1d95df192b6
 size 671150064
all_results.json ADDED
@@ -0,0 +1,8 @@
{
    "epoch": 3.0,
    "train_loss": 0.4860824931955114,
    "train_runtime": 9341.4813,
    "train_samples": 5678,
    "train_samples_per_second": 1.823,
    "train_steps_per_second": 0.114
}
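
As a quick consistency check on these figures, `train_samples * epoch / train_runtime` reproduces the reported `train_samples_per_second`:

```python
train_samples, epochs, train_runtime = 5678, 3.0, 9341.4813
print(round(train_samples * epochs / train_runtime, 3))  # 1.823, matching train_samples_per_second
```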
runs/Jul29_12-01-01_node03/events.out.tfevents.1722222464.node03.4084291.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e56380fe6f1430afcbcccd359bc8fa12a579239c8b184e5644964c951a6628c0
-size 103293
+oid sha256:94d963811fc6e7fe602be10e3f00e5d2a0c577853a9c600576a7fd0b20eebbfe
+size 108927
train_results.json ADDED
@@ -0,0 +1,8 @@
{
    "epoch": 3.0,
    "train_loss": 0.4860824931955114,
    "train_runtime": 9341.4813,
    "train_samples": 5678,
    "train_samples_per_second": 1.823,
    "train_steps_per_second": 0.114
}
trainer_state.json ADDED
@@ -0,0 +1,2146 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 3.0,
5
+ "eval_steps": 100,
6
+ "global_step": 1065,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "grad_norm": 2.1187476009028985,
14
+ "learning_rate": 4.6728971962616815e-09,
15
+ "logits/chosen": -2.8477635383605957,
16
+ "logits/rejected": -2.8469698429107666,
17
+ "logps/chosen": -522.6112670898438,
18
+ "logps/rejected": -359.48583984375,
19
+ "loss": 0.6931,
20
+ "rewards/accuracies": 0.0,
21
+ "rewards/chosen": 0.0,
22
+ "rewards/margins": 0.0,
23
+ "rewards/margins_max": 0.0,
24
+ "rewards/margins_min": 0.0,
25
+ "rewards/margins_std": 0.0,
26
+ "rewards/rejected": 0.0,
27
+ "step": 1
28
+ },
29
+ {
30
+ "epoch": 0.03,
31
+ "grad_norm": 9.45301120153611,
32
+ "learning_rate": 4.672897196261682e-08,
33
+ "logits/chosen": -2.920534610748291,
34
+ "logits/rejected": -2.796057939529419,
35
+ "logps/chosen": -313.45556640625,
36
+ "logps/rejected": -170.39085388183594,
37
+ "loss": 0.6933,
38
+ "rewards/accuracies": 0.3888888955116272,
39
+ "rewards/chosen": 0.00014884205302223563,
40
+ "rewards/margins": 0.0004137727664783597,
41
+ "rewards/margins_max": 0.0010748545173555613,
42
+ "rewards/margins_min": -0.0002473089552950114,
43
+ "rewards/margins_std": 0.0009349107276648283,
44
+ "rewards/rejected": -0.0002649306843522936,
45
+ "step": 10
46
+ },
47
+ {
48
+ "epoch": 0.06,
49
+ "grad_norm": 2.057969985068329,
50
+ "learning_rate": 9.345794392523364e-08,
51
+ "logits/chosen": -2.7648427486419678,
52
+ "logits/rejected": -2.7124834060668945,
53
+ "logps/chosen": -381.097412109375,
54
+ "logps/rejected": -244.3661651611328,
55
+ "loss": 0.6931,
56
+ "rewards/accuracies": 0.6000000238418579,
57
+ "rewards/chosen": 0.0001247787440661341,
58
+ "rewards/margins": 0.00046473107067868114,
59
+ "rewards/margins_max": 0.002304071094840765,
60
+ "rewards/margins_min": -0.0013746089534834027,
61
+ "rewards/margins_std": 0.002601219806820154,
62
+ "rewards/rejected": -0.00033995244302786887,
63
+ "step": 20
64
+ },
65
+ {
66
+ "epoch": 0.08,
67
+ "grad_norm": 2.3298229196080453,
68
+ "learning_rate": 1.4018691588785045e-07,
69
+ "logits/chosen": -2.8766956329345703,
70
+ "logits/rejected": -2.8248698711395264,
71
+ "logps/chosen": -376.1761169433594,
72
+ "logps/rejected": -252.6289825439453,
73
+ "loss": 0.6928,
74
+ "rewards/accuracies": 0.5,
75
+ "rewards/chosen": -0.0007242198335006833,
76
+ "rewards/margins": -0.0010633254423737526,
77
+ "rewards/margins_max": 0.0014907626900821924,
78
+ "rewards/margins_min": -0.003617413341999054,
79
+ "rewards/margins_std": 0.003612025873735547,
80
+ "rewards/rejected": 0.0003391056088730693,
81
+ "step": 30
82
+ },
83
+ {
84
+ "epoch": 0.11,
85
+ "grad_norm": 1.9887297898182148,
86
+ "learning_rate": 1.8691588785046729e-07,
87
+ "logits/chosen": -2.7375640869140625,
88
+ "logits/rejected": -2.7709343433380127,
89
+ "logps/chosen": -306.8404541015625,
90
+ "logps/rejected": -317.5980529785156,
91
+ "loss": 0.6925,
92
+ "rewards/accuracies": 0.699999988079071,
93
+ "rewards/chosen": 0.0008523034630343318,
94
+ "rewards/margins": 0.0015208481345325708,
95
+ "rewards/margins_max": 0.003997668623924255,
96
+ "rewards/margins_min": -0.0009559727041050792,
97
+ "rewards/margins_std": 0.0035027533303946257,
98
+ "rewards/rejected": -0.0006685447879135609,
99
+ "step": 40
100
+ },
101
+ {
102
+ "epoch": 0.14,
103
+ "grad_norm": 2.366739060633413,
104
+ "learning_rate": 2.336448598130841e-07,
105
+ "logits/chosen": -2.7989935874938965,
106
+ "logits/rejected": -2.7091264724731445,
107
+ "logps/chosen": -244.9490966796875,
108
+ "logps/rejected": -174.29336547851562,
109
+ "loss": 0.6918,
110
+ "rewards/accuracies": 0.75,
111
+ "rewards/chosen": 0.0027394755743443966,
112
+ "rewards/margins": 0.003348238067701459,
113
+ "rewards/margins_max": 0.00612503569573164,
114
+ "rewards/margins_min": 0.000571439799387008,
115
+ "rewards/margins_std": 0.003926985897123814,
116
+ "rewards/rejected": -0.0006087622605264187,
117
+ "step": 50
118
+ },
119
+ {
120
+ "epoch": 0.17,
121
+ "grad_norm": 2.3558875576706133,
122
+ "learning_rate": 2.803738317757009e-07,
123
+ "logits/chosen": -2.7635130882263184,
124
+ "logits/rejected": -2.6960761547088623,
125
+ "logps/chosen": -267.0390625,
126
+ "logps/rejected": -235.6971893310547,
127
+ "loss": 0.6908,
128
+ "rewards/accuracies": 0.8999999761581421,
129
+ "rewards/chosen": 0.003963738679885864,
130
+ "rewards/margins": 0.005967114120721817,
131
+ "rewards/margins_max": 0.00903690978884697,
132
+ "rewards/margins_min": 0.0028973170556128025,
133
+ "rewards/margins_std": 0.00434134854003787,
134
+ "rewards/rejected": -0.0020033749751746655,
135
+ "step": 60
136
+ },
137
+ {
138
+ "epoch": 0.2,
139
+ "grad_norm": 1.8314812654145756,
140
+ "learning_rate": 3.271028037383177e-07,
141
+ "logits/chosen": -2.696708917617798,
142
+ "logits/rejected": -2.699219226837158,
143
+ "logps/chosen": -333.2772521972656,
144
+ "logps/rejected": -214.91384887695312,
145
+ "loss": 0.6894,
146
+ "rewards/accuracies": 0.8999999761581421,
147
+ "rewards/chosen": 0.006634596735239029,
148
+ "rewards/margins": 0.007753113750368357,
149
+ "rewards/margins_max": 0.01218687929213047,
150
+ "rewards/margins_min": 0.003319349605590105,
151
+ "rewards/margins_std": 0.006270290352404118,
152
+ "rewards/rejected": -0.001118516898714006,
153
+ "step": 70
154
+ },
155
+ {
156
+ "epoch": 0.23,
157
+ "grad_norm": 2.0518785429362425,
158
+ "learning_rate": 3.7383177570093457e-07,
159
+ "logits/chosen": -2.9442861080169678,
160
+ "logits/rejected": -2.8594822883605957,
161
+ "logps/chosen": -336.1695861816406,
162
+ "logps/rejected": -287.2829895019531,
163
+ "loss": 0.6877,
164
+ "rewards/accuracies": 0.949999988079071,
165
+ "rewards/chosen": 0.008798221126198769,
166
+ "rewards/margins": 0.010371741838753223,
167
+ "rewards/margins_max": 0.016412314027547836,
168
+ "rewards/margins_min": 0.004331169184297323,
169
+ "rewards/margins_std": 0.008542660623788834,
170
+ "rewards/rejected": -0.0015735204797238111,
171
+ "step": 80
172
+ },
173
+ {
174
+ "epoch": 0.25,
175
+ "grad_norm": 2.4705637775674445,
176
+ "learning_rate": 4.205607476635514e-07,
177
+ "logits/chosen": -2.7755093574523926,
178
+ "logits/rejected": -2.766479730606079,
179
+ "logps/chosen": -275.9761657714844,
180
+ "logps/rejected": -203.57989501953125,
181
+ "loss": 0.6849,
182
+ "rewards/accuracies": 0.8999999761581421,
183
+ "rewards/chosen": 0.009990612044930458,
184
+ "rewards/margins": 0.013643826358020306,
185
+ "rewards/margins_max": 0.023769836872816086,
186
+ "rewards/margins_min": 0.0035178144462406635,
187
+ "rewards/margins_std": 0.014320341870188713,
188
+ "rewards/rejected": -0.003653213381767273,
189
+ "step": 90
190
+ },
191
+ {
192
+ "epoch": 0.28,
193
+ "grad_norm": 3.423377277744714,
194
+ "learning_rate": 4.672897196261682e-07,
195
+ "logits/chosen": -2.963571548461914,
196
+ "logits/rejected": -2.890608787536621,
197
+ "logps/chosen": -456.03790283203125,
198
+ "logps/rejected": -342.5239562988281,
199
+ "loss": 0.6815,
200
+ "rewards/accuracies": 0.949999988079071,
201
+ "rewards/chosen": 0.022206444293260574,
202
+ "rewards/margins": 0.02596345543861389,
203
+ "rewards/margins_max": 0.03808971494436264,
204
+ "rewards/margins_min": 0.013837194070219994,
205
+ "rewards/margins_std": 0.017149122431874275,
206
+ "rewards/rejected": -0.003757009282708168,
207
+ "step": 100
208
+ },
209
+ {
210
+ "epoch": 0.28,
211
+ "eval_logits/chosen": -2.8531277179718018,
212
+ "eval_logits/rejected": -2.8079347610473633,
213
+ "eval_logps/chosen": -285.407470703125,
214
+ "eval_logps/rejected": -259.7350769042969,
215
+ "eval_loss": 0.691760778427124,
216
+ "eval_rewards/accuracies": 0.5515872836112976,
217
+ "eval_rewards/chosen": -0.001862519420683384,
218
+ "eval_rewards/margins": 0.0036616926081478596,
219
+ "eval_rewards/margins_max": 0.018075836822390556,
220
+ "eval_rewards/margins_min": -0.008730978704988956,
221
+ "eval_rewards/margins_std": 0.011965448036789894,
222
+ "eval_rewards/rejected": -0.005524212494492531,
223
+ "eval_runtime": 284.5486,
224
+ "eval_samples_per_second": 7.029,
225
+ "eval_steps_per_second": 0.221,
226
+ "step": 100
227
+ },
228
+ {
229
+ "epoch": 0.31,
230
+ "grad_norm": 2.5097052065928027,
231
+ "learning_rate": 4.999879018839287e-07,
232
+ "logits/chosen": -2.851182460784912,
233
+ "logits/rejected": -2.726973056793213,
234
+ "logps/chosen": -325.54608154296875,
235
+ "logps/rejected": -247.0763702392578,
236
+ "loss": 0.6763,
237
+ "rewards/accuracies": 0.8999999761581421,
238
+ "rewards/chosen": 0.02153041586279869,
239
+ "rewards/margins": 0.028114447370171547,
240
+ "rewards/margins_max": 0.047935038805007935,
241
+ "rewards/margins_min": 0.008293859660625458,
242
+ "rewards/margins_std": 0.02803054451942444,
243
+ "rewards/rejected": -0.006584033370018005,
244
+ "step": 110
245
+ },
246
+ {
247
+ "epoch": 0.34,
248
+ "grad_norm": 2.0812989283586854,
249
+ "learning_rate": 4.997728568369408e-07,
250
+ "logits/chosen": -2.974186897277832,
251
+ "logits/rejected": -2.940366506576538,
252
+ "logps/chosen": -368.3757629394531,
253
+ "logps/rejected": -332.0965576171875,
254
+ "loss": 0.6727,
255
+ "rewards/accuracies": 0.8999999761581421,
256
+ "rewards/chosen": 0.03565026447176933,
257
+ "rewards/margins": 0.040509190410375595,
258
+ "rewards/margins_max": 0.06030985713005066,
259
+ "rewards/margins_min": 0.02070852555334568,
260
+ "rewards/margins_std": 0.02800237201154232,
261
+ "rewards/rejected": -0.004858926869928837,
262
+ "step": 120
263
+ },
264
+ {
265
+ "epoch": 0.37,
266
+ "grad_norm": 2.0440187983236333,
267
+ "learning_rate": 4.992892309373227e-07,
268
+ "logits/chosen": -2.8396294116973877,
269
+ "logits/rejected": -2.754568338394165,
270
+ "logps/chosen": -385.7127380371094,
271
+ "logps/rejected": -260.06982421875,
272
+ "loss": 0.6628,
273
+ "rewards/accuracies": 1.0,
274
+ "rewards/chosen": 0.05259498208761215,
275
+ "rewards/margins": 0.07006160914897919,
276
+ "rewards/margins_max": 0.09621742367744446,
277
+ "rewards/margins_min": 0.04390579089522362,
278
+ "rewards/margins_std": 0.036989908665418625,
279
+ "rewards/rejected": -0.017466628924012184,
280
+ "step": 130
281
+ },
282
+ {
283
+ "epoch": 0.39,
284
+ "grad_norm": 1.7570365040633982,
285
+ "learning_rate": 4.985375442281968e-07,
286
+ "logits/chosen": -2.810422658920288,
287
+ "logits/rejected": -2.7825303077697754,
288
+ "logps/chosen": -346.6025085449219,
289
+ "logps/rejected": -234.14028930664062,
290
+ "loss": 0.6609,
291
+ "rewards/accuracies": 0.949999988079071,
292
+ "rewards/chosen": 0.046087414026260376,
293
+ "rewards/margins": 0.07035960257053375,
294
+ "rewards/margins_max": 0.1019645556807518,
295
+ "rewards/margins_min": 0.03875464200973511,
296
+ "rewards/margins_std": 0.04469615966081619,
297
+ "rewards/rejected": -0.02427218295633793,
298
+ "step": 140
299
+ },
300
+ {
301
+ "epoch": 0.42,
302
+ "grad_norm": 2.1174766652736734,
303
+ "learning_rate": 4.975186049985817e-07,
304
+ "logits/chosen": -2.8763267993927,
305
+ "logits/rejected": -2.8073649406433105,
306
+ "logps/chosen": -292.82110595703125,
307
+ "logps/rejected": -249.9117889404297,
308
+ "loss": 0.656,
309
+ "rewards/accuracies": 1.0,
310
+ "rewards/chosen": 0.040011487901210785,
311
+ "rewards/margins": 0.06143321841955185,
312
+ "rewards/margins_max": 0.09422849118709564,
313
+ "rewards/margins_min": 0.028637951239943504,
314
+ "rewards/margins_std": 0.046379510313272476,
315
+ "rewards/rejected": -0.021421726793050766,
316
+ "step": 150
317
+ },
318
+ {
319
+ "epoch": 0.45,
320
+ "grad_norm": 1.8832476734272086,
321
+ "learning_rate": 4.962335089142375e-07,
322
+ "logits/chosen": -2.885507822036743,
323
+ "logits/rejected": -2.7683348655700684,
324
+ "logps/chosen": -324.96630859375,
325
+ "logps/rejected": -245.7427520751953,
326
+ "loss": 0.6451,
327
+ "rewards/accuracies": 0.949999988079071,
328
+ "rewards/chosen": 0.0675334483385086,
329
+ "rewards/margins": 0.09363511204719543,
330
+ "rewards/margins_max": 0.16065378487110138,
331
+ "rewards/margins_min": 0.026616457849740982,
332
+ "rewards/margins_std": 0.09477868676185608,
333
+ "rewards/rejected": -0.026101673021912575,
334
+ "step": 160
335
+ },
336
+ {
337
+ "epoch": 0.48,
338
+ "grad_norm": 1.8398878088209214,
339
+ "learning_rate": 4.946836378394966e-07,
340
+ "logits/chosen": -2.923537254333496,
341
+ "logits/rejected": -2.767728328704834,
342
+ "logps/chosen": -389.24859619140625,
343
+ "logps/rejected": -236.61965942382812,
344
+ "loss": 0.6424,
345
+ "rewards/accuracies": 1.0,
346
+ "rewards/chosen": 0.11232654005289078,
347
+ "rewards/margins": 0.14306743443012238,
348
+ "rewards/margins_max": 0.20021724700927734,
349
+ "rewards/margins_min": 0.08591761440038681,
350
+ "rewards/margins_std": 0.08082203567028046,
351
+ "rewards/rejected": -0.0307408906519413,
352
+ "step": 170
353
+ },
354
+ {
355
+ "epoch": 0.51,
356
+ "grad_norm": 2.1174596632681406,
357
+ "learning_rate": 4.92870658351344e-07,
358
+ "logits/chosen": -2.8234031200408936,
359
+ "logits/rejected": -2.7299137115478516,
360
+ "logps/chosen": -316.28955078125,
361
+ "logps/rejected": -382.33795166015625,
362
+ "loss": 0.6352,
363
+ "rewards/accuracies": 0.8999999761581421,
364
+ "rewards/chosen": 0.07846479117870331,
365
+ "rewards/margins": 0.10477204620838165,
366
+ "rewards/margins_max": 0.1448402851819992,
367
+ "rewards/margins_min": 0.0647037997841835,
368
+ "rewards/margins_std": 0.05666505545377731,
369
+ "rewards/rejected": -0.026307255029678345,
370
+ "step": 180
371
+ },
372
+ {
373
+ "epoch": 0.54,
374
+ "grad_norm": 2.6042942677071843,
375
+ "learning_rate": 4.90796519947347e-07,
376
+ "logits/chosen": -2.763111114501953,
377
+ "logits/rejected": -2.5937225818634033,
378
+ "logps/chosen": -443.094970703125,
379
+ "logps/rejected": -226.3105010986328,
380
+ "loss": 0.6271,
381
+ "rewards/accuracies": 0.8999999761581421,
382
+ "rewards/chosen": 0.1301725208759308,
383
+ "rewards/margins": 0.16800908744335175,
384
+ "rewards/margins_max": 0.2520313858985901,
385
+ "rewards/margins_min": 0.08398674428462982,
386
+ "rewards/margins_std": 0.1188255324959755,
387
+ "rewards/rejected": -0.03783654794096947,
388
+ "step": 190
389
+ },
390
+ {
391
+ "epoch": 0.56,
392
+ "grad_norm": 2.0876444144177917,
393
+ "learning_rate": 4.88463452949359e-07,
394
+ "logits/chosen": -2.899054765701294,
395
+ "logits/rejected": -2.7967333793640137,
396
+ "logps/chosen": -320.50201416015625,
397
+ "logps/rejected": -210.9119873046875,
398
+ "loss": 0.6235,
399
+ "rewards/accuracies": 0.949999988079071,
400
+ "rewards/chosen": 0.10293698310852051,
401
+ "rewards/margins": 0.16662296652793884,
402
+ "rewards/margins_max": 0.2462502270936966,
403
+ "rewards/margins_min": 0.08699572086334229,
404
+ "rewards/margins_std": 0.11260994523763657,
405
+ "rewards/rejected": -0.06368599832057953,
406
+ "step": 200
407
+ },
408
+ {
409
+ "epoch": 0.56,
410
+ "eval_logits/chosen": -2.8159401416778564,
411
+ "eval_logits/rejected": -2.7712416648864746,
412
+ "eval_logps/chosen": -289.04779052734375,
413
+ "eval_logps/rejected": -264.60650634765625,
414
+ "eval_loss": 0.6873364448547363,
415
+ "eval_rewards/accuracies": 0.5873016119003296,
416
+ "eval_rewards/chosen": -0.03826585412025452,
417
+ "eval_rewards/margins": 0.015972578898072243,
418
+ "eval_rewards/margins_max": 0.08591616153717041,
419
+ "eval_rewards/margins_min": -0.049886591732501984,
420
+ "eval_rewards/margins_std": 0.060073789209127426,
421
+ "eval_rewards/rejected": -0.05423842743039131,
422
+ "eval_runtime": 282.4152,
423
+ "eval_samples_per_second": 7.082,
424
+ "eval_steps_per_second": 0.223,
425
+ "step": 200
426
+ },
427
+ {
428
+ "epoch": 0.59,
429
+ "grad_norm": 1.9895443604598388,
430
+ "learning_rate": 4.858739661052539e-07,
431
+ "logits/chosen": -2.6872963905334473,
432
+ "logits/rejected": -2.5995287895202637,
433
+ "logps/chosen": -346.6341247558594,
434
+ "logps/rejected": -273.494873046875,
435
+ "loss": 0.6138,
436
+ "rewards/accuracies": 0.949999988079071,
437
+ "rewards/chosen": 0.10202369838953018,
438
+ "rewards/margins": 0.1488794982433319,
439
+ "rewards/margins_max": 0.25132912397384644,
440
+ "rewards/margins_min": 0.046429865062236786,
441
+ "rewards/margins_std": 0.14488564431667328,
442
+ "rewards/rejected": -0.04685578867793083,
443
+ "step": 210
444
+ },
445
+ {
446
+ "epoch": 0.62,
447
+ "grad_norm": 2.038515214808528,
448
+ "learning_rate": 4.830308438912687e-07,
449
+ "logits/chosen": -2.9196786880493164,
450
+ "logits/rejected": -2.7616395950317383,
451
+ "logps/chosen": -391.030029296875,
452
+ "logps/rejected": -285.9990234375,
453
+ "loss": 0.6046,
454
+ "rewards/accuracies": 1.0,
455
+ "rewards/chosen": 0.18239402770996094,
456
+ "rewards/margins": 0.25943291187286377,
457
+ "rewards/margins_max": 0.3546218276023865,
458
+ "rewards/margins_min": 0.16424404084682465,
459
+ "rewards/margins_std": 0.134617418050766,
460
+ "rewards/rejected": -0.07703889906406403,
461
+ "step": 220
462
+ },
463
+ {
464
+ "epoch": 0.65,
465
+ "grad_norm": 2.223677630134096,
466
+ "learning_rate": 4.799371435178545e-07,
467
+ "logits/chosen": -2.8694205284118652,
468
+ "logits/rejected": -2.7869362831115723,
469
+ "logps/chosen": -371.1324157714844,
470
+ "logps/rejected": -342.9097900390625,
471
+ "loss": 0.5936,
472
+ "rewards/accuracies": 0.8500000238418579,
473
+ "rewards/chosen": 0.09625955671072006,
474
+ "rewards/margins": 0.1594955176115036,
475
+ "rewards/margins_max": 0.2610929310321808,
476
+ "rewards/margins_min": 0.057898085564374924,
477
+ "rewards/margins_std": 0.14368046820163727,
478
+ "rewards/rejected": -0.06323595345020294,
479
+ "step": 230
480
+ },
481
+ {
482
+ "epoch": 0.68,
483
+ "grad_norm": 1.8530691591955215,
484
+ "learning_rate": 4.765961916422574e-07,
485
+ "logits/chosen": -2.8748717308044434,
486
+ "logits/rejected": -2.7152183055877686,
487
+ "logps/chosen": -333.4825134277344,
488
+ "logps/rejected": -300.70013427734375,
489
+ "loss": 0.6075,
490
+ "rewards/accuracies": 0.8999999761581421,
491
+ "rewards/chosen": 0.07665253430604935,
492
+ "rewards/margins": 0.14553789794445038,
493
+ "rewards/margins_max": 0.21554379165172577,
494
+ "rewards/margins_min": 0.07553200423717499,
495
+ "rewards/margins_std": 0.09900327771902084,
496
+ "rewards/rejected": -0.06888536363840103,
497
+ "step": 240
498
+ },
499
+ {
500
+ "epoch": 0.7,
501
+ "grad_norm": 2.0510009647926952,
502
+ "learning_rate": 4.730115807913626e-07,
503
+ "logits/chosen": -2.8893144130706787,
504
+ "logits/rejected": -2.7036194801330566,
505
+ "logps/chosen": -399.37310791015625,
506
+ "logps/rejected": -265.6583251953125,
507
+ "loss": 0.5851,
508
+ "rewards/accuracies": 0.949999988079071,
509
+ "rewards/chosen": 0.1585393249988556,
510
+ "rewards/margins": 0.2598261535167694,
511
+ "rewards/margins_max": 0.33601224422454834,
512
+ "rewards/margins_min": 0.18364010751247406,
513
+ "rewards/margins_std": 0.10774336010217667,
514
+ "rewards/rejected": -0.10128685086965561,
515
+ "step": 250
516
+ },
517
+ {
518
+ "epoch": 0.73,
519
+ "grad_norm": 1.7857120908996174,
520
+ "learning_rate": 4.691871654986485e-07,
521
+ "logits/chosen": -2.8211376667022705,
522
+ "logits/rejected": -2.7870113849639893,
523
+ "logps/chosen": -309.5736389160156,
524
+ "logps/rejected": -234.69400024414062,
525
+ "loss": 0.5798,
526
+ "rewards/accuracies": 0.8999999761581421,
527
+ "rewards/chosen": 0.12248677015304565,
528
+ "rewards/margins": 0.22701454162597656,
529
+ "rewards/margins_max": 0.3895031213760376,
530
+ "rewards/margins_min": 0.06452600657939911,
531
+ "rewards/margins_std": 0.2297935038805008,
532
+ "rewards/rejected": -0.1045277863740921,
533
+ "step": 260
534
+ },
535
+ {
536
+ "epoch": 0.76,
537
+ "grad_norm": 2.1087812930387346,
538
+ "learning_rate": 4.6512705815940536e-07,
539
+ "logits/chosen": -2.831230640411377,
540
+ "logits/rejected": -2.6917014122009277,
541
+ "logps/chosen": -413.9813537597656,
542
+ "logps/rejected": -245.874267578125,
543
+ "loss": 0.5779,
544
+ "rewards/accuracies": 1.0,
545
+ "rewards/chosen": 0.15015338361263275,
546
+ "rewards/margins": 0.28351056575775146,
547
+ "rewards/margins_max": 0.36979129910469055,
548
+ "rewards/margins_min": 0.19722983241081238,
549
+ "rewards/margins_std": 0.12201935052871704,
550
+ "rewards/rejected": -0.13335716724395752,
551
+ "step": 270
552
+ },
553
+ {
554
+ "epoch": 0.79,
555
+ "grad_norm": 2.022102416277495,
556
+ "learning_rate": 4.6083562460867544e-07,
557
+ "logits/chosen": -2.694611072540283,
558
+ "logits/rejected": -2.6558010578155518,
559
+ "logps/chosen": -322.9226989746094,
560
+ "logps/rejected": -277.32080078125,
561
+ "loss": 0.5765,
562
+ "rewards/accuracies": 0.8999999761581421,
563
+ "rewards/chosen": 0.061540234833955765,
564
+ "rewards/margins": 0.1778351366519928,
565
+ "rewards/margins_max": 0.27743834257125854,
566
+ "rewards/margins_min": 0.07823189347982407,
567
+ "rewards/margins_std": 0.14086022973060608,
568
+ "rewards/rejected": -0.11629488319158554,
569
+ "step": 280
570
+ },
571
+ {
572
+ "epoch": 0.82,
573
+ "grad_norm": 2.3105731875548994,
574
+ "learning_rate": 4.563174794266683e-07,
575
+ "logits/chosen": -2.8924269676208496,
576
+ "logits/rejected": -2.731158494949341,
577
+ "logps/chosen": -346.88446044921875,
578
+ "logps/rejected": -285.9337463378906,
579
+ "loss": 0.5777,
580
+ "rewards/accuracies": 0.949999988079071,
581
+ "rewards/chosen": 0.1496116816997528,
582
+ "rewards/margins": 0.2949182093143463,
583
+ "rewards/margins_max": 0.4682803750038147,
584
+ "rewards/margins_min": 0.12155600637197495,
585
+ "rewards/margins_std": 0.24517109990119934,
586
+ "rewards/rejected": -0.14530649781227112,
587
+ "step": 290
588
+ },
589
+ {
590
+ "epoch": 0.85,
591
+ "grad_norm": 2.2339977666217017,
592
+ "learning_rate": 4.515774809767012e-07,
593
+ "logits/chosen": -2.811025381088257,
594
+ "logits/rejected": -2.7584595680236816,
595
+ "logps/chosen": -323.22955322265625,
596
+ "logps/rejected": -328.38958740234375,
597
+ "loss": 0.5521,
598
+ "rewards/accuracies": 0.949999988079071,
599
+ "rewards/chosen": 0.1451384723186493,
600
+ "rewards/margins": 0.3018813729286194,
601
+ "rewards/margins_max": 0.4272955060005188,
602
+ "rewards/margins_min": 0.17646725475788116,
603
+ "rewards/margins_std": 0.1773623526096344,
604
+ "rewards/rejected": -0.1567429006099701,
605
+ "step": 300
606
+ },
607
+ {
608
+ "epoch": 0.85,
609
+ "eval_logits/chosen": -2.7701103687286377,
610
+ "eval_logits/rejected": -2.726138114929199,
611
+ "eval_logps/chosen": -298.4896545410156,
612
+ "eval_logps/rejected": -276.009521484375,
613
+ "eval_loss": 0.6808469295501709,
614
+ "eval_rewards/accuracies": 0.5952380895614624,
615
+ "eval_rewards/chosen": -0.13268397748470306,
616
+ "eval_rewards/margins": 0.035584457218647,
617
+ "eval_rewards/margins_max": 0.18226537108421326,
618
+ "eval_rewards/margins_min": -0.1063792034983635,
619
+ "eval_rewards/margins_std": 0.1266382783651352,
620
+ "eval_rewards/rejected": -0.16826842725276947,
621
+ "eval_runtime": 283.0232,
622
+ "eval_samples_per_second": 7.067,
623
+ "eval_steps_per_second": 0.223,
624
+ "step": 300
625
+ },
626
+ {
627
+ "epoch": 0.87,
628
+ "grad_norm": 1.9067578499115883,
629
+ "learning_rate": 4.4662072618099887e-07,
630
+ "logits/chosen": -2.9651057720184326,
631
+ "logits/rejected": -2.7346513271331787,
632
+ "logps/chosen": -406.3728942871094,
633
+ "logps/rejected": -280.020263671875,
634
+ "loss": 0.5581,
635
+ "rewards/accuracies": 0.949999988079071,
636
+ "rewards/chosen": 0.17874427139759064,
637
+ "rewards/margins": 0.3691082000732422,
638
+ "rewards/margins_max": 0.5541077852249146,
639
+ "rewards/margins_min": 0.1841086447238922,
640
+ "rewards/margins_std": 0.2616288661956787,
641
+ "rewards/rejected": -0.19036395847797394,
642
+ "step": 310
643
+ },
644
+ {
645
+ "epoch": 0.9,
646
+ "grad_norm": 1.9111722338805972,
647
+ "learning_rate": 4.414525450399712e-07,
648
+ "logits/chosen": -2.8192591667175293,
649
+ "logits/rejected": -2.7278454303741455,
650
+ "logps/chosen": -334.79351806640625,
651
+ "logps/rejected": -257.9205322265625,
652
+ "loss": 0.5423,
653
+ "rewards/accuracies": 0.8999999761581421,
654
+ "rewards/chosen": 0.11459338665008545,
655
+ "rewards/margins": 0.31284791231155396,
656
+ "rewards/margins_max": 0.4758702218532562,
657
+ "rewards/margins_min": 0.14982566237449646,
658
+ "rewards/margins_std": 0.23054833710193634,
659
+ "rewards/rejected": -0.1982545554637909,
660
+ "step": 320
661
+ },
662
+ {
663
+ "epoch": 0.93,
664
+ "grad_norm": 2.315451928799758,
665
+ "learning_rate": 4.360784949008615e-07,
666
+ "logits/chosen": -2.9191551208496094,
667
+ "logits/rejected": -2.7938694953918457,
668
+ "logps/chosen": -364.45758056640625,
669
+ "logps/rejected": -270.68011474609375,
670
+ "loss": 0.5471,
671
+ "rewards/accuracies": 0.8999999761581421,
672
+ "rewards/chosen": 0.15229038894176483,
673
+ "rewards/margins": 0.3847652077674866,
674
+ "rewards/margins_max": 0.5681120157241821,
675
+ "rewards/margins_min": 0.2014184296131134,
676
+ "rewards/margins_std": 0.25929152965545654,
677
+ "rewards/rejected": -0.23247484862804413,
678
+ "step": 330
679
+ },
680
+ {
681
+ "epoch": 0.96,
682
+ "grad_norm": 2.2165358335084067,
683
+ "learning_rate": 4.305043544819289e-07,
684
+ "logits/chosen": -2.81691312789917,
685
+ "logits/rejected": -2.674722194671631,
686
+ "logps/chosen": -359.3749694824219,
687
+ "logps/rejected": -222.2421417236328,
688
+ "loss": 0.5382,
689
+ "rewards/accuracies": 0.949999988079071,
690
+ "rewards/chosen": 0.09120534360408783,
691
+ "rewards/margins": 0.2705990970134735,
692
+ "rewards/margins_max": 0.40515756607055664,
693
+ "rewards/margins_min": 0.13604064285755157,
694
+ "rewards/margins_std": 0.19029441475868225,
695
+ "rewards/rejected": -0.17939376831054688,
696
+ "step": 340
697
+ },
698
+ {
699
+ "epoch": 0.99,
700
+ "grad_norm": 3.4285964794606154,
701
+ "learning_rate": 4.247361176585903e-07,
702
+ "logits/chosen": -2.7804062366485596,
703
+ "logits/rejected": -2.6804087162017822,
704
+ "logps/chosen": -407.67974853515625,
705
+ "logps/rejected": -324.3394470214844,
706
+ "loss": 0.531,
707
+ "rewards/accuracies": 0.949999988079071,
708
+ "rewards/chosen": 0.20723724365234375,
709
+ "rewards/margins": 0.46674785017967224,
710
+ "rewards/margins_max": 0.6301173567771912,
711
+ "rewards/margins_min": 0.303378164768219,
712
+ "rewards/margins_std": 0.23103955388069153,
713
+ "rewards/rejected": -0.2595105469226837,
714
+ "step": 350
715
+ },
716
+ {
717
+ "epoch": 1.01,
718
+ "grad_norm": 1.8941910585756494,
719
+ "learning_rate": 4.187799870182038e-07,
720
+ "logits/chosen": -2.755005359649658,
721
+ "logits/rejected": -2.650050163269043,
722
+ "logps/chosen": -332.78167724609375,
723
+ "logps/rejected": -226.0217742919922,
724
+ "loss": 0.5144,
725
+ "rewards/accuracies": 1.0,
726
+ "rewards/chosen": 0.13424184918403625,
727
+ "rewards/margins": 0.36670905351638794,
728
+ "rewards/margins_max": 0.4970892369747162,
729
+ "rewards/margins_min": 0.2363288700580597,
730
+ "rewards/margins_std": 0.18438544869422913,
731
+ "rewards/rejected": -0.23246721923351288,
732
+ "step": 360
733
+ },
734
+ {
735
+ "epoch": 1.04,
736
+ "grad_norm": 2.838587843959735,
737
+ "learning_rate": 4.126423671904236e-07,
738
+ "logits/chosen": -2.6448826789855957,
739
+ "logits/rejected": -2.639240264892578,
740
+ "logps/chosen": -334.3022766113281,
741
+ "logps/rejected": -288.78326416015625,
742
+ "loss": 0.5104,
743
+ "rewards/accuracies": 0.8999999761581421,
744
+ "rewards/chosen": 0.17355194687843323,
745
+ "rewards/margins": 0.42354923486709595,
746
+ "rewards/margins_max": 0.6247254610061646,
747
+ "rewards/margins_min": 0.22237297892570496,
748
+ "rewards/margins_std": 0.2845061719417572,
749
+ "rewards/rejected": -0.24999728798866272,
750
+ "step": 370
751
+ },
752
+ {
753
+ "epoch": 1.07,
754
+ "grad_norm": 2.2802243997336906,
755
+ "learning_rate": 4.0632985796030007e-07,
756
+ "logits/chosen": -2.7517781257629395,
757
+ "logits/rejected": -2.5309536457061768,
758
+ "logps/chosen": -372.3118591308594,
759
+ "logps/rejected": -212.3002471923828,
760
+ "loss": 0.5067,
761
+ "rewards/accuracies": 0.8999999761581421,
762
+ "rewards/chosen": 0.1866866499185562,
763
+ "rewards/margins": 0.46490558981895447,
764
+ "rewards/margins_max": 0.6717337965965271,
765
+ "rewards/margins_min": 0.25807738304138184,
766
+ "rewards/margins_std": 0.292499303817749,
767
+ "rewards/rejected": -0.27821898460388184,
768
+ "step": 380
769
+ },
770
+ {
771
+ "epoch": 1.1,
772
+ "grad_norm": 1.903829570066699,
773
+ "learning_rate": 3.9984924717152713e-07,
774
+ "logits/chosen": -2.7687957286834717,
775
+ "logits/rejected": -2.7305006980895996,
776
+ "logps/chosen": -375.97857666015625,
777
+ "logps/rejected": -369.49609375,
778
+ "loss": 0.5018,
779
+ "rewards/accuracies": 0.949999988079071,
780
+ "rewards/chosen": 0.215342715382576,
781
+ "rewards/margins": 0.5938854813575745,
782
+ "rewards/margins_max": 0.8382770419120789,
783
+ "rewards/margins_min": 0.3494938910007477,
784
+ "rewards/margins_std": 0.34562191367149353,
785
+ "rewards/rejected": -0.3785427510738373,
786
+ "step": 390
787
+ },
788
+ {
789
+ "epoch": 1.13,
790
+ "grad_norm": 2.5748784749511193,
791
+ "learning_rate": 3.932075034274723e-07,
792
+ "logits/chosen": -2.6897547245025635,
793
+ "logits/rejected": -2.702376365661621,
794
+ "logps/chosen": -290.5245056152344,
795
+ "logps/rejected": -260.26007080078125,
796
+ "loss": 0.4853,
797
+ "rewards/accuracies": 0.949999988079071,
798
+ "rewards/chosen": 0.08214841037988663,
799
+ "rewards/margins": 0.4123876094818115,
800
+ "rewards/margins_max": 0.5694471001625061,
801
+ "rewards/margins_min": 0.25532811880111694,
802
+ "rewards/margins_std": 0.2221156805753708,
803
+ "rewards/rejected": -0.3302391767501831,
804
+ "step": 400
805
+ },
806
+ {
807
+ "epoch": 1.13,
808
+ "eval_logits/chosen": -2.7294929027557373,
809
+ "eval_logits/rejected": -2.687251567840576,
810
+ "eval_logps/chosen": -305.751953125,
811
+ "eval_logps/rejected": -285.3262939453125,
812
+ "eval_loss": 0.6748747825622559,
813
+ "eval_rewards/accuracies": 0.60317462682724,
814
+ "eval_rewards/chosen": -0.2053072601556778,
815
+ "eval_rewards/margins": 0.05612919479608536,
816
+ "eval_rewards/margins_max": 0.29521581530570984,
817
+ "eval_rewards/margins_min": -0.17038604617118835,
818
+ "eval_rewards/margins_std": 0.2055714726448059,
819
+ "eval_rewards/rejected": -0.26143649220466614,
820
+ "eval_runtime": 283.24,
821
+ "eval_samples_per_second": 7.061,
822
+ "eval_steps_per_second": 0.222,
823
+ "step": 400
824
+ },
825
+ {
826
+ "epoch": 1.15,
827
+ "grad_norm": 3.103699685286931,
828
+ "learning_rate": 3.8641176859783383e-07,
829
+ "logits/chosen": -2.776700973510742,
830
+ "logits/rejected": -2.7365381717681885,
831
+ "logps/chosen": -273.39312744140625,
832
+ "logps/rejected": -233.35983276367188,
833
+ "loss": 0.4943,
834
+ "rewards/accuracies": 0.8500000238418579,
835
+ "rewards/chosen": 0.21297307312488556,
836
+ "rewards/margins": 0.4645983576774597,
837
+ "rewards/margins_max": 0.7116575837135315,
838
+ "rewards/margins_min": 0.21753914654254913,
839
+ "rewards/margins_std": 0.3493945002555847,
840
+ "rewards/rejected": -0.25162526965141296,
841
+ "step": 410
842
+ },
843
+ {
844
+ "epoch": 1.18,
845
+ "grad_norm": 2.6715356265474646,
846
+ "learning_rate": 3.7946935013898606e-07,
847
+ "logits/chosen": -2.8254995346069336,
848
+ "logits/rejected": -2.712592601776123,
849
+ "logps/chosen": -370.1254577636719,
850
+ "logps/rejected": -297.41387939453125,
851
+ "loss": 0.4933,
852
+ "rewards/accuracies": 1.0,
853
+ "rewards/chosen": 0.2962881922721863,
854
+ "rewards/margins": 0.6147041320800781,
855
+ "rewards/margins_max": 0.8403900265693665,
856
+ "rewards/margins_min": 0.38901832699775696,
857
+ "rewards/margins_std": 0.31916797161102295,
858
+ "rewards/rejected": -0.318416029214859,
859
+ "step": 420
860
+ },
861
+ {
862
+ "epoch": 1.21,
863
+ "grad_norm": 2.575816189937093,
864
+ "learning_rate": 3.7238771323626817e-07,
865
+ "logits/chosen": -2.748086452484131,
866
+ "logits/rejected": -2.645881175994873,
867
+ "logps/chosen": -367.36517333984375,
868
+ "logps/rejected": -290.486083984375,
869
+ "loss": 0.4729,
870
+ "rewards/accuracies": 0.8999999761581421,
871
+ "rewards/chosen": 0.19724011421203613,
872
+ "rewards/margins": 0.4674545228481293,
873
+ "rewards/margins_max": 0.7218641042709351,
874
+ "rewards/margins_min": 0.21304507553577423,
875
+ "rewards/margins_std": 0.3597893714904785,
876
+ "rewards/rejected": -0.2702144682407379,
877
+ "step": 430
878
+ },
879
+ {
880
+ "epoch": 1.24,
881
+ "grad_norm": 3.2391287105434383,
882
+ "learning_rate": 3.651744727766676e-07,
883
+ "logits/chosen": -2.7392418384552,
884
+ "logits/rejected": -2.627821207046509,
885
+ "logps/chosen": -302.20477294921875,
886
+ "logps/rejected": -231.24520874023438,
887
+ "loss": 0.4817,
888
+ "rewards/accuracies": 1.0,
889
+ "rewards/chosen": 0.13048839569091797,
890
+ "rewards/margins": 0.4945225715637207,
891
+ "rewards/margins_max": 0.6880706548690796,
892
+ "rewards/margins_min": 0.3009744882583618,
893
+ "rewards/margins_std": 0.27371832728385925,
894
+ "rewards/rejected": -0.36403417587280273,
895
+ "step": 440
896
+ },
897
+ {
898
+ "epoch": 1.27,
899
+ "grad_norm": 2.3207662119539045,
900
+ "learning_rate": 3.5783738516052897e-07,
901
+ "logits/chosen": -2.6938626766204834,
902
+ "logits/rejected": -2.6267027854919434,
903
+ "logps/chosen": -321.3754577636719,
904
+ "logps/rejected": -300.9757995605469,
905
+ "loss": 0.4828,
906
+ "rewards/accuracies": 0.949999988079071,
907
+ "rewards/chosen": 0.1679908037185669,
908
+ "rewards/margins": 0.501402735710144,
909
+ "rewards/margins_max": 0.7609961628913879,
910
+ "rewards/margins_min": 0.24180929362773895,
911
+ "rewards/margins_std": 0.36712056398391724,
912
+ "rewards/rejected": -0.33341190218925476,
913
+ "step": 450
914
+ },
915
+ {
916
+ "epoch": 1.3,
917
+ "grad_norm": 3.167493076572815,
918
+ "learning_rate": 3.5038433996109404e-07,
919
+ "logits/chosen": -2.6855156421661377,
920
+ "logits/rejected": -2.6763596534729004,
921
+ "logps/chosen": -379.9665832519531,
922
+ "logps/rejected": -442.71319580078125,
923
+ "loss": 0.4632,
924
+ "rewards/accuracies": 0.949999988079071,
925
+ "rewards/chosen": 0.15311354398727417,
926
+ "rewards/margins": 0.5543171167373657,
927
+ "rewards/margins_max": 0.6853052973747253,
928
+ "rewards/margins_min": 0.42332905530929565,
929
+ "rewards/margins_std": 0.18524515628814697,
930
+ "rewards/rejected": -0.40120357275009155,
931
+ "step": 460
932
+ },
933
+ {
934
+ "epoch": 1.32,
935
+ "grad_norm": 2.627174748144401,
936
+ "learning_rate": 3.428233514408398e-07,
937
+ "logits/chosen": -2.725461959838867,
938
+ "logits/rejected": -2.6376640796661377,
939
+ "logps/chosen": -334.50274658203125,
940
+ "logps/rejected": -258.25244140625,
941
+ "loss": 0.4468,
942
+ "rewards/accuracies": 1.0,
943
+ "rewards/chosen": 0.22748108208179474,
944
+ "rewards/margins": 0.6032036542892456,
945
+ "rewards/margins_max": 0.7447773814201355,
946
+ "rewards/margins_min": 0.46162980794906616,
947
+ "rewards/margins_std": 0.20021554827690125,
948
+ "rewards/rejected": -0.3757224977016449,
949
+ "step": 470
950
+ },
951
+ {
952
+ "epoch": 1.35,
953
+ "grad_norm": 2.578290159541697,
954
+ "learning_rate": 3.3516254993373945e-07,
955
+ "logits/chosen": -2.8183326721191406,
956
+ "logits/rejected": -2.6640899181365967,
957
+ "logps/chosen": -394.9267578125,
958
+ "logps/rejected": -324.3505859375,
959
+ "loss": 0.4488,
960
+ "rewards/accuracies": 0.949999988079071,
961
+ "rewards/chosen": 0.209248349070549,
962
+ "rewards/margins": 0.6886187791824341,
963
+ "rewards/margins_max": 0.9021609425544739,
964
+ "rewards/margins_min": 0.4750765860080719,
965
+ "rewards/margins_std": 0.301994264125824,
966
+ "rewards/rejected": -0.4793704152107239,
967
+ "step": 480
968
+ },
969
+ {
970
+ "epoch": 1.38,
971
+ "grad_norm": 3.5319741105872913,
972
+ "learning_rate": 3.274101731027105e-07,
973
+ "logits/chosen": -2.6766858100891113,
974
+ "logits/rejected": -2.5564141273498535,
975
+ "logps/chosen": -206.2932586669922,
976
+ "logps/rejected": -255.5198211669922,
977
+ "loss": 0.4599,
978
+ "rewards/accuracies": 0.8999999761581421,
979
+ "rewards/chosen": 0.1098823994398117,
980
+ "rewards/margins": 0.49932241439819336,
981
+ "rewards/margins_max": 0.741881251335144,
982
+ "rewards/margins_min": 0.2567635178565979,
983
+ "rewards/margins_std": 0.34303003549575806,
984
+ "rewards/rejected": -0.38943997025489807,
985
+ "step": 490
986
+ },
987
+ {
988
+ "epoch": 1.41,
989
+ "grad_norm": 3.6901814684402083,
990
+ "learning_rate": 3.1957455708165314e-07,
991
+ "logits/chosen": -2.6062679290771484,
992
+ "logits/rejected": -2.5482537746429443,
993
+ "logps/chosen": -354.30804443359375,
994
+ "logps/rejected": -276.2196350097656,
995
+ "loss": 0.4561,
996
+ "rewards/accuracies": 0.949999988079071,
997
+ "rewards/chosen": 0.2989659011363983,
998
+ "rewards/margins": 0.682066023349762,
999
+ "rewards/margins_max": 0.8869129419326782,
1000
+ "rewards/margins_min": 0.47721901535987854,
1001
+ "rewards/margins_std": 0.2896973788738251,
1002
+ "rewards/rejected": -0.38310009241104126,
1003
+ "step": 500
1004
+ },
1005
+ {
1006
+ "epoch": 1.41,
1007
+ "eval_logits/chosen": -2.7037460803985596,
1008
+ "eval_logits/rejected": -2.6622040271759033,
1009
+ "eval_logps/chosen": -303.293701171875,
1010
+ "eval_logps/rejected": -285.461181640625,
1011
+ "eval_loss": 0.6651288866996765,
1012
+ "eval_rewards/accuracies": 0.591269850730896,
1013
+ "eval_rewards/chosen": -0.18072474002838135,
1014
+ "eval_rewards/margins": 0.08206041157245636,
1015
+ "eval_rewards/margins_max": 0.4090707004070282,
1016
+ "eval_rewards/margins_min": -0.23881544172763824,
1017
+ "eval_rewards/margins_std": 0.28740382194519043,
1018
+ "eval_rewards/rejected": -0.2627851665019989,
1019
+ "eval_runtime": 282.6678,
1020
+ "eval_samples_per_second": 7.075,
1021
+ "eval_steps_per_second": 0.223,
1022
+ "step": 500
1023
+ },
1024
+ {
1025
+ "epoch": 1.44,
1026
+ "grad_norm": 3.739183976606806,
1027
+ "learning_rate": 3.116641275116018e-07,
1028
+ "logits/chosen": -2.444211959838867,
1029
+ "logits/rejected": -2.4450695514678955,
1030
+ "logps/chosen": -274.13214111328125,
1031
+ "logps/rejected": -353.31646728515625,
1032
+ "loss": 0.4444,
1033
+ "rewards/accuracies": 1.0,
1034
+ "rewards/chosen": 0.185481995344162,
1035
+ "rewards/margins": 0.5661060810089111,
1036
+ "rewards/margins_max": 0.8111560940742493,
1037
+ "rewards/margins_min": 0.32105594873428345,
1038
+ "rewards/margins_std": 0.3465532064437866,
1039
+ "rewards/rejected": -0.38062405586242676,
1040
+ "step": 510
1041
+ },
1042
+ {
1043
+ "epoch": 1.46,
1044
+ "grad_norm": 3.184026882920763,
1045
+ "learning_rate": 3.036873904806295e-07,
1046
+ "logits/chosen": -2.719656467437744,
1047
+ "logits/rejected": -2.6494226455688477,
1048
+ "logps/chosen": -328.18524169921875,
1049
+ "logps/rejected": -284.12640380859375,
1050
+ "loss": 0.4602,
1051
+ "rewards/accuracies": 0.8999999761581421,
1052
+ "rewards/chosen": 0.13430781662464142,
1053
+ "rewards/margins": 0.6433862447738647,
1054
+ "rewards/margins_max": 0.9472837448120117,
1055
+ "rewards/margins_min": 0.3394888639450073,
1056
+ "rewards/margins_std": 0.4297758638858795,
1057
+ "rewards/rejected": -0.5090785026550293,
1058
+ "step": 520
1059
+ },
1060
+ {
1061
+ "epoch": 1.49,
1062
+ "grad_norm": 4.0242710472267955,
1063
+ "learning_rate": 2.956529233772492e-07,
1064
+ "logits/chosen": -2.6814956665039062,
1065
+ "logits/rejected": -2.6884987354278564,
1066
+ "logps/chosen": -363.94097900390625,
1067
+ "logps/rejected": -321.6993408203125,
1068
+ "loss": 0.4499,
1069
+ "rewards/accuracies": 1.0,
1070
+ "rewards/chosen": 0.15064643323421478,
1071
+ "rewards/margins": 0.6707555055618286,
1072
+ "rewards/margins_max": 0.8504557609558105,
1073
+ "rewards/margins_min": 0.49105510115623474,
1074
+ "rewards/margins_std": 0.2541346549987793,
1075
+ "rewards/rejected": -0.5201090574264526,
1076
+ "step": 530
1077
+ },
1078
+ {
1079
+ "epoch": 1.52,
1080
+ "grad_norm": 2.6946668653241135,
1081
+ "learning_rate": 2.875693656671431e-07,
1082
+ "logits/chosen": -2.8023953437805176,
1083
+ "logits/rejected": -2.6737780570983887,
1084
+ "logps/chosen": -369.5718078613281,
1085
+ "logps/rejected": -287.8340148925781,
1086
+ "loss": 0.435,
1087
+ "rewards/accuracies": 1.0,
1088
+ "rewards/chosen": 0.29981452226638794,
1089
+ "rewards/margins": 0.8034716844558716,
1090
+ "rewards/margins_max": 0.968321681022644,
1091
+ "rewards/margins_min": 0.6386215090751648,
1092
+ "rewards/margins_std": 0.23313331604003906,
1093
+ "rewards/rejected": -0.5036571025848389,
1094
+ "step": 540
1095
+ },
1096
+ {
1097
+ "epoch": 1.55,
1098
+ "grad_norm": 4.134360818413295,
1099
+ "learning_rate": 2.794454096031429e-07,
1100
+ "logits/chosen": -2.760481119155884,
1101
+ "logits/rejected": -2.706476926803589,
1102
+ "logps/chosen": -290.4170837402344,
1103
+ "logps/rejected": -311.6772766113281,
1104
+ "loss": 0.4046,
1105
+ "rewards/accuracies": 1.0,
1106
+ "rewards/chosen": 0.2053152620792389,
1107
+ "rewards/margins": 0.6515690088272095,
1108
+ "rewards/margins_max": 0.9271136522293091,
1109
+ "rewards/margins_min": 0.37602442502975464,
1110
+ "rewards/margins_std": 0.38967886567115784,
1111
+ "rewards/rejected": -0.44625377655029297,
1112
+ "step": 550
1113
+ },
1114
+ {
1115
+ "epoch": 1.58,
1116
+ "grad_norm": 3.840109613165422,
1117
+ "learning_rate": 2.7128979087844593e-07,
1118
+ "logits/chosen": -2.69515061378479,
1119
+ "logits/rejected": -2.638667583465576,
1120
+ "logps/chosen": -301.68402099609375,
1121
+ "logps/rejected": -388.26373291015625,
1122
+ "loss": 0.435,
1123
+ "rewards/accuracies": 0.8999999761581421,
1124
+ "rewards/chosen": 0.14999032020568848,
1125
+ "rewards/margins": 0.6446247100830078,
1126
+ "rewards/margins_max": 0.9054215550422668,
1127
+ "rewards/margins_min": 0.38382798433303833,
1128
+ "rewards/margins_std": 0.3688223958015442,
1129
+ "rewards/rejected": -0.4946344494819641,
1130
+ "step": 560
1131
+ },
1132
+ {
1133
+ "epoch": 1.61,
1134
+ "grad_norm": 3.064153419557981,
1135
+ "learning_rate": 2.6311127923312153e-07,
1136
+ "logits/chosen": -2.7153382301330566,
1137
+ "logits/rejected": -2.5977590084075928,
1138
+ "logps/chosen": -403.88983154296875,
1139
+ "logps/rejected": -367.3240661621094,
1140
+ "loss": 0.4246,
1141
+ "rewards/accuracies": 1.0,
1142
+ "rewards/chosen": 0.28311288356781006,
1143
+ "rewards/margins": 0.7977832555770874,
1144
+ "rewards/margins_max": 0.9812647104263306,
1145
+ "rewards/margins_min": 0.614301860332489,
1146
+ "rewards/margins_std": 0.25948190689086914,
1147
+ "rewards/rejected": -0.5146704316139221,
1148
+ "step": 570
1149
+ },
1150
+ {
1151
+ "epoch": 1.63,
1152
+ "grad_norm": 3.5144451959157386,
1153
+ "learning_rate": 2.5491866902400565e-07,
1154
+ "logits/chosen": -2.741267442703247,
1155
+ "logits/rejected": -2.664016008377075,
1156
+ "logps/chosen": -275.14654541015625,
1157
+ "logps/rejected": -273.2240295410156,
1158
+ "loss": 0.4194,
1159
+ "rewards/accuracies": 0.949999988079071,
1160
+ "rewards/chosen": 0.23318970203399658,
1161
+ "rewards/margins": 0.7509428858757019,
1162
+ "rewards/margins_max": 1.058239221572876,
1163
+ "rewards/margins_min": 0.4436466693878174,
1164
+ "rewards/margins_std": 0.4345824122428894,
1165
+ "rewards/rejected": -0.5177530646324158,
1166
+ "step": 580
1167
+ },
1168
+ {
1169
+ "epoch": 1.66,
1170
+ "grad_norm": 5.853626737663302,
1171
+ "learning_rate": 2.4672076976812543e-07,
1172
+ "logits/chosen": -2.604552745819092,
1173
+ "logits/rejected": -2.5189764499664307,
1174
+ "logps/chosen": -350.71343994140625,
1175
+ "logps/rejected": -339.68170166015625,
1176
+ "loss": 0.4303,
1177
+ "rewards/accuracies": 0.8999999761581421,
1178
+ "rewards/chosen": 0.1854722946882248,
1179
+ "rewards/margins": 0.6601113080978394,
1180
+ "rewards/margins_max": 1.0095475912094116,
1181
+ "rewards/margins_min": 0.3106750547885895,
1182
+ "rewards/margins_std": 0.4941774904727936,
1183
+ "rewards/rejected": -0.4746389389038086,
1184
+ "step": 590
1185
+ },
1186
+ {
1187
+ "epoch": 1.69,
1188
+ "grad_norm": 2.060171720513941,
1189
+ "learning_rate": 2.385263966698222e-07,
1190
+ "logits/chosen": -2.7409660816192627,
1191
+ "logits/rejected": -2.682070255279541,
1192
+ "logps/chosen": -274.4781188964844,
1193
+ "logps/rejected": -307.241455078125,
1194
+ "loss": 0.4337,
1195
+ "rewards/accuracies": 1.0,
1196
+ "rewards/chosen": 0.041744865477085114,
1197
+ "rewards/margins": 0.5090219974517822,
1198
+ "rewards/margins_max": 0.7057874798774719,
1199
+ "rewards/margins_min": 0.3122565746307373,
1200
+ "rewards/margins_std": 0.27826830744743347,
1201
+ "rewards/rejected": -0.4672771096229553,
1202
+ "step": 600
1203
+ },
1204
+ {
1205
+ "epoch": 1.69,
1206
+ "eval_logits/chosen": -2.691161632537842,
1207
+ "eval_logits/rejected": -2.65215802192688,
1208
+ "eval_logps/chosen": -311.7007751464844,
1209
+ "eval_logps/rejected": -293.9761047363281,
1210
+ "eval_loss": 0.6629500985145569,
1211
+ "eval_rewards/accuracies": 0.6111111044883728,
1212
+ "eval_rewards/chosen": -0.26479557156562805,
1213
+ "eval_rewards/margins": 0.0831385999917984,
1214
+ "eval_rewards/margins_max": 0.4556023180484772,
1215
+ "eval_rewards/margins_min": -0.2917100191116333,
1216
+ "eval_rewards/margins_std": 0.3299048840999603,
1217
+ "eval_rewards/rejected": -0.34793415665626526,
1218
+ "eval_runtime": 282.2224,
1219
+ "eval_samples_per_second": 7.087,
1220
+ "eval_steps_per_second": 0.223,
1221
+ "step": 600
1222
+ },
1223
+ {
1224
+ "epoch": 1.72,
1225
+ "grad_norm": 4.033016557049881,
1226
+ "learning_rate": 2.3034436114175838e-07,
1227
+ "logits/chosen": -2.6008808612823486,
1228
+ "logits/rejected": -2.50779390335083,
1229
+ "logps/chosen": -342.6214904785156,
1230
+ "logps/rejected": -306.4314270019531,
1231
+ "loss": 0.4179,
1232
+ "rewards/accuracies": 0.949999988079071,
1233
+ "rewards/chosen": 0.22867994010448456,
1234
+ "rewards/margins": 0.6097778081893921,
1235
+ "rewards/margins_max": 0.9516829252243042,
1236
+ "rewards/margins_min": 0.26787281036376953,
1237
+ "rewards/margins_std": 0.48352688550949097,
1238
+ "rewards/rejected": -0.3810979723930359,
1239
+ "step": 610
1240
+ },
1241
+ {
1242
+ "epoch": 1.75,
1243
+ "grad_norm": 4.877910820940742,
1244
+ "learning_rate": 2.2218346133000264e-07,
1245
+ "logits/chosen": -2.6137542724609375,
1246
+ "logits/rejected": -2.5171990394592285,
1247
+ "logps/chosen": -289.39947509765625,
1248
+ "logps/rejected": -239.6779022216797,
1249
+ "loss": 0.404,
1250
+ "rewards/accuracies": 0.949999988079071,
1251
+ "rewards/chosen": 0.21883878111839294,
1252
+ "rewards/margins": 0.6435804963111877,
1253
+ "rewards/margins_max": 0.8332638740539551,
1254
+ "rewards/margins_min": 0.45389705896377563,
1255
+ "rewards/margins_std": 0.2682528793811798,
1256
+ "rewards/rejected": -0.42474165558815,
1257
+ "step": 620
1258
+ },
1259
+ {
1260
+ "epoch": 1.77,
1261
+ "grad_norm": 3.6128582681053905,
1262
+ "learning_rate": 2.1405247265337917e-07,
1263
+ "logits/chosen": -2.6185965538024902,
1264
+ "logits/rejected": -2.5335779190063477,
1265
+ "logps/chosen": -362.9138488769531,
1266
+ "logps/rejected": -264.81561279296875,
1267
+ "loss": 0.4299,
1268
+ "rewards/accuracies": 1.0,
1269
+ "rewards/chosen": 0.31849801540374756,
1270
+ "rewards/margins": 0.7252637147903442,
1271
+ "rewards/margins_max": 0.9092816114425659,
1272
+ "rewards/margins_min": 0.541245698928833,
1273
+ "rewards/margins_std": 0.26024073362350464,
1274
+ "rewards/rejected": -0.4067656397819519,
1275
+ "step": 630
1276
+ },
1277
+ {
1278
+ "epoch": 1.8,
1279
+ "grad_norm": 4.049751113967747,
1280
+ "learning_rate": 2.0596013836725657e-07,
1281
+ "logits/chosen": -2.7523646354675293,
1282
+ "logits/rejected": -2.7036333084106445,
1283
+ "logps/chosen": -289.30523681640625,
1284
+ "logps/rejected": -251.0538330078125,
1285
+ "loss": 0.4276,
1286
+ "rewards/accuracies": 0.949999988079071,
1287
+ "rewards/chosen": 0.13170190155506134,
1288
+ "rewards/margins": 0.6432820558547974,
1289
+ "rewards/margins_max": 0.9033204913139343,
1290
+ "rewards/margins_min": 0.3832436501979828,
1291
+ "rewards/margins_std": 0.3677498400211334,
1292
+ "rewards/rejected": -0.5115801692008972,
1293
+ "step": 640
1294
+ },
1295
+ {
1296
+ "epoch": 1.83,
1297
+ "grad_norm": 3.954466219061399,
1298
+ "learning_rate": 1.9791516016192213e-07,
1299
+ "logits/chosen": -2.8034818172454834,
1300
+ "logits/rejected": -2.672067642211914,
1301
+ "logps/chosen": -308.1045837402344,
1302
+ "logps/rejected": -269.56695556640625,
1303
+ "loss": 0.4146,
1304
+ "rewards/accuracies": 0.8999999761581421,
1305
+ "rewards/chosen": 0.2629513740539551,
1306
+ "rewards/margins": 0.7304937243461609,
1307
+ "rewards/margins_max": 1.0699743032455444,
1308
+ "rewards/margins_min": 0.3910132348537445,
1309
+ "rewards/margins_std": 0.4800979495048523,
1310
+ "rewards/rejected": -0.4675424098968506,
1311
+ "step": 650
1312
+ },
1313
+ {
1314
+ "epoch": 1.86,
1315
+ "grad_norm": 3.912973705535835,
1316
+ "learning_rate": 1.8992618880565036e-07,
1317
+ "logits/chosen": -2.504817247390747,
1318
+ "logits/rejected": -2.4793152809143066,
1319
+ "logps/chosen": -300.5707702636719,
1320
+ "logps/rejected": -229.734130859375,
1321
+ "loss": 0.4153,
1322
+ "rewards/accuracies": 0.8500000238418579,
1323
+ "rewards/chosen": 0.2345268279314041,
1324
+ "rewards/margins": 0.631380558013916,
1325
+ "rewards/margins_max": 0.886853814125061,
1326
+ "rewards/margins_min": 0.3759072422981262,
1327
+ "rewards/margins_std": 0.3612937033176422,
1328
+ "rewards/rejected": -0.3968536853790283,
1329
+ "step": 660
1330
+ },
1331
+ {
1332
+ "epoch": 1.89,
1333
+ "grad_norm": 3.6522537954747967,
1334
+ "learning_rate": 1.8200181484252885e-07,
1335
+ "logits/chosen": -2.6700520515441895,
1336
+ "logits/rejected": -2.6946229934692383,
1337
+ "logps/chosen": -351.0549621582031,
1338
+ "logps/rejected": -369.70318603515625,
1339
+ "loss": 0.4096,
1340
+ "rewards/accuracies": 0.949999988079071,
1341
+ "rewards/chosen": 0.317019522190094,
1342
+ "rewards/margins": 0.8405925035476685,
1343
+ "rewards/margins_max": 1.086416482925415,
1344
+ "rewards/margins_min": 0.5947685837745667,
1345
+ "rewards/margins_std": 0.34764760732650757,
1346
+ "rewards/rejected": -0.5235730409622192,
1347
+ "step": 670
1348
+ },
1349
+ {
1350
+ "epoch": 1.92,
1351
+ "grad_norm": 5.476131318841013,
1352
+ "learning_rate": 1.7415055935504233e-07,
1353
+ "logits/chosen": -2.7517523765563965,
1354
+ "logits/rejected": -2.605607032775879,
1355
+ "logps/chosen": -362.13763427734375,
1356
+ "logps/rejected": -350.3736267089844,
1357
+ "loss": 0.4002,
1358
+ "rewards/accuracies": 1.0,
1359
+ "rewards/chosen": 0.3122175335884094,
1360
+ "rewards/margins": 0.8455464243888855,
1361
+ "rewards/margins_max": 1.089531660079956,
1362
+ "rewards/margins_min": 0.6015611886978149,
1363
+ "rewards/margins_std": 0.3450471758842468,
1364
+ "rewards/rejected": -0.5333288908004761,
1365
+ "step": 680
1366
+ },
1367
+ {
1368
+ "epoch": 1.94,
1369
+ "grad_norm": 4.5679980033562835,
1370
+ "learning_rate": 1.6638086480134952e-07,
1371
+ "logits/chosen": -2.5971922874450684,
1372
+ "logits/rejected": -2.5536816120147705,
1373
+ "logps/chosen": -225.9965057373047,
1374
+ "logps/rejected": -176.76919555664062,
1375
+ "loss": 0.414,
1376
+ "rewards/accuracies": 0.8999999761581421,
1377
+ "rewards/chosen": 0.10968823730945587,
1378
+ "rewards/margins": 0.5823364853858948,
1379
+ "rewards/margins_max": 0.9098749160766602,
1380
+ "rewards/margins_min": 0.2547979950904846,
1381
+ "rewards/margins_std": 0.4632093906402588,
1382
+ "rewards/rejected": -0.4726482927799225,
1383
+ "step": 690
1384
+ },
1385
+ {
1386
+ "epoch": 1.97,
1387
+ "grad_norm": 4.353608504785364,
1388
+ "learning_rate": 1.5870108593710471e-07,
1389
+ "logits/chosen": -2.530820369720459,
1390
+ "logits/rejected": -2.447427272796631,
1391
+ "logps/chosen": -393.30316162109375,
1392
+ "logps/rejected": -266.6943054199219,
1393
+ "loss": 0.4052,
1394
+ "rewards/accuracies": 0.949999988079071,
1395
+ "rewards/chosen": 0.3366183340549469,
1396
+ "rewards/margins": 0.8534075617790222,
1397
+ "rewards/margins_max": 1.1163585186004639,
1398
+ "rewards/margins_min": 0.5904566049575806,
1399
+ "rewards/margins_std": 0.3718687891960144,
1400
+ "rewards/rejected": -0.5167892575263977,
1401
+ "step": 700
1402
+ },
1403
+ {
1404
+ "epoch": 1.97,
1405
+ "eval_logits/chosen": -2.6818747520446777,
1406
+ "eval_logits/rejected": -2.6436822414398193,
1407
+ "eval_logps/chosen": -310.21429443359375,
1408
+ "eval_logps/rejected": -294.1273498535156,
1409
+ "eval_loss": 0.6605715155601501,
1410
+ "eval_rewards/accuracies": 0.6150793433189392,
1411
+ "eval_rewards/chosen": -0.24993041157722473,
1412
+ "eval_rewards/margins": 0.09951655566692352,
1413
+ "eval_rewards/margins_max": 0.5023199319839478,
1414
+ "eval_rewards/margins_min": -0.30413222312927246,
1415
+ "eval_rewards/margins_std": 0.36039867997169495,
1416
+ "eval_rewards/rejected": -0.34944698214530945,
1417
+ "eval_runtime": 283.0264,
1418
+ "eval_samples_per_second": 7.066,
1419
+ "eval_steps_per_second": 0.223,
1420
+ "step": 700
1421
+ },
1422
+ {
1423
+ "epoch": 2.0,
1424
+ "grad_norm": 3.529614094937248,
1425
+ "learning_rate": 1.5111948083158528e-07,
1426
+ "logits/chosen": -2.599970579147339,
1427
+ "logits/rejected": -2.5100104808807373,
1428
+ "logps/chosen": -284.42437744140625,
1429
+ "logps/rejected": -230.0814666748047,
1430
+ "loss": 0.4001,
1431
+ "rewards/accuracies": 1.0,
1432
+ "rewards/chosen": 0.21849575638771057,
1433
+ "rewards/margins": 0.6818246841430664,
1434
+ "rewards/margins_max": 0.9302431344985962,
1435
+ "rewards/margins_min": 0.4334062933921814,
1436
+ "rewards/margins_std": 0.3513166308403015,
1437
+ "rewards/rejected": -0.4633289873600006,
1438
+ "step": 710
1439
+ },
1440
+ {
1441
+ "epoch": 2.03,
1442
+ "grad_norm": 4.123857744606851,
1443
+ "learning_rate": 1.4364420198778658e-07,
1444
+ "logits/chosen": -2.6770248413085938,
1445
+ "logits/rejected": -2.6365342140197754,
1446
+ "logps/chosen": -330.5859680175781,
1447
+ "logps/rejected": -393.903564453125,
1448
+ "loss": 0.3881,
1449
+ "rewards/accuracies": 1.0,
1450
+ "rewards/chosen": 0.17028996348381042,
1451
+ "rewards/margins": 0.8054014444351196,
1452
+ "rewards/margins_max": 1.1888272762298584,
1453
+ "rewards/margins_min": 0.4219754636287689,
1454
+ "rewards/margins_std": 0.5422461628913879,
1455
+ "rewards/rejected": -0.6351114511489868,
1456
+ "step": 720
1457
+ },
1458
+ {
1459
+ "epoch": 2.06,
1460
+ "grad_norm": 2.4812842763075484,
1461
+ "learning_rate": 1.3628328757603242e-07,
1462
+ "logits/chosen": -2.666334629058838,
1463
+ "logits/rejected": -2.609488010406494,
1464
+ "logps/chosen": -396.9958801269531,
1465
+ "logps/rejected": -314.3293151855469,
1466
+ "loss": 0.3946,
1467
+ "rewards/accuracies": 1.0,
1468
+ "rewards/chosen": 0.25754523277282715,
1469
+ "rewards/margins": 0.8732002973556519,
1470
+ "rewards/margins_max": 1.0483942031860352,
1471
+ "rewards/margins_min": 0.6980066299438477,
1472
+ "rewards/margins_std": 0.2477613389492035,
1473
+ "rewards/rejected": -0.6156551837921143,
1474
+ "step": 730
1475
+ },
1476
+ {
1477
+ "epoch": 2.08,
1478
+ "grad_norm": 3.814814880999332,
1479
+ "learning_rate": 1.2904465279052723e-07,
1480
+ "logits/chosen": -2.7025318145751953,
1481
+ "logits/rejected": -2.6251301765441895,
1482
+ "logps/chosen": -323.8068542480469,
1483
+ "logps/rejected": -283.27886962890625,
1484
+ "loss": 0.3941,
1485
+ "rewards/accuracies": 0.949999988079071,
1486
+ "rewards/chosen": 0.28146830201148987,
1487
+ "rewards/margins": 0.8616737127304077,
1488
+ "rewards/margins_max": 1.2269595861434937,
1489
+ "rewards/margins_min": 0.4963875710964203,
1490
+ "rewards/margins_std": 0.5165923833847046,
1491
+ "rewards/rejected": -0.5802053213119507,
1492
+ "step": 740
1493
+ },
1494
+ {
1495
+ "epoch": 2.11,
1496
+ "grad_norm": 4.630565284841337,
1497
+ "learning_rate": 1.219360813381446e-07,
1498
+ "logits/chosen": -2.5932440757751465,
1499
+ "logits/rejected": -2.5545785427093506,
1500
+ "logps/chosen": -184.56271362304688,
1501
+ "logps/rejected": -184.19284057617188,
1502
+ "loss": 0.3873,
1503
+ "rewards/accuracies": 0.8999999761581421,
1504
+ "rewards/chosen": 0.040130000561475754,
1505
+ "rewards/margins": 0.5156465768814087,
1506
+ "rewards/margins_max": 0.7799071073532104,
1507
+ "rewards/margins_min": 0.2513861656188965,
1508
+ "rewards/margins_std": 0.3737207353115082,
1509
+ "rewards/rejected": -0.4755166471004486,
1510
+ "step": 750
1511
+ },
1512
+ {
1513
+ "epoch": 2.14,
1514
+ "grad_norm": 5.964876046183851,
1515
+ "learning_rate": 1.149652170686039e-07,
1516
+ "logits/chosen": -2.6942923069000244,
1517
+ "logits/rejected": -2.5908329486846924,
1518
+ "logps/chosen": -307.19537353515625,
1519
+ "logps/rejected": -318.4249572753906,
1520
+ "loss": 0.3927,
1521
+ "rewards/accuracies": 0.949999988079071,
1522
+ "rewards/chosen": 0.2586578130722046,
1523
+ "rewards/margins": 0.8149067759513855,
1524
+ "rewards/margins_max": 1.1336301565170288,
1525
+ "rewards/margins_min": 0.496183305978775,
1526
+ "rewards/margins_std": 0.45074301958084106,
1527
+ "rewards/rejected": -0.5562489628791809,
1528
+ "step": 760
1529
+ },
1530
+ {
1531
+ "epoch": 2.17,
1532
+ "grad_norm": 2.3299539026112637,
1533
+ "learning_rate": 1.0813955575503587e-07,
1534
+ "logits/chosen": -2.6312010288238525,
1535
+ "logits/rejected": -2.633394479751587,
1536
+ "logps/chosen": -318.4278869628906,
1537
+ "logps/rejected": -315.31573486328125,
1538
+ "loss": 0.3871,
1539
+ "rewards/accuracies": 0.949999988079071,
1540
+ "rewards/chosen": 0.15208546817302704,
1541
+ "rewards/margins": 0.7286194562911987,
1542
+ "rewards/margins_max": 1.0386446714401245,
1543
+ "rewards/margins_min": 0.4185941815376282,
1544
+ "rewards/margins_std": 0.4384419322013855,
1545
+ "rewards/rejected": -0.5765339732170105,
1546
+ "step": 770
1547
+ },
1548
+ {
1549
+ "epoch": 2.2,
1550
+ "grad_norm": 3.867334844691413,
1551
+ "learning_rate": 1.0146643703377486e-07,
1552
+ "logits/chosen": -2.7098231315612793,
1553
+ "logits/rejected": -2.597475290298462,
1554
+ "logps/chosen": -300.3232421875,
1555
+ "logps/rejected": -268.108154296875,
1556
+ "loss": 0.4025,
1557
+ "rewards/accuracies": 0.949999988079071,
1558
+ "rewards/chosen": 0.26718658208847046,
1559
+ "rewards/margins": 0.6681410670280457,
1560
+ "rewards/margins_max": 0.9694315791130066,
1561
+ "rewards/margins_min": 0.36685043573379517,
1562
+ "rewards/margins_std": 0.42608922719955444,
1563
+ "rewards/rejected": -0.4009544253349304,
1564
+ "step": 780
1565
+ },
1566
+ {
1567
+ "epoch": 2.23,
1568
+ "grad_norm": 4.14967296184984,
1569
+ "learning_rate": 9.495303651204494e-08,
1570
+ "logits/chosen": -2.6531028747558594,
1571
+ "logits/rejected": -2.603677272796631,
1572
+ "logps/chosen": -339.468017578125,
1573
+ "logps/rejected": -333.7980651855469,
1574
+ "loss": 0.3762,
1575
+ "rewards/accuracies": 1.0,
1576
+ "rewards/chosen": 0.2651078999042511,
1577
+ "rewards/margins": 0.8354951739311218,
1578
+ "rewards/margins_max": 1.0252331495285034,
1579
+ "rewards/margins_min": 0.6457570791244507,
1580
+ "rewards/margins_std": 0.2683301866054535,
1581
+ "rewards/rejected": -0.5703873634338379,
1582
+ "step": 790
1583
+ },
1584
+ {
1585
+ "epoch": 2.25,
1586
+ "grad_norm": 3.7983369249832113,
1587
+ "learning_rate": 8.860635805202615e-08,
1588
+ "logits/chosen": -2.6702463626861572,
1589
+ "logits/rejected": -2.6012625694274902,
1590
+ "logps/chosen": -335.74969482421875,
1591
+ "logps/rejected": -302.484375,
1592
+ "loss": 0.3797,
1593
+ "rewards/accuracies": 1.0,
1594
+ "rewards/chosen": 0.2764872908592224,
1595
+ "rewards/margins": 0.8479849100112915,
1596
+ "rewards/margins_max": 1.1567089557647705,
1597
+ "rewards/margins_min": 0.5392606854438782,
1598
+ "rewards/margins_std": 0.4366018772125244,
1599
+ "rewards/rejected": -0.5714975595474243,
1600
+ "step": 800
1601
+ },
1602
+ {
1603
+ "epoch": 2.25,
1604
+ "eval_logits/chosen": -2.6750190258026123,
1605
+ "eval_logits/rejected": -2.637268304824829,
1606
+ "eval_logps/chosen": -312.3301086425781,
1607
+ "eval_logps/rejected": -296.3420104980469,
1608
+ "eval_loss": 0.6601380705833435,
1609
+ "eval_rewards/accuracies": 0.6150793433189392,
1610
+ "eval_rewards/chosen": -0.27108922600746155,
1611
+ "eval_rewards/margins": 0.1005045622587204,
1612
+ "eval_rewards/margins_max": 0.5193765759468079,
1613
+ "eval_rewards/margins_min": -0.3193714916706085,
1614
+ "eval_rewards/margins_std": 0.3749600052833557,
1615
+ "eval_rewards/rejected": -0.37159380316734314,
1616
+ "eval_runtime": 283.5005,
1617
+ "eval_samples_per_second": 7.055,
1618
+ "eval_steps_per_second": 0.222,
1619
+ "step": 800
1620
+ },
1621
+ {
1622
+ "epoch": 2.28,
1623
+ "grad_norm": 4.752467066908081,
1624
+ "learning_rate": 8.24332262395994e-08,
1625
+ "logits/chosen": -2.736827850341797,
1626
+ "logits/rejected": -2.6829886436462402,
1627
+ "logps/chosen": -289.7952575683594,
1628
+ "logps/rejected": -302.0584411621094,
1629
+ "loss": 0.4061,
1630
+ "rewards/accuracies": 1.0,
1631
+ "rewards/chosen": 0.04772118479013443,
1632
+ "rewards/margins": 0.6294242739677429,
1633
+ "rewards/margins_max": 1.0222015380859375,
1634
+ "rewards/margins_min": 0.2366468459367752,
1635
+ "rewards/margins_std": 0.5554711222648621,
1636
+ "rewards/rejected": -0.5817030668258667,
1637
+ "step": 810
1638
+ },
1639
+ {
1640
+ "epoch": 2.31,
1641
+ "grad_norm": 4.046564156513315,
1642
+ "learning_rate": 7.644027904586586e-08,
1643
+ "logits/chosen": -2.7101123332977295,
1644
+ "logits/rejected": -2.640636682510376,
1645
+ "logps/chosen": -296.4427795410156,
1646
+ "logps/rejected": -262.5876770019531,
1647
+ "loss": 0.3872,
1648
+ "rewards/accuracies": 0.949999988079071,
1649
+ "rewards/chosen": 0.0908295139670372,
1650
+ "rewards/margins": 0.6406592130661011,
1651
+ "rewards/margins_max": 0.9368668794631958,
1652
+ "rewards/margins_min": 0.34445151686668396,
1653
+ "rewards/margins_std": 0.41890090703964233,
1654
+ "rewards/rejected": -0.5498296618461609,
1655
+ "step": 820
1656
+ },
1657
+ {
1658
+ "epoch": 2.34,
1659
+ "grad_norm": 5.0853428646839145,
1660
+ "learning_rate": 7.063396068933469e-08,
1661
+ "logits/chosen": -2.692904233932495,
1662
+ "logits/rejected": -2.6098153591156006,
1663
+ "logps/chosen": -424.69122314453125,
1664
+ "logps/rejected": -314.2643737792969,
1665
+ "loss": 0.3818,
1666
+ "rewards/accuracies": 0.8999999761581421,
1667
+ "rewards/chosen": 0.29205161333084106,
1668
+ "rewards/margins": 0.8924616575241089,
1669
+ "rewards/margins_max": 1.109025239944458,
1670
+ "rewards/margins_min": 0.675898015499115,
1671
+ "rewards/margins_std": 0.30626723170280457,
1672
+ "rewards/rejected": -0.600409984588623,
1673
+ "step": 830
1674
+ },
1675
+ {
1676
+ "epoch": 2.37,
1677
+ "grad_norm": 5.027960418606071,
1678
+ "learning_rate": 6.502051470645148e-08,
1679
+ "logits/chosen": -2.8256278038024902,
1680
+ "logits/rejected": -2.6598448753356934,
1681
+ "logps/chosen": -369.71649169921875,
1682
+ "logps/rejected": -345.41119384765625,
1683
+ "loss": 0.4041,
1684
+ "rewards/accuracies": 0.8999999761581421,
1685
+ "rewards/chosen": 0.2174655646085739,
1686
+ "rewards/margins": 0.8193850517272949,
1687
+ "rewards/margins_max": 1.032546877861023,
1688
+ "rewards/margins_min": 0.6062231063842773,
1689
+ "rewards/margins_std": 0.3014564514160156,
1690
+ "rewards/rejected": -0.6019193530082703,
1691
+ "step": 840
1692
+ },
1693
+ {
1694
+ "epoch": 2.39,
1695
+ "grad_norm": 4.090791288278811,
1696
+ "learning_rate": 5.960597723792194e-08,
1697
+ "logits/chosen": -2.6982929706573486,
1698
+ "logits/rejected": -2.5838606357574463,
1699
+ "logps/chosen": -351.38800048828125,
1700
+ "logps/rejected": -319.4389343261719,
1701
+ "loss": 0.3891,
1702
+ "rewards/accuracies": 1.0,
1703
+ "rewards/chosen": 0.3367738127708435,
1704
+ "rewards/margins": 0.9505535364151001,
1705
+ "rewards/margins_max": 1.2508819103240967,
1706
+ "rewards/margins_min": 0.6502249836921692,
1707
+ "rewards/margins_std": 0.4247285723686218,
1708
+ "rewards/rejected": -0.6137796640396118,
1709
+ "step": 850
1710
+ },
1711
+ {
1712
+ "epoch": 2.42,
1713
+ "grad_norm": 3.7885130179056823,
1714
+ "learning_rate": 5.4396170538046486e-08,
1715
+ "logits/chosen": -2.738215923309326,
1716
+ "logits/rejected": -2.669740915298462,
1717
+ "logps/chosen": -329.03961181640625,
1718
+ "logps/rejected": -324.8470458984375,
1719
+ "loss": 0.3743,
1720
+ "rewards/accuracies": 0.949999988079071,
1721
+ "rewards/chosen": 0.263808935880661,
1722
+ "rewards/margins": 0.8292306661605835,
1723
+ "rewards/margins_max": 1.189160943031311,
1724
+ "rewards/margins_min": 0.46930059790611267,
1725
+ "rewards/margins_std": 0.509018063545227,
1726
+ "rewards/rejected": -0.5654217004776001,
1727
+ "step": 860
1728
+ },
1729
+ {
1730
+ "epoch": 2.45,
1731
+ "grad_norm": 4.639054877280387,
1732
+ "learning_rate": 4.93966967140487e-08,
1733
+ "logits/chosen": -2.6751439571380615,
1734
+ "logits/rejected": -2.5972399711608887,
1735
+ "logps/chosen": -319.5000305175781,
1736
+ "logps/rejected": -372.52154541015625,
1737
+ "loss": 0.3717,
1738
+ "rewards/accuracies": 1.0,
1739
+ "rewards/chosen": 0.2105383574962616,
1740
+ "rewards/margins": 0.7771679759025574,
1741
+ "rewards/margins_max": 1.026515245437622,
1742
+ "rewards/margins_min": 0.5278206467628479,
1743
+ "rewards/margins_std": 0.35263046622276306,
1744
+ "rewards/rejected": -0.5666295289993286,
1745
+ "step": 870
1746
+ },
1747
+ {
1748
+ "epoch": 2.48,
1749
+ "grad_norm": 4.002088954960643,
1750
+ "learning_rate": 4.4612931702126433e-08,
1751
+ "logits/chosen": -2.7306954860687256,
1752
+ "logits/rejected": -2.65733003616333,
1753
+ "logps/chosen": -293.833984375,
1754
+ "logps/rejected": -305.6811828613281,
1755
+ "loss": 0.381,
1756
+ "rewards/accuracies": 1.0,
1757
+ "rewards/chosen": 0.2338138073682785,
1758
+ "rewards/margins": 0.8368278741836548,
1759
+ "rewards/margins_max": 1.1013085842132568,
1760
+ "rewards/margins_min": 0.5723473429679871,
1761
+ "rewards/margins_std": 0.37403208017349243,
1762
+ "rewards/rejected": -0.6030141115188599,
1763
+ "step": 880
1764
+ },
1765
+ {
1766
+ "epoch": 2.51,
1767
+ "grad_norm": 4.02403903649509,
1768
+ "learning_rate": 4.005001948670605e-08,
1769
+ "logits/chosen": -2.7370591163635254,
1770
+ "logits/rejected": -2.673333168029785,
1771
+ "logps/chosen": -437.3260192871094,
1772
+ "logps/rejected": -393.738037109375,
1773
+ "loss": 0.3683,
1774
+ "rewards/accuracies": 1.0,
1775
+ "rewards/chosen": 0.396183043718338,
1776
+ "rewards/margins": 1.0729153156280518,
1777
+ "rewards/margins_max": 1.3086248636245728,
1778
+ "rewards/margins_min": 0.8372055292129517,
1779
+ "rewards/margins_std": 0.33334383368492126,
1780
+ "rewards/rejected": -0.6767321825027466,
1781
+ "step": 890
1782
+ },
1783
+ {
1784
+ "epoch": 2.54,
1785
+ "grad_norm": 3.0814471446760416,
1786
+ "learning_rate": 3.571286656911376e-08,
1787
+ "logits/chosen": -2.713385820388794,
1788
+ "logits/rejected": -2.5615978240966797,
1789
+ "logps/chosen": -364.4679260253906,
1790
+ "logps/rejected": -330.7204284667969,
1791
+ "loss": 0.3692,
1792
+ "rewards/accuracies": 1.0,
1793
+ "rewards/chosen": 0.35871466994285583,
1794
+ "rewards/margins": 1.0124990940093994,
1795
+ "rewards/margins_max": 1.2225067615509033,
1796
+ "rewards/margins_min": 0.8024913668632507,
1797
+ "rewards/margins_std": 0.2969956696033478,
1798
+ "rewards/rejected": -0.6537843942642212,
1799
+ "step": 900
1800
+ },
1801
+ {
1802
+ "epoch": 2.54,
1803
+ "eval_logits/chosen": -2.6729512214660645,
1804
+ "eval_logits/rejected": -2.635655164718628,
1805
+ "eval_logps/chosen": -314.3625793457031,
1806
+ "eval_logps/rejected": -298.2907409667969,
1807
+ "eval_loss": 0.6601293683052063,
1808
+ "eval_rewards/accuracies": 0.60317462682724,
1809
+ "eval_rewards/chosen": -0.29141345620155334,
1810
+ "eval_rewards/margins": 0.09966748207807541,
1811
+ "eval_rewards/margins_max": 0.5207357406616211,
1812
+ "eval_rewards/margins_min": -0.33025211095809937,
1813
+ "eval_rewards/margins_std": 0.38038700819015503,
1814
+ "eval_rewards/rejected": -0.39108091592788696,
1815
+ "eval_runtime": 282.3187,
1816
+ "eval_samples_per_second": 7.084,
1817
+ "eval_steps_per_second": 0.223,
1818
+ "step": 900
1819
+ },
1820
+ {
1821
+ "epoch": 2.56,
1822
+ "grad_norm": 4.753390858655679,
1823
+ "learning_rate": 3.160613669161255e-08,
1824
+ "logits/chosen": -2.7725110054016113,
1825
+ "logits/rejected": -2.663914442062378,
1826
+ "logps/chosen": -388.418701171875,
1827
+ "logps/rejected": -295.52630615234375,
1828
+ "loss": 0.3675,
1829
+ "rewards/accuracies": 0.949999988079071,
1830
+ "rewards/chosen": 0.3076331317424774,
1831
+ "rewards/margins": 0.8788021206855774,
1832
+ "rewards/margins_max": 1.1915054321289062,
1833
+ "rewards/margins_min": 0.566098690032959,
1834
+ "rewards/margins_std": 0.44222941994667053,
1835
+ "rewards/rejected": -0.5711689591407776,
1836
+ "step": 910
1837
+ },
1838
+ {
1839
+ "epoch": 2.59,
1840
+ "grad_norm": 4.537990633918307,
1841
+ "learning_rate": 2.7734245822478436e-08,
1842
+ "logits/chosen": -2.663756847381592,
1843
+ "logits/rejected": -2.5272915363311768,
1844
+ "logps/chosen": -328.06951904296875,
1845
+ "logps/rejected": -251.6117706298828,
1846
+ "loss": 0.389,
1847
+ "rewards/accuracies": 0.949999988079071,
1848
+ "rewards/chosen": 0.32800599932670593,
1849
+ "rewards/margins": 0.918967604637146,
1850
+ "rewards/margins_max": 1.2975780963897705,
1851
+ "rewards/margins_min": 0.5403571724891663,
1852
+ "rewards/margins_std": 0.5354360342025757,
1853
+ "rewards/rejected": -0.5909615755081177,
1854
+ "step": 920
1855
+ },
1856
+ {
1857
+ "epoch": 2.62,
1858
+ "grad_norm": 3.641037815248602,
1859
+ "learning_rate": 2.410135740750821e-08,
1860
+ "logits/chosen": -2.7181553840637207,
1861
+ "logits/rejected": -2.6694235801696777,
1862
+ "logps/chosen": -324.9427795410156,
1863
+ "logps/rejected": -329.7734069824219,
1864
+ "loss": 0.3722,
1865
+ "rewards/accuracies": 1.0,
1866
+ "rewards/chosen": 0.21663251519203186,
1867
+ "rewards/margins": 0.817857563495636,
1868
+ "rewards/margins_max": 1.1236426830291748,
1869
+ "rewards/margins_min": 0.5120726823806763,
1870
+ "rewards/margins_std": 0.432445228099823,
1871
+ "rewards/rejected": -0.6012250185012817,
1872
+ "step": 930
1873
+ },
1874
+ {
1875
+ "epoch": 2.65,
1876
+ "grad_norm": 5.388181978682642,
1877
+ "learning_rate": 2.071137789306418e-08,
1878
+ "logits/chosen": -2.69647479057312,
1879
+ "logits/rejected": -2.595615863800049,
1880
+ "logps/chosen": -332.2839660644531,
1881
+ "logps/rejected": -261.37774658203125,
1882
+ "loss": 0.3866,
1883
+ "rewards/accuracies": 0.8500000238418579,
1884
+ "rewards/chosen": 0.06930097192525864,
1885
+ "rewards/margins": 0.5811508893966675,
1886
+ "rewards/margins_max": 0.9195789098739624,
1887
+ "rewards/margins_min": 0.24272289872169495,
1888
+ "rewards/margins_std": 0.47860950231552124,
1889
+ "rewards/rejected": -0.5118499398231506,
1890
+ "step": 940
1891
+ },
1892
+ {
1893
+ "epoch": 2.68,
1894
+ "grad_norm": 3.1225637476135146,
1895
+ "learning_rate": 1.7567952525471107e-08,
1896
+ "logits/chosen": -2.6809592247009277,
1897
+ "logits/rejected": -2.582211971282959,
1898
+ "logps/chosen": -279.14013671875,
1899
+ "logps/rejected": -261.57684326171875,
1900
+ "loss": 0.4137,
1901
+ "rewards/accuracies": 0.949999988079071,
1902
+ "rewards/chosen": 0.23004937171936035,
1903
+ "rewards/margins": 0.817657470703125,
1904
+ "rewards/margins_max": 1.0822389125823975,
1905
+ "rewards/margins_min": 0.5530759692192078,
1906
+ "rewards/margins_std": 0.37417474389076233,
1907
+ "rewards/rejected": -0.5876081585884094,
1908
+ "step": 950
1909
+ },
1910
+ {
1911
+ "epoch": 2.7,
1912
+ "grad_norm": 4.811402431024813,
1913
+ "learning_rate": 1.467446143128101e-08,
1914
+ "logits/chosen": -2.838263988494873,
1915
+ "logits/rejected": -2.7585349082946777,
1916
+ "logps/chosen": -307.2536926269531,
1917
+ "logps/rejected": -298.3413391113281,
1918
+ "loss": 0.3842,
1919
+ "rewards/accuracies": 0.949999988079071,
1920
+ "rewards/chosen": 0.19640304148197174,
1921
+ "rewards/margins": 0.740566074848175,
1922
+ "rewards/margins_max": 0.9685649871826172,
1923
+ "rewards/margins_min": 0.5125672817230225,
1924
+ "rewards/margins_std": 0.322439044713974,
1925
+ "rewards/rejected": -0.5441631078720093,
1926
+ "step": 960
1927
+ },
1928
+ {
1929
+ "epoch": 2.73,
1930
+ "grad_norm": 3.9661308717435024,
1931
+ "learning_rate": 1.2034015982622243e-08,
1932
+ "logits/chosen": -2.7469522953033447,
1933
+ "logits/rejected": -2.6567368507385254,
1934
+ "logps/chosen": -346.93096923828125,
1935
+ "logps/rejected": -366.0008544921875,
1936
+ "loss": 0.3761,
1937
+ "rewards/accuracies": 1.0,
1938
+ "rewards/chosen": 0.2820577621459961,
1939
+ "rewards/margins": 0.9072920680046082,
1940
+ "rewards/margins_max": 1.2230002880096436,
1941
+ "rewards/margins_min": 0.5915838479995728,
1942
+ "rewards/margins_std": 0.4464789032936096,
1943
+ "rewards/rejected": -0.6252343058586121,
1944
+ "step": 970
1945
+ },
1946
+ {
1947
+ "epoch": 2.76,
1948
+ "grad_norm": 4.504014449792588,
1949
+ "learning_rate": 9.649455451539418e-09,
1950
+ "logits/chosen": -2.5819973945617676,
1951
+ "logits/rejected": -2.5717263221740723,
1952
+ "logps/chosen": -221.2248077392578,
1953
+ "logps/rejected": -235.7423095703125,
1954
+ "loss": 0.3754,
1955
+ "rewards/accuracies": 1.0,
1956
+ "rewards/chosen": 0.16414210200309753,
1957
+ "rewards/margins": 0.7779913544654846,
1958
+ "rewards/margins_max": 1.047263741493225,
1959
+ "rewards/margins_min": 0.5087189674377441,
1960
+ "rewards/margins_std": 0.3808087408542633,
1961
+ "rewards/rejected": -0.6138492822647095,
1962
+ "step": 980
1963
+ },
1964
+ {
1965
+ "epoch": 2.79,
1966
+ "grad_norm": 4.358026111246204,
1967
+ "learning_rate": 7.523343956923194e-09,
1968
+ "logits/chosen": -2.758552312850952,
1969
+ "logits/rejected": -2.7074084281921387,
1970
+ "logps/chosen": -324.2768249511719,
1971
+ "logps/rejected": -352.4093933105469,
1972
+ "loss": 0.3805,
1973
+ "rewards/accuracies": 1.0,
1974
+ "rewards/chosen": 0.27801355719566345,
1975
+ "rewards/margins": 0.9486692547798157,
1976
+ "rewards/margins_max": 1.2882227897644043,
1977
+ "rewards/margins_min": 0.6091155409812927,
1978
+ "rewards/margins_std": 0.4802014231681824,
1979
+ "rewards/rejected": -0.6706556677818298,
1980
+ "step": 990
1981
+ },
1982
+ {
1983
+ "epoch": 2.82,
1984
+ "grad_norm": 5.5377851770132835,
1985
+ "learning_rate": 5.6579677073121945e-09,
1986
+ "logits/chosen": -2.6293694972991943,
1987
+ "logits/rejected": -2.630532741546631,
1988
+ "logps/chosen": -272.677490234375,
1989
+ "logps/rejected": -346.5768737792969,
1990
+ "loss": 0.3953,
1991
+ "rewards/accuracies": 0.949999988079071,
1992
+ "rewards/chosen": 0.09248457849025726,
1993
+ "rewards/margins": 0.6961058378219604,
1994
+ "rewards/margins_max": 1.1211340427398682,
1995
+ "rewards/margins_min": 0.27107757329940796,
1996
+ "rewards/margins_std": 0.6010805368423462,
1997
+ "rewards/rejected": -0.603621244430542,
1998
+ "step": 1000
1999
+ },
2000
+ {
2001
+ "epoch": 2.82,
2002
+ "eval_logits/chosen": -2.672739028930664,
2003
+ "eval_logits/rejected": -2.6355843544006348,
2004
+ "eval_logps/chosen": -315.5808410644531,
2005
+ "eval_logps/rejected": -299.26385498046875,
2006
+ "eval_loss": 0.6607216000556946,
2007
+ "eval_rewards/accuracies": 0.60317462682724,
2008
+ "eval_rewards/chosen": -0.3035964369773865,
2009
+ "eval_rewards/margins": 0.09721562266349792,
2010
+ "eval_rewards/margins_max": 0.5192965269088745,
2011
+ "eval_rewards/margins_min": -0.3338033854961395,
2012
+ "eval_rewards/margins_std": 0.38074997067451477,
2013
+ "eval_rewards/rejected": -0.4008120894432068,
2014
+ "eval_runtime": 282.6795,
2015
+ "eval_samples_per_second": 7.075,
2016
+ "eval_steps_per_second": 0.223,
2017
+ "step": 1000
2018
+ },
2019
+ {
2020
+ "epoch": 2.85,
2021
+ "grad_norm": 4.070107248003903,
2022
+ "learning_rate": 4.0553325425319585e-09,
2023
+ "logits/chosen": -2.742405891418457,
2024
+ "logits/rejected": -2.6809780597686768,
2025
+ "logps/chosen": -293.8223571777344,
2026
+ "logps/rejected": -319.0755615234375,
2027
+ "loss": 0.397,
2028
+ "rewards/accuracies": 0.949999988079071,
2029
+ "rewards/chosen": 0.21844346821308136,
2030
+ "rewards/margins": 0.8143243789672852,
2031
+ "rewards/margins_max": 1.0633985996246338,
2032
+ "rewards/margins_min": 0.565250039100647,
2033
+ "rewards/margins_std": 0.35224413871765137,
2034
+ "rewards/rejected": -0.5958808064460754,
2035
+ "step": 1010
2036
+ },
2037
+ {
2038
+ "epoch": 2.87,
2039
+ "grad_norm": 5.24767851592669,
2040
+ "learning_rate": 2.717161776814747e-09,
2041
+ "logits/chosen": -2.689424991607666,
2042
+ "logits/rejected": -2.602613687515259,
2043
+ "logps/chosen": -291.1136169433594,
2044
+ "logps/rejected": -294.42724609375,
2045
+ "loss": 0.3843,
2046
+ "rewards/accuracies": 0.8500000238418579,
2047
+ "rewards/chosen": 0.20367774367332458,
2048
+ "rewards/margins": 0.8175477981567383,
2049
+ "rewards/margins_max": 1.1360722780227661,
2050
+ "rewards/margins_min": 0.49902334809303284,
2051
+ "rewards/margins_std": 0.45046156644821167,
2052
+ "rewards/rejected": -0.6138700246810913,
2053
+ "step": 1020
2054
+ },
2055
+ {
2056
+ "epoch": 2.9,
2057
+ "grad_norm": 1.421102544538182,
2058
+ "learning_rate": 1.6448943457189613e-09,
2059
+ "logits/chosen": -2.7164969444274902,
2060
+ "logits/rejected": -2.6720075607299805,
2061
+ "logps/chosen": -343.25335693359375,
2062
+ "logps/rejected": -334.4537353515625,
2063
+ "loss": 0.3888,
2064
+ "rewards/accuracies": 1.0,
2065
+ "rewards/chosen": 0.21914701163768768,
2066
+ "rewards/margins": 0.7984877824783325,
2067
+ "rewards/margins_max": 1.024283528327942,
2068
+ "rewards/margins_min": 0.5726920366287231,
2069
+ "rewards/margins_std": 0.3193233907222748,
2070
+ "rewards/rejected": -0.5793408155441284,
2071
+ "step": 1030
2072
+ },
2073
+ {
2074
+ "epoch": 2.93,
2075
+ "grad_norm": 7.5028345473638955,
2076
+ "learning_rate": 8.396832588411229e-10,
2077
+ "logits/chosen": -2.589337110519409,
2078
+ "logits/rejected": -2.491760492324829,
2079
+ "logps/chosen": -321.55230712890625,
2080
+ "logps/rejected": -292.79095458984375,
2081
+ "loss": 0.3988,
2082
+ "rewards/accuracies": 1.0,
2083
+ "rewards/chosen": 0.24667803943157196,
2084
+ "rewards/margins": 0.8878324627876282,
2085
+ "rewards/margins_max": 1.2634468078613281,
2086
+ "rewards/margins_min": 0.512218177318573,
2087
+ "rewards/margins_std": 0.5311988592147827,
2088
+ "rewards/rejected": -0.6411544680595398,
2089
+ "step": 1040
2090
+ },
2091
+ {
2092
+ "epoch": 2.96,
2093
+ "grad_norm": 4.399778551251054,
2094
+ "learning_rate": 3.0239435998430374e-10,
2095
+ "logits/chosen": -2.7059104442596436,
2096
+ "logits/rejected": -2.6047158241271973,
2097
+ "logps/chosen": -310.2845458984375,
2098
+ "logps/rejected": -317.72857666015625,
2099
+ "loss": 0.3805,
2100
+ "rewards/accuracies": 1.0,
2101
+ "rewards/chosen": 0.21300096809864044,
2102
+ "rewards/margins": 0.8123617172241211,
2103
+ "rewards/margins_max": 1.104002594947815,
2104
+ "rewards/margins_min": 0.5207208395004272,
2105
+ "rewards/margins_std": 0.41244250535964966,
2106
+ "rewards/rejected": -0.5993608236312866,
2107
+ "step": 1050
2108
+ },
2109
+ {
2110
+ "epoch": 2.99,
2111
+ "grad_norm": 5.5908898228484585,
2112
+ "learning_rate": 3.360539611582669e-11,
2113
+ "logits/chosen": -2.64300799369812,
2114
+ "logits/rejected": -2.5719330310821533,
2115
+ "logps/chosen": -306.250244140625,
2116
+ "logps/rejected": -268.78790283203125,
2117
+ "loss": 0.402,
2118
+ "rewards/accuracies": 0.949999988079071,
2119
+ "rewards/chosen": 0.19606786966323853,
2120
+ "rewards/margins": 0.7896009683609009,
2121
+ "rewards/margins_max": 1.1580379009246826,
2122
+ "rewards/margins_min": 0.4211638569831848,
2123
+ "rewards/margins_std": 0.5210486650466919,
2124
+ "rewards/rejected": -0.5935330390930176,
2125
+ "step": 1060
2126
+ },
2127
+ {
2128
+ "epoch": 3.0,
2129
+ "step": 1065,
2130
+ "total_flos": 0.0,
2131
+ "train_loss": 0.4860824931955114,
2132
+ "train_runtime": 9341.4813,
2133
+ "train_samples_per_second": 1.823,
2134
+ "train_steps_per_second": 0.114
2135
+ }
2136
+ ],
2137
+ "logging_steps": 10,
2138
+ "max_steps": 1065,
2139
+ "num_input_tokens_seen": 0,
2140
+ "num_train_epochs": 3,
2141
+ "save_steps": 100,
2142
+ "total_flos": 0.0,
2143
+ "train_batch_size": 2,
2144
+ "trial_name": null,
2145
+ "trial_params": null
2146
+ }
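
A minimal sketch of how this `trainer_state.json` log could be inspected offline. It assumes the standard `log_history` layout written by the Hugging Face Trainer and a local copy of the file next to the script; the field names (`loss`, `eval_loss`, `eval_rewards/*`) are taken from the entries above, and the path is illustrative.

```python
# Sketch: inspect the DPO training log in trainer_state.json (standard library only).
# Assumption: the file was downloaded locally; adjust the path for your checkout.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Each dict in log_history is either a training step (has a "loss" key)
# or an evaluation step (has an "eval_loss" key); the final summary entry
# carries "train_loss" instead and is therefore excluded from both lists.
train_steps = [e for e in state["log_history"] if "loss" in e]
eval_steps = [e for e in state["log_history"] if "eval_loss" in e]

print(f"{len(train_steps)} training entries, {len(eval_steps)} eval entries")
print("final eval loss:", eval_steps[-1]["eval_loss"])
print("final eval reward accuracy:", eval_steps[-1]["eval_rewards/accuracies"])

# Up to batch averaging, the logged reward margin is the gap between the
# chosen and rejected rewards, which the last eval entry reflects:
last = eval_steps[-1]
approx_margin = last["eval_rewards/chosen"] - last["eval_rewards/rejected"]
print("chosen - rejected (approx):", approx_margin,
      "vs logged margin:", last["eval_rewards/margins"])
```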