Dongwei committed
Commit a73762d · verified · 1 Parent(s): 6c50d3f

Model save

README.md CHANGED
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/dongwei_jiang/huggingface/runs/a455kyhz)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/dongwei_jiang/huggingface/runs/r6vxw2n1)
 
 
 This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
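For context on the training procedure referenced above, below is a minimal sketch of a GRPO run using TRL's `GRPOTrainer`. This is an assumption about the setup, not the script behind this commit: the dataset, base model id, config values, and both reward functions are illustrative placeholders whose names only echo the `rewards/accuracy_reward` and `rewards/format_reward` columns logged in trainer_state.json.

```python
# Minimal GRPO sketch with TRL (illustrative; not the exact recipe used for this run).
from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer

# Any dataset with a "prompt" column works; this one is taken from the TRL docs.
dataset = load_dataset("trl-lib/tldr", split="train")

def accuracy_reward(completions, **kwargs):
    # Placeholder heuristic: reward non-empty completions. A real recipe would
    # verify the generated answer against a reference solution.
    return [1.0 if c.strip() else 0.0 for c in completions]

def format_reward(completions, **kwargs):
    # Placeholder heuristic: reward completions that fit in a single paragraph.
    return [1.0 if "\n\n" not in c else 0.0 for c in completions]

args = GRPOConfig(output_dir="grpo-sketch", learning_rate=2e-5)  # illustrative values
trainer = GRPOTrainer(
    model="Qwen/Qwen2.5-0.5B-Instruct",  # placeholder base model
    reward_funcs=[accuracy_reward, format_reward],
    args=args,
    train_dataset=dataset,
)
trainer.train()
```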
all_results.json CHANGED
@@ -5,9 +5,9 @@
     "eval_samples_per_second": 0.372,
     "eval_steps_per_second": 0.053,
     "total_flos": 0.0,
-    "train_loss": 0.0010332457457126966,
-    "train_runtime": 12336.334,
+    "train_loss": 0.001102253468012187,
+    "train_runtime": 12358.4402,
     "train_samples": 7500,
-    "train_samples_per_second": 0.608,
+    "train_samples_per_second": 0.607,
     "train_steps_per_second": 0.005
 }
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:43f4a4415894eb44a772f2a952f60dfd74014ce17da021a268db1c61ff0e8431
+oid sha256:1d16cf0a5855474c715f5400c54f272528e5f7582f8490aabaa3b195585646d1
 size 4877660776
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:850e15aa199993ff28b9a5ac3fd5895348dc4b6d174fe973c4d6beb106667e16
+oid sha256:77c341d2b8e43a12cdb2b96d79f7e71ae597157db282a048d831599bca5b0081
 size 4932751008
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b91fadec7880886ede2a0d8efec62f95e3342b36161972a121bc0ca58563026b
+oid sha256:287ad58b8bafccdef834389401693a93f4c6e9b26009e2275ecb9dd777a4cd7c
 size 4330865200
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:46189c667de83c221c22d722a17469650d924b0a82aaf2bba1387775d3ccd4f1
+oid sha256:f06ae450d81394bd9715da3799234c1b1d93d4d2600f13859bc40677b6007c73
 size 1089994880
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 0.0,
-    "train_loss": 0.0010332457457126966,
-    "train_runtime": 12336.334,
+    "train_loss": 0.001102253468012187,
+    "train_runtime": 12358.4402,
     "train_samples": 7500,
-    "train_samples_per_second": 0.608,
+    "train_samples_per_second": 0.607,
     "train_steps_per_second": 0.005
 }
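As a quick sanity check on the updated metrics (an illustrative verification, not part of the commit), the new throughput follows directly from the new runtime: 7500 training samples over 12358.4402 seconds is roughly 0.607 samples per second.

```python
# Illustrative check: reported throughput matches train_samples / train_runtime
# from the updated train_results.json.
train_samples = 7500
train_runtime = 12358.4402  # seconds
print(round(train_samples / train_runtime, 3))  # -> 0.607
```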
trainer_state.json CHANGED
@@ -9,96 +9,96 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "completion_length": 707.4456775665283,
+      "completion_length": 708.9171075820923,
       "epoch": 0.14925373134328357,
-      "grad_norm": 0.06321819126605988,
-      "kl": 0.006711304187774658,
+      "grad_norm": 0.06562932580709457,
+      "kl": 0.005872213840484619,
       "learning_rate": 1.9876883405951378e-05,
-      "loss": 0.0003,
-      "reward": 0.5986607405939139,
-      "reward_std": 0.16735767447389663,
-      "rewards/accuracy_reward": 0.5986607405939139,
+      "loss": 0.0002,
+      "reward": 0.603348241513595,
+      "reward_std": 0.16946082445792854,
+      "rewards/accuracy_reward": 0.603348241513595,
       "rewards/format_reward": 0.0,
       "step": 10
     },
     {
-      "completion_length": 675.7138690948486,
+      "completion_length": 682.0505897521973,
       "epoch": 0.29850746268656714,
-      "grad_norm": 0.07178156822919846,
-      "kl": 0.025733184814453126,
+      "grad_norm": 0.05642814189195633,
+      "kl": 0.024619293212890626,
       "learning_rate": 1.777145961456971e-05,
       "loss": 0.001,
-      "reward": 0.6954241393133997,
-      "reward_std": 0.16923737577162684,
-      "rewards/accuracy_reward": 0.6954241393133997,
+      "reward": 0.7039062824100256,
+      "reward_std": 0.162486822437495,
+      "rewards/accuracy_reward": 0.7039062824100256,
       "rewards/format_reward": 0.0,
       "step": 20
     },
     {
-      "completion_length": 685.2908779144287,
+      "completion_length": 680.5775978088379,
       "epoch": 0.44776119402985076,
-      "grad_norm": 0.07352711260318756,
-      "kl": 0.02706298828125,
+      "grad_norm": 0.07271964848041534,
+      "kl": 0.029143524169921876,
       "learning_rate": 1.3583679495453e-05,
-      "loss": 0.0011,
-      "reward": 0.6889509245753288,
-      "reward_std": 0.15610567890107632,
-      "rewards/accuracy_reward": 0.6889509245753288,
+      "loss": 0.0012,
+      "reward": 0.6966518176719546,
+      "reward_std": 0.15723272264003754,
+      "rewards/accuracy_reward": 0.6966518176719546,
       "rewards/format_reward": 0.0,
       "step": 30
     },
     {
-      "completion_length": 691.1547193527222,
+      "completion_length": 678.4286026000976,
       "epoch": 0.5970149253731343,
-      "grad_norm": 0.059463296085596085,
-      "kl": 0.03000030517578125,
+      "grad_norm": 0.06629566103219986,
+      "kl": 0.029315948486328125,
       "learning_rate": 8.43565534959769e-06,
       "loss": 0.0012,
-      "reward": 0.6849330635741353,
-      "reward_std": 0.16573377684690058,
-      "rewards/accuracy_reward": 0.6849330635741353,
+      "reward": 0.6925223540514708,
+      "reward_std": 0.1698813715018332,
+      "rewards/accuracy_reward": 0.6925223540514708,
       "rewards/format_reward": 0.0,
       "step": 40
     },
     {
-      "completion_length": 690.842552947998,
+      "completion_length": 678.2587354660034,
       "epoch": 0.746268656716418,
-      "grad_norm": 0.06777255237102509,
-      "kl": 0.03056793212890625,
+      "grad_norm": 0.07359491288661957,
+      "kl": 0.03377456665039062,
       "learning_rate": 3.7067960895016277e-06,
-      "loss": 0.0012,
-      "reward": 0.7001116393133998,
-      "reward_std": 0.16762742078863085,
-      "rewards/accuracy_reward": 0.7001116393133998,
+      "loss": 0.0014,
+      "reward": 0.6977678883820773,
+      "reward_std": 0.1756029822397977,
+      "rewards/accuracy_reward": 0.6977678883820773,
       "rewards/format_reward": 0.0,
       "step": 50
     },
     {
-      "completion_length": 668.4015930175781,
+      "completion_length": 676.1143146514893,
       "epoch": 0.8955223880597015,
-      "grad_norm": 0.07001896947622299,
-      "kl": 0.031087493896484374,
+      "grad_norm": 0.07282646745443344,
+      "kl": 0.03675537109375,
       "learning_rate": 6.641957350279838e-07,
-      "loss": 0.0012,
-      "reward": 0.7029018193483353,
-      "reward_std": 0.16094307857565582,
-      "rewards/accuracy_reward": 0.7029018193483353,
+      "loss": 0.0015,
+      "reward": 0.6957589590921998,
+      "reward_std": 0.167550537455827,
+      "rewards/accuracy_reward": 0.6957589590921998,
      "rewards/format_reward": 0.0,
       "step": 60
     },
     {
-      "completion_length": 663.3861323765346,
+      "completion_length": 673.9289711543491,
       "epoch": 1.0,
-      "kl": 0.03179168701171875,
-      "reward": 0.7240115058209214,
-      "reward_std": 0.1582813827054841,
-      "rewards/accuracy_reward": 0.7240115058209214,
+      "kl": 0.03508213588169643,
+      "reward": 0.7025404186653239,
+      "reward_std": 0.1512090131374342,
+      "rewards/accuracy_reward": 0.7025404186653239,
       "rewards/format_reward": 0.0,
       "step": 67,
       "total_flos": 0.0,
-      "train_loss": 0.0010332457457126966,
-      "train_runtime": 12336.334,
-      "train_samples_per_second": 0.608,
+      "train_loss": 0.001102253468012187,
+      "train_runtime": 12358.4402,
+      "train_samples_per_second": 0.607,
       "train_steps_per_second": 0.005
     }
   ],
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:13cbabc08ca0e5abe40c178d16e74839353ac52a59e2740d6b323fe07f596c30
+oid sha256:010ba5f0a144da22011142faca812261dbf0129a623c2c90d9c7561bd143a44b
 size 7032