li-muyang committed (verified)
Commit 6538260 · Parent(s): c230a7e

Model save

README.md CHANGED
@@ -16,15 +16,15 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model was trained from scratch on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.6100
- - Rewards/chosen: -0.5090
- - Rewards/rejected: -0.8748
- - Rewards/accuracies: 0.7383
- - Rewards/margins: 0.3658
- - Logps/rejected: -1150.8049
- - Logps/chosen: -782.0501
- - Logits/rejected: -0.8126
- - Logits/chosen: -0.8429
+ - Loss: 0.5651
+ - Rewards/chosen: -0.6143
+ - Rewards/rejected: -1.1708
+ - Rewards/accuracies: 0.75
+ - Rewards/margins: 0.5566
+ - Logps/rejected: -393.4054
+ - Logps/chosen: -340.7309
+ - Logits/rejected: -2.0252
+ - Logits/chosen: -2.0773
 
  ## Model description
 
@@ -55,13 +55,13 @@ The following hyperparameters were used during training:
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: cosine
  - lr_scheduler_warmup_ratio: 0.1
- - num_epochs: 1
+ - training_steps: 238
 
  ### Training results
 
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
  |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.5979 | 0.9984 | 477 | 0.6100 | -0.5090 | -0.8748 | 0.7383 | 0.3658 | -1150.8049 | -782.0501 | -0.8126 | -0.8429 |
+ | 0.5646 | 0.4982 | 238 | 0.5651 | -0.6143 | -1.1708 | 0.75 | 0.5566 | -393.4054 | -340.7309 | -2.0252 | -2.0773 |
 
 
  ### Framework versions
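As context for the updated hyperparameter list, here is a minimal sketch of how these optimizer and scheduler settings would typically be expressed with `transformers.TrainingArguments`. This is an illustration only, not the training script from this repository; the output directory name is a placeholder, and anything not shown in the hunk above (learning rate, batch sizes) is left at its default.

```python
# Illustrative sketch, not the original training script: the optimizer and
# scheduler values from the README hunk above expressed as TrainingArguments.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="zephyr-7b-dpo",   # placeholder name
    max_steps=238,                # "training_steps: 238" replaces "num_epochs: 1"
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,             # lr_scheduler_warmup_ratio
    adam_beta1=0.9,               # Adam betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-08,
)
```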
all_results.json CHANGED
@@ -1,9 +1,9 @@
  {
- "epoch": 0.9984301412872841,
+ "epoch": 0.4981684981684982,
  "total_flos": 0.0,
- "train_loss": 0.6406088435175034,
- "train_runtime": 13597.1074,
+ "train_loss": 0.6031974924712622,
+ "train_runtime": 7143.0668,
  "train_samples": 61134,
- "train_samples_per_second": 4.496,
- "train_steps_per_second": 0.035
+ "train_samples_per_second": 4.265,
+ "train_steps_per_second": 0.033
  }
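As a rough sanity check on these aggregates (not part of the original file), the reported throughput is consistent with roughly half an epoch over 61134 samples in the given runtime:

```python
# Rough check: fraction-of-epoch * dataset size / runtime ≈ samples per second.
epoch_fraction = 0.4981684981684982
train_samples = 61134
train_runtime = 7143.0668  # seconds

print(round(epoch_fraction * train_samples / train_runtime, 3))  # ≈ 4.264, near the logged 4.265
```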
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "data/sft/zephyr-7b-sft-1e-every25/checkpoint-600",
+ "_name_or_path": "data/sft/zephyr-7b-sft-1e-every25/checkpoint-300",
  "architectures": [
  "MistralForCausalLM"
  ],
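The only change here is the `_name_or_path` bookkeeping field, which now records that the run was initialized from `checkpoint-300` of the SFT model rather than `checkpoint-600`; the `MistralForCausalLM` architecture is untouched, so the saved weights load like any other Mistral causal LM. A minimal loading sketch (the model path below is a placeholder for this repository):

```python
# Sketch only: loading the saved weights like any other Mistral causal LM.
# MODEL_PATH is a placeholder for this repo id or a local checkout.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_PATH = "path/to/this/checkpoint"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, torch_dtype=torch.bfloat16)
```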
generation_config.json CHANGED
@@ -1,9 +1,6 @@
  {
  "_from_model_config": true,
- "bos_token_id": 128000,
- "do_sample": true,
- "eos_token_id": 128001,
- "temperature": 0.6,
- "top_p": 0.9,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
  "transformers_version": "4.45.2"
  }
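The new generation config keeps only the Mistral token ids (`bos_token_id: 1`, `eos_token_id: 2`) and drops the sampling defaults, so `generate()` falls back to greedy decoding unless sampling parameters are passed explicitly. A usage sketch (illustrative only), reusing `model` and `tokenizer` from the loading example above and the removed defaults (0.6 / 0.9) as per-call arguments:

```python
# Sketch only: with do_sample/temperature/top_p removed from the config,
# sampling has to be requested per call; values mirror the removed defaults.
inputs = tokenizer("Hello, world", return_tensors="pt")
output = model.generate(
    **inputs,
    max_new_tokens=64,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```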
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e4d2c8e646e5701192e73de8c38e6d4056e9f571ca8e10a64d702d40172ec86e
+ oid sha256:a32f071986396548120705b6e6ade178f0c494255183edea1b50e12ed1e3fc17
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7050cc11c07f527e41773fffa439f1cabdeb7522827afd0523198340ec011529
+ oid sha256:2746b9a1ef34aa815afde48356838d1bfdf801963a64795722506c6812ac6bcd
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8b3a7c01323eff3dc3201152846f798738d11befcbd65f5366c003220d7aa2f6
+ oid sha256:4457f1c2a6471e778a536844dcfea1b8a5e6136f86dad9d0da048e982fbaf836
  size 4540516344
train_results.json CHANGED
@@ -1,9 +1,9 @@
  {
- "epoch": 0.9984301412872841,
+ "epoch": 0.4981684981684982,
  "total_flos": 0.0,
- "train_loss": 0.6406088435175034,
- "train_runtime": 13597.1074,
+ "train_loss": 0.6031974924712622,
+ "train_runtime": 7143.0668,
  "train_samples": 61134,
- "train_samples_per_second": 4.496,
- "train_steps_per_second": 0.035
+ "train_samples_per_second": 4.265,
+ "train_steps_per_second": 0.033
  }
trainer_state.json CHANGED
@@ -1,21 +1,21 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.9984301412872841,
5
  "eval_steps": 500,
6
- "global_step": 477,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0020931449502878076,
13
- "grad_norm": 0.32324101626998236,
14
- "learning_rate": 1.0416666666666666e-08,
15
- "logits/chosen": -1.254675030708313,
16
- "logits/rejected": -1.2519813776016235,
17
- "logps/chosen": -357.39349365234375,
18
- "logps/rejected": -360.9093322753906,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
@@ -25,737 +25,377 @@
25
  },
26
  {
27
  "epoch": 0.020931449502878074,
28
- "grad_norm": 0.31303582401914404,
29
- "learning_rate": 1.0416666666666667e-07,
30
- "logits/chosen": -1.1969424486160278,
31
- "logits/rejected": -1.1853214502334595,
32
- "logps/chosen": -299.4916687011719,
33
- "logps/rejected": -277.1794128417969,
34
- "loss": 0.6931,
35
- "rewards/accuracies": 0.4236111044883728,
36
- "rewards/chosen": 2.0686069547082298e-05,
37
- "rewards/margins": 2.5644327251939103e-05,
38
- "rewards/rejected": -4.958255431120051e-06,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.04186289900575615,
43
- "grad_norm": 0.2946808627363446,
44
- "learning_rate": 2.0833333333333333e-07,
45
- "logits/chosen": -1.1608688831329346,
46
- "logits/rejected": -1.1570008993148804,
47
- "logps/chosen": -311.9692077636719,
48
- "logps/rejected": -279.7148742675781,
49
- "loss": 0.6931,
50
- "rewards/accuracies": 0.4749999940395355,
51
- "rewards/chosen": 5.6396173022221774e-05,
52
- "rewards/margins": 1.9542889276635833e-05,
53
- "rewards/rejected": 3.6853296478511766e-05,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.06279434850863422,
58
- "grad_norm": 0.29651950322714443,
59
- "learning_rate": 3.1249999999999997e-07,
60
- "logits/chosen": -1.178495168685913,
61
- "logits/rejected": -1.1777125597000122,
62
- "logps/chosen": -289.05224609375,
63
- "logps/rejected": -248.8190460205078,
64
- "loss": 0.6931,
65
- "rewards/accuracies": 0.543749988079071,
66
- "rewards/chosen": 0.0005276039009913802,
67
- "rewards/margins": 0.0001321145100519061,
68
- "rewards/rejected": 0.0003954893327318132,
69
  "step": 30
70
  },
71
  {
72
  "epoch": 0.0837257980115123,
73
- "grad_norm": 0.2988220823405562,
74
- "learning_rate": 4.1666666666666667e-07,
75
- "logits/chosen": -1.1629207134246826,
76
- "logits/rejected": -1.1547033786773682,
77
- "logps/chosen": -257.3613586425781,
78
- "logps/rejected": -259.5196228027344,
79
- "loss": 0.6929,
80
- "rewards/accuracies": 0.6000000238418579,
81
- "rewards/chosen": 0.0016067821998149157,
82
- "rewards/margins": 0.0005107645411044359,
83
- "rewards/rejected": 0.0010960176587104797,
84
  "step": 40
85
  },
86
  {
87
  "epoch": 0.10465724751439037,
88
- "grad_norm": 0.3125736010449426,
89
- "learning_rate": 4.999731868769026e-07,
90
- "logits/chosen": -1.1483451128005981,
91
- "logits/rejected": -1.1426894664764404,
92
- "logps/chosen": -285.2168273925781,
93
- "logps/rejected": -270.5537414550781,
94
- "loss": 0.6925,
95
- "rewards/accuracies": 0.675000011920929,
96
- "rewards/chosen": 0.0029888246208429337,
97
- "rewards/margins": 0.00123431789688766,
98
- "rewards/rejected": 0.0017545067239552736,
99
  "step": 50
100
  },
101
  {
102
  "epoch": 0.12558869701726844,
103
- "grad_norm": 0.32861986652173775,
104
- "learning_rate": 4.990353313429303e-07,
105
- "logits/chosen": -1.1673046350479126,
106
- "logits/rejected": -1.1617118120193481,
107
- "logps/chosen": -250.5665283203125,
108
- "logps/rejected": -242.58969116210938,
109
- "loss": 0.6919,
110
- "rewards/accuracies": 0.768750011920929,
111
- "rewards/chosen": 0.005732725840061903,
112
- "rewards/margins": 0.0029540960676968098,
113
- "rewards/rejected": 0.002778630005195737,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.14652014652014653,
118
- "grad_norm": 0.3018029007900129,
119
- "learning_rate": 4.967625656594781e-07,
120
- "logits/chosen": -1.1635328531265259,
121
- "logits/rejected": -1.1576188802719116,
122
- "logps/chosen": -292.01702880859375,
123
- "logps/rejected": -280.9066162109375,
124
- "loss": 0.691,
125
- "rewards/accuracies": 0.7124999761581421,
126
- "rewards/chosen": 0.005355691071599722,
127
- "rewards/margins": 0.0041871508583426476,
128
- "rewards/rejected": 0.0011685403296723962,
129
  "step": 70
130
  },
131
  {
132
  "epoch": 0.1674515960230246,
133
- "grad_norm": 0.3706523862968378,
134
- "learning_rate": 4.93167072587771e-07,
135
- "logits/chosen": -1.1743751764297485,
136
- "logits/rejected": -1.1609123945236206,
137
- "logps/chosen": -324.23712158203125,
138
- "logps/rejected": -257.1612854003906,
139
- "loss": 0.6901,
140
- "rewards/accuracies": 0.7250000238418579,
141
- "rewards/chosen": 0.0037769668269902468,
142
- "rewards/margins": 0.007478375919163227,
143
- "rewards/rejected": -0.0037014088593423367,
144
  "step": 80
145
  },
146
  {
147
  "epoch": 0.18838304552590268,
148
- "grad_norm": 0.40916342963365604,
149
- "learning_rate": 4.882681251368548e-07,
150
- "logits/chosen": -1.210192084312439,
151
- "logits/rejected": -1.1824644804000854,
152
- "logps/chosen": -260.08148193359375,
153
- "logps/rejected": -273.74151611328125,
154
- "loss": 0.6878,
155
- "rewards/accuracies": 0.706250011920929,
156
- "rewards/chosen": -0.005586659070104361,
157
- "rewards/margins": 0.010486402548849583,
158
- "rewards/rejected": -0.016073061153292656,
159
  "step": 90
160
  },
161
  {
162
  "epoch": 0.20931449502878074,
163
- "grad_norm": 0.4754293468181655,
164
- "learning_rate": 4.820919832540181e-07,
165
- "logits/chosen": -1.2199891805648804,
166
- "logits/rejected": -1.220046043395996,
167
- "logps/chosen": -312.9206848144531,
168
- "logps/rejected": -314.90960693359375,
169
- "loss": 0.6854,
170
- "rewards/accuracies": 0.7562500238418579,
171
- "rewards/chosen": -0.013386862352490425,
172
- "rewards/margins": 0.020824002102017403,
173
- "rewards/rejected": -0.03421086445450783,
174
  "step": 100
175
  },
176
  {
177
  "epoch": 0.2302459445316588,
178
- "grad_norm": 0.8738829496526755,
179
- "learning_rate": 4.7467175306295647e-07,
180
- "logits/chosen": -1.1820493936538696,
181
- "logits/rejected": -1.1728696823120117,
182
- "logps/chosen": -311.4639587402344,
183
- "logps/rejected": -316.75201416015625,
184
- "loss": 0.6831,
185
- "rewards/accuracies": 0.6625000238418579,
186
- "rewards/chosen": -0.023116732016205788,
187
- "rewards/margins": 0.02223314717411995,
188
- "rewards/rejected": -0.045349881052970886,
189
  "step": 110
190
  },
191
  {
192
  "epoch": 0.25117739403453687,
193
- "grad_norm": 0.8961784473053371,
194
- "learning_rate": 4.6604720940421207e-07,
195
- "logits/chosen": -1.1100571155548096,
196
- "logits/rejected": -1.1283613443374634,
197
- "logps/chosen": -302.40948486328125,
198
- "logps/rejected": -335.69512939453125,
199
- "loss": 0.6764,
200
- "rewards/accuracies": 0.6937500238418579,
201
- "rewards/chosen": -0.03481901437044144,
202
- "rewards/margins": 0.0378703810274601,
203
- "rewards/rejected": -0.07268939912319183,
204
  "step": 120
205
  },
206
  {
207
  "epoch": 0.272108843537415,
208
- "grad_norm": 1.497566674498532,
209
- "learning_rate": 4.5626458262912735e-07,
210
- "logits/chosen": -1.14939284324646,
211
- "logits/rejected": -1.142646074295044,
212
- "logps/chosen": -352.44049072265625,
213
- "logps/rejected": -388.8111267089844,
214
- "loss": 0.6693,
215
- "rewards/accuracies": 0.6625000238418579,
216
- "rewards/chosen": -0.07200166583061218,
217
- "rewards/margins": 0.04512103646993637,
218
- "rewards/rejected": -0.11712269484996796,
219
  "step": 130
220
  },
221
  {
222
  "epoch": 0.29304029304029305,
223
- "grad_norm": 2.001200835384951,
224
- "learning_rate": 4.453763107901675e-07,
225
- "logits/chosen": -1.196212649345398,
226
- "logits/rejected": -1.1793403625488281,
227
- "logps/chosen": -385.7120056152344,
228
- "logps/rejected": -407.84112548828125,
229
- "loss": 0.6639,
230
- "rewards/accuracies": 0.737500011920929,
231
- "rewards/chosen": -0.06641264259815216,
232
- "rewards/margins": 0.07408300787210464,
233
- "rewards/rejected": -0.1404956430196762,
234
  "step": 140
235
  },
236
  {
237
  "epoch": 0.3139717425431711,
238
- "grad_norm": 1.97796076277746,
239
- "learning_rate": 4.3344075855595097e-07,
240
- "logits/chosen": -1.15965735912323,
241
- "logits/rejected": -1.1490386724472046,
242
- "logps/chosen": -375.55120849609375,
243
- "logps/rejected": -405.73077392578125,
244
- "loss": 0.6608,
245
  "rewards/accuracies": 0.6875,
246
- "rewards/chosen": -0.09626610577106476,
247
- "rewards/margins": 0.06254641711711884,
248
- "rewards/rejected": -0.1588125228881836,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.3349031920460492,
253
- "grad_norm": 3.0008548992733766,
254
- "learning_rate": 4.2052190435769554e-07,
255
- "logits/chosen": -1.1561930179595947,
256
- "logits/rejected": -1.1568859815597534,
257
- "logps/chosen": -409.51068115234375,
258
- "logps/rejected": -472.51080322265625,
259
- "loss": 0.6568,
260
  "rewards/accuracies": 0.706250011920929,
261
- "rewards/chosen": -0.1316855102777481,
262
- "rewards/margins": 0.08769272267818451,
263
- "rewards/rejected": -0.21937823295593262,
264
  "step": 160
265
  },
266
  {
267
  "epoch": 0.35583464154892724,
268
- "grad_norm": 1.9634265810681608,
269
- "learning_rate": 4.0668899744407567e-07,
270
- "logits/chosen": -1.048796534538269,
271
- "logits/rejected": -1.0480598211288452,
272
- "logps/chosen": -440.39984130859375,
273
- "logps/rejected": -503.9505310058594,
274
- "loss": 0.6554,
275
- "rewards/accuracies": 0.637499988079071,
276
- "rewards/chosen": -0.18013329803943634,
277
- "rewards/margins": 0.08887016773223877,
278
- "rewards/rejected": -0.2690034508705139,
279
  "step": 170
280
  },
281
  {
282
  "epoch": 0.37676609105180536,
283
- "grad_norm": 2.2698140691586306,
284
- "learning_rate": 3.920161866827889e-07,
285
- "logits/chosen": -1.1098086833953857,
286
- "logits/rejected": -1.0865398645401,
287
- "logps/chosen": -460.97833251953125,
288
- "logps/rejected": -526.1106567382812,
289
- "loss": 0.6414,
290
- "rewards/accuracies": 0.7124999761581421,
291
- "rewards/chosen": -0.18967969715595245,
292
- "rewards/margins": 0.08924828469753265,
293
- "rewards/rejected": -0.2789279818534851,
294
  "step": 180
295
  },
296
  {
297
  "epoch": 0.3976975405546834,
298
- "grad_norm": 3.600851457498562,
299
- "learning_rate": 3.765821230985757e-07,
300
- "logits/chosen": -1.0811136960983276,
301
- "logits/rejected": -1.0507347583770752,
302
- "logps/chosen": -491.7823791503906,
303
- "logps/rejected": -581.665771484375,
304
- "loss": 0.6463,
305
- "rewards/accuracies": 0.668749988079071,
306
- "rewards/chosen": -0.22724099457263947,
307
- "rewards/margins": 0.09368610382080078,
308
- "rewards/rejected": -0.32092708349227905,
309
  "step": 190
310
  },
311
  {
312
  "epoch": 0.4186289900575615,
313
- "grad_norm": 3.2360530090506083,
314
- "learning_rate": 3.604695382782159e-07,
315
- "logits/chosen": -0.986294150352478,
316
- "logits/rejected": -1.0004829168319702,
317
- "logps/chosen": -555.3709716796875,
318
- "logps/rejected": -693.2188720703125,
319
- "loss": 0.6419,
320
- "rewards/accuracies": 0.6812499761581421,
321
- "rewards/chosen": -0.3050481081008911,
322
- "rewards/margins": 0.11720240116119385,
323
- "rewards/rejected": -0.42225056886672974,
324
  "step": 200
325
  },
326
  {
327
  "epoch": 0.43956043956043955,
328
- "grad_norm": 3.8624898658875404,
329
- "learning_rate": 3.4376480090239047e-07,
330
- "logits/chosen": -0.9977714419364929,
331
- "logits/rejected": -0.9668005108833313,
332
- "logps/chosen": -665.3328247070312,
333
- "logps/rejected": -751.362060546875,
334
- "loss": 0.6487,
335
- "rewards/accuracies": 0.699999988079071,
336
- "rewards/chosen": -0.3542812466621399,
337
- "rewards/margins": 0.13943365216255188,
338
- "rewards/rejected": -0.4937148988246918,
339
  "step": 210
340
  },
341
  {
342
  "epoch": 0.4604918890633176,
343
- "grad_norm": 2.527165628308305,
344
- "learning_rate": 3.265574537815398e-07,
345
- "logits/chosen": -0.9821435809135437,
346
- "logits/rejected": -1.0059399604797363,
347
- "logps/chosen": -432.90325927734375,
348
- "logps/rejected": -561.0076293945312,
349
- "loss": 0.6455,
350
- "rewards/accuracies": 0.6937500238418579,
351
- "rewards/chosen": -0.20027010142803192,
352
- "rewards/margins": 0.1195964589715004,
353
- "rewards/rejected": -0.3198665678501129,
354
  "step": 220
355
  },
356
  {
357
  "epoch": 0.48142333856619574,
358
- "grad_norm": 3.0201938437726894,
359
- "learning_rate": 3.0893973387735683e-07,
360
- "logits/chosen": -0.9755321741104126,
361
- "logits/rejected": -0.9638468027114868,
362
- "logps/chosen": -537.1400756835938,
363
- "logps/rejected": -711.3963012695312,
364
- "loss": 0.6351,
365
- "rewards/accuracies": 0.7437499761581421,
366
- "rewards/chosen": -0.28014427423477173,
367
- "rewards/margins": 0.19521525502204895,
368
- "rewards/rejected": -0.4753595292568207,
369
  "step": 230
370
  },
371
  {
372
- "epoch": 0.5023547880690737,
373
- "grad_norm": 3.5945229999665003,
374
- "learning_rate": 2.910060778827554e-07,
375
- "logits/chosen": -0.9908636212348938,
376
- "logits/rejected": -0.9440512657165527,
377
- "logps/chosen": -623.9000854492188,
378
- "logps/rejected": -765.6077880859375,
379
- "loss": 0.6229,
380
- "rewards/accuracies": 0.6875,
381
- "rewards/chosen": -0.3405396342277527,
382
- "rewards/margins": 0.15900567173957825,
383
- "rewards/rejected": -0.49954527616500854,
384
- "step": 240
385
- },
386
- {
387
- "epoch": 0.5232862375719518,
388
- "grad_norm": 4.649933778191512,
389
- "learning_rate": 2.7285261601056697e-07,
390
- "logits/chosen": -0.981595516204834,
391
- "logits/rejected": -0.942014217376709,
392
- "logps/chosen": -623.5603637695312,
393
- "logps/rejected": -795.4835815429688,
394
- "loss": 0.6341,
395
- "rewards/accuracies": 0.737500011920929,
396
- "rewards/chosen": -0.34366101026535034,
397
- "rewards/margins": 0.21082744002342224,
398
- "rewards/rejected": -0.5544884204864502,
399
- "step": 250
400
- },
401
- {
402
- "epoch": 0.54421768707483,
403
- "grad_norm": 5.029295020454676,
404
- "learning_rate": 2.5457665670441937e-07,
405
- "logits/chosen": -1.0086824893951416,
406
- "logits/rejected": -1.00519597530365,
407
- "logps/chosen": -633.7659912109375,
408
- "logps/rejected": -827.9234619140625,
409
- "loss": 0.6268,
410
- "rewards/accuracies": 0.699999988079071,
411
- "rewards/chosen": -0.3744930624961853,
412
- "rewards/margins": 0.21027839183807373,
413
- "rewards/rejected": -0.584771454334259,
414
- "step": 260
415
- },
416
- {
417
- "epoch": 0.565149136577708,
418
- "grad_norm": 4.940462479346541,
419
- "learning_rate": 2.3627616503391812e-07,
420
- "logits/chosen": -0.9375359416007996,
421
- "logits/rejected": -0.9241452217102051,
422
- "logps/chosen": -676.0524291992188,
423
- "logps/rejected": -876.9054565429688,
424
- "loss": 0.628,
425
- "rewards/accuracies": 0.6875,
426
- "rewards/chosen": -0.3771703243255615,
427
- "rewards/margins": 0.2305041253566742,
428
- "rewards/rejected": -0.6076744794845581,
429
- "step": 270
430
- },
431
- {
432
- "epoch": 0.5860805860805861,
433
- "grad_norm": 3.2120566855593844,
434
- "learning_rate": 2.1804923757009882e-07,
435
- "logits/chosen": -0.8946016430854797,
436
- "logits/rejected": -0.8984814882278442,
437
- "logps/chosen": -645.8856201171875,
438
- "logps/rejected": -814.6409301757812,
439
- "loss": 0.6274,
440
- "rewards/accuracies": 0.6312500238418579,
441
- "rewards/chosen": -0.3892592191696167,
442
- "rewards/margins": 0.19356074929237366,
443
- "rewards/rejected": -0.5828199982643127,
444
- "step": 280
445
- },
446
- {
447
- "epoch": 0.6070120355834642,
448
- "grad_norm": 3.208297566535166,
449
- "learning_rate": 1.9999357655598891e-07,
450
- "logits/chosen": -0.9358976483345032,
451
- "logits/rejected": -0.8927936553955078,
452
- "logps/chosen": -712.9257202148438,
453
- "logps/rejected": -900.0843505859375,
454
- "loss": 0.6289,
455
- "rewards/accuracies": 0.6875,
456
- "rewards/chosen": -0.4654994010925293,
457
- "rewards/margins": 0.1835438311100006,
458
- "rewards/rejected": -0.6490432620048523,
459
- "step": 290
460
- },
461
- {
462
- "epoch": 0.6279434850863422,
463
- "grad_norm": 4.268524839111709,
464
- "learning_rate": 1.8220596619089573e-07,
465
- "logits/chosen": -0.9108999371528625,
466
- "logits/rejected": -0.8883649110794067,
467
- "logps/chosen": -782.2093505859375,
468
- "logps/rejected": -916.7747802734375,
469
- "loss": 0.6109,
470
- "rewards/accuracies": 0.6937500238418579,
471
- "rewards/chosen": -0.4553016126155853,
472
- "rewards/margins": 0.18240997195243835,
473
- "rewards/rejected": -0.6377116441726685,
474
- "step": 300
475
- },
476
- {
477
- "epoch": 0.6488749345892203,
478
- "grad_norm": 4.440153357261806,
479
- "learning_rate": 1.647817538357072e-07,
480
- "logits/chosen": -0.8766281008720398,
481
- "logits/rejected": -0.8332917094230652,
482
- "logps/chosen": -758.6160888671875,
483
- "logps/rejected": -1003.10498046875,
484
- "loss": 0.6126,
485
- "rewards/accuracies": 0.6625000238418579,
486
- "rewards/chosen": -0.4533390998840332,
487
- "rewards/margins": 0.2982035279273987,
488
- "rewards/rejected": -0.7515425682067871,
489
- "step": 310
490
- },
491
- {
492
- "epoch": 0.6698063840920984,
493
- "grad_norm": 4.836923344104393,
494
- "learning_rate": 1.478143389201113e-07,
495
- "logits/chosen": -0.9347564578056335,
496
- "logits/rejected": -0.8702303767204285,
497
- "logps/chosen": -684.4344482421875,
498
- "logps/rejected": -951.7136840820312,
499
- "loss": 0.6077,
500
- "rewards/accuracies": 0.731249988079071,
501
- "rewards/chosen": -0.42780131101608276,
502
- "rewards/margins": 0.29773804545402527,
503
- "rewards/rejected": -0.7255394458770752,
504
- "step": 320
505
- },
506
- {
507
- "epoch": 0.6907378335949764,
508
- "grad_norm": 5.557060773999832,
509
- "learning_rate": 1.3139467229135998e-07,
510
- "logits/chosen": -0.8965336680412292,
511
- "logits/rejected": -0.8489995002746582,
512
- "logps/chosen": -773.0374145507812,
513
- "logps/rejected": -1043.8154296875,
514
- "loss": 0.616,
515
- "rewards/accuracies": 0.699999988079071,
516
- "rewards/chosen": -0.4970160126686096,
517
- "rewards/margins": 0.27822786569595337,
518
- "rewards/rejected": -0.7752438187599182,
519
- "step": 330
520
- },
521
- {
522
- "epoch": 0.7116692830978545,
523
- "grad_norm": 8.028071802637974,
524
- "learning_rate": 1.1561076868822755e-07,
525
- "logits/chosen": -0.7840911746025085,
526
- "logits/rejected": -0.7743405103683472,
527
- "logps/chosen": -797.73974609375,
528
- "logps/rejected": -1051.7327880859375,
529
- "loss": 0.6039,
530
- "rewards/accuracies": 0.668749988079071,
531
- "rewards/chosen": -0.5024133324623108,
532
- "rewards/margins": 0.2816846966743469,
533
- "rewards/rejected": -0.7840980291366577,
534
- "step": 340
535
- },
536
- {
537
- "epoch": 0.7326007326007326,
538
- "grad_norm": 5.482245632746445,
539
- "learning_rate": 1.0054723495346482e-07,
540
- "logits/chosen": -0.8621054887771606,
541
- "logits/rejected": -0.8484535217285156,
542
- "logps/chosen": -779.8978881835938,
543
- "logps/rejected": -1020.26513671875,
544
- "loss": 0.5996,
545
- "rewards/accuracies": 0.699999988079071,
546
- "rewards/chosen": -0.5172659158706665,
547
- "rewards/margins": 0.27023959159851074,
548
- "rewards/rejected": -0.7875055074691772,
549
- "step": 350
550
- },
551
- {
552
- "epoch": 0.7535321821036107,
553
- "grad_norm": 4.61428054230484,
554
- "learning_rate": 8.628481651367875e-08,
555
- "logits/chosen": -0.8745294809341431,
556
- "logits/rejected": -0.8429532051086426,
557
- "logps/chosen": -868.0397338867188,
558
- "logps/rejected": -1069.2259521484375,
559
- "loss": 0.6226,
560
- "rewards/accuracies": 0.6875,
561
- "rewards/chosen": -0.5554712414741516,
562
- "rewards/margins": 0.24238689243793488,
563
- "rewards/rejected": -0.7978580594062805,
564
- "step": 360
565
- },
566
- {
567
- "epoch": 0.7744636316064888,
568
- "grad_norm": 4.012064999162859,
569
- "learning_rate": 7.289996455765748e-08,
570
- "logits/chosen": -0.9377773404121399,
571
- "logits/rejected": -0.8822382688522339,
572
- "logps/chosen": -725.4534912109375,
573
- "logps/rejected": -960.8234252929688,
574
- "loss": 0.6084,
575
- "rewards/accuracies": 0.7124999761581421,
576
- "rewards/chosen": -0.462857186794281,
577
- "rewards/margins": 0.2657161355018616,
578
- "rewards/rejected": -0.7285734415054321,
579
- "step": 370
580
- },
581
- {
582
- "epoch": 0.7953950811093669,
583
- "grad_norm": 3.5554616562598995,
584
- "learning_rate": 6.046442623320145e-08,
585
- "logits/chosen": -0.7937309145927429,
586
- "logits/rejected": -0.766541063785553,
587
- "logps/chosen": -826.75537109375,
588
- "logps/rejected": -1213.797607421875,
589
- "loss": 0.6092,
590
- "rewards/accuracies": 0.668749988079071,
591
- "rewards/chosen": -0.5811253786087036,
592
- "rewards/margins": 0.37074214220046997,
593
- "rewards/rejected": -0.951867401599884,
594
- "step": 380
595
- },
596
- {
597
- "epoch": 0.8163265306122449,
598
- "grad_norm": 5.133888886461569,
599
- "learning_rate": 4.904486005914027e-08,
600
- "logits/chosen": -0.8645860552787781,
601
- "logits/rejected": -0.7781597971916199,
602
- "logps/chosen": -887.5338745117188,
603
- "logps/rejected": -1190.846923828125,
604
- "loss": 0.6032,
605
- "rewards/accuracies": 0.668749988079071,
606
- "rewards/chosen": -0.5518342852592468,
607
- "rewards/margins": 0.3350018858909607,
608
- "rewards/rejected": -0.8868362307548523,
609
- "step": 390
610
- },
611
- {
612
- "epoch": 0.837257980115123,
613
- "grad_norm": 5.1958119384493875,
614
- "learning_rate": 3.8702478614051345e-08,
615
- "logits/chosen": -0.8455416560173035,
616
- "logits/rejected": -0.8282378911972046,
617
- "logps/chosen": -726.9100952148438,
618
- "logps/rejected": -962.5436401367188,
619
- "loss": 0.6137,
620
- "rewards/accuracies": 0.6812499761581421,
621
- "rewards/chosen": -0.46447521448135376,
622
- "rewards/margins": 0.25824955105781555,
623
- "rewards/rejected": -0.7227246761322021,
624
- "step": 400
625
- },
626
- {
627
- "epoch": 0.858189429618001,
628
- "grad_norm": 5.087694596010447,
629
- "learning_rate": 2.9492720416985e-08,
630
- "logits/chosen": -0.881341278553009,
631
- "logits/rejected": -0.8477126359939575,
632
- "logps/chosen": -797.9344482421875,
633
- "logps/rejected": -1068.551513671875,
634
- "loss": 0.6109,
635
- "rewards/accuracies": 0.699999988079071,
636
- "rewards/chosen": -0.4897310137748718,
637
- "rewards/margins": 0.31405144929885864,
638
- "rewards/rejected": -0.8037824630737305,
639
- "step": 410
640
- },
641
- {
642
- "epoch": 0.8791208791208791,
643
- "grad_norm": 4.7011066546022855,
644
- "learning_rate": 2.1464952759020856e-08,
645
- "logits/chosen": -0.7331596612930298,
646
- "logits/rejected": -0.7492183446884155,
647
- "logps/chosen": -795.1165161132812,
648
- "logps/rejected": -1116.110595703125,
649
- "loss": 0.5976,
650
- "rewards/accuracies": 0.706250011920929,
651
- "rewards/chosen": -0.5313975811004639,
652
- "rewards/margins": 0.31534695625305176,
653
- "rewards/rejected": -0.8467445373535156,
654
- "step": 420
655
- },
656
- {
657
- "epoch": 0.9000523286237572,
658
- "grad_norm": 4.174114432694521,
659
- "learning_rate": 1.4662207078575684e-08,
660
- "logits/chosen": -0.8007818460464478,
661
- "logits/rejected": -0.7300530672073364,
662
- "logps/chosen": -845.8348388671875,
663
- "logps/rejected": -1122.674072265625,
664
- "loss": 0.6044,
665
- "rewards/accuracies": 0.668749988079071,
666
- "rewards/chosen": -0.5535520315170288,
667
- "rewards/margins": 0.30010417103767395,
668
- "rewards/rejected": -0.8536561131477356,
669
- "step": 430
670
- },
671
- {
672
- "epoch": 0.9209837781266352,
673
- "grad_norm": 4.573763510440249,
674
- "learning_rate": 9.12094829893642e-09,
675
- "logits/chosen": -0.8669272661209106,
676
- "logits/rejected": -0.8168144226074219,
677
- "logps/chosen": -778.4366455078125,
678
- "logps/rejected": -945.3597412109375,
679
- "loss": 0.596,
680
- "rewards/accuracies": 0.637499988079071,
681
- "rewards/chosen": -0.5131410956382751,
682
- "rewards/margins": 0.20753808319568634,
683
- "rewards/rejected": -0.7206791639328003,
684
- "step": 440
685
- },
686
- {
687
- "epoch": 0.9419152276295133,
688
- "grad_norm": 5.324804725259421,
689
- "learning_rate": 4.8708793644441086e-09,
690
- "logits/chosen": -0.7154260873794556,
691
- "logits/rejected": -0.6910079121589661,
692
- "logps/chosen": -808.1412963867188,
693
- "logps/rejected": -1094.1324462890625,
694
- "loss": 0.6039,
695
- "rewards/accuracies": 0.6937500238418579,
696
- "rewards/chosen": -0.5372119545936584,
697
- "rewards/margins": 0.29753977060317993,
698
- "rewards/rejected": -0.8347517251968384,
699
- "step": 450
700
- },
701
- {
702
- "epoch": 0.9628466771323915,
703
- "grad_norm": 4.938304952484282,
704
- "learning_rate": 1.9347820230782295e-09,
705
- "logits/chosen": -0.7675133943557739,
706
- "logits/rejected": -0.7233086228370667,
707
- "logps/chosen": -805.8953857421875,
708
- "logps/rejected": -1008.8405151367188,
709
- "loss": 0.6124,
710
- "rewards/accuracies": 0.731249988079071,
711
- "rewards/chosen": -0.5324074625968933,
712
- "rewards/margins": 0.2462809532880783,
713
- "rewards/rejected": -0.7786884307861328,
714
- "step": 460
715
- },
716
- {
717
- "epoch": 0.9837781266352695,
718
- "grad_norm": 7.346736713965509,
719
- "learning_rate": 3.2839470889836627e-10,
720
- "logits/chosen": -0.8261724710464478,
721
- "logits/rejected": -0.8037177324295044,
722
- "logps/chosen": -807.8523559570312,
723
- "logps/rejected": -1025.576904296875,
724
- "loss": 0.5979,
725
- "rewards/accuracies": 0.7250000238418579,
726
- "rewards/chosen": -0.5114091634750366,
727
- "rewards/margins": 0.23410066962242126,
728
- "rewards/rejected": -0.7455097436904907,
729
- "step": 470
730
- },
731
- {
732
- "epoch": 0.9984301412872841,
733
- "eval_logits/chosen": -0.8428901433944702,
734
- "eval_logits/rejected": -0.8125907778739929,
735
- "eval_logps/chosen": -782.0501098632812,
736
- "eval_logps/rejected": -1150.804931640625,
737
- "eval_loss": 0.6099801063537598,
738
- "eval_rewards/accuracies": 0.73828125,
739
- "eval_rewards/chosen": -0.5090480446815491,
740
- "eval_rewards/margins": 0.3657650351524353,
741
- "eval_rewards/rejected": -0.8748130202293396,
742
- "eval_runtime": 245.867,
743
- "eval_samples_per_second": 8.134,
744
- "eval_steps_per_second": 0.13,
745
- "step": 477
746
- },
747
- {
748
- "epoch": 0.9984301412872841,
749
- "step": 477,
750
  "total_flos": 0.0,
751
- "train_loss": 0.6406088435175034,
752
- "train_runtime": 13597.1074,
753
- "train_samples_per_second": 4.496,
754
- "train_steps_per_second": 0.035
755
  }
756
  ],
757
  "logging_steps": 10,
758
- "max_steps": 477,
759
  "num_input_tokens_seen": 0,
760
  "num_train_epochs": 1,
761
  "save_steps": 500,
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.4981684981684982,
5
  "eval_steps": 500,
6
+ "global_step": 238,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0020931449502878076,
13
+ "grad_norm": 8.664774799838723,
14
+ "learning_rate": 2.083333333333333e-08,
15
+ "logits/chosen": -2.577885627746582,
16
+ "logits/rejected": -2.5179054737091064,
17
+ "logps/chosen": -333.919921875,
18
+ "logps/rejected": -379.9395446777344,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
 
25
  },
26
  {
27
  "epoch": 0.020931449502878074,
28
+ "grad_norm": 8.566465757508732,
29
+ "learning_rate": 2.0833333333333333e-07,
30
+ "logits/chosen": -2.3459672927856445,
31
+ "logits/rejected": -2.306079387664795,
32
+ "logps/chosen": -323.2978515625,
33
+ "logps/rejected": -286.4886169433594,
34
+ "loss": 0.693,
35
+ "rewards/accuracies": 0.4930555522441864,
36
+ "rewards/chosen": 0.0005530998460017145,
37
+ "rewards/margins": 0.00043918879237025976,
38
+ "rewards/rejected": 0.00011391111183911562,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.04186289900575615,
43
+ "grad_norm": 7.716242185132959,
44
+ "learning_rate": 4.1666666666666667e-07,
45
+ "logits/chosen": -2.4348297119140625,
46
+ "logits/rejected": -2.360680103302002,
47
+ "logps/chosen": -325.21881103515625,
48
+ "logps/rejected": -293.9873352050781,
49
+ "loss": 0.691,
50
+ "rewards/accuracies": 0.59375,
51
+ "rewards/chosen": 0.017020396888256073,
52
+ "rewards/margins": 0.004037556238472462,
53
+ "rewards/rejected": 0.012982839718461037,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.06279434850863422,
58
+ "grad_norm": 7.832502055380039,
59
+ "learning_rate": 4.99030821197584e-07,
60
+ "logits/chosen": -2.412220001220703,
61
+ "logits/rejected": -2.3266615867614746,
62
+ "logps/chosen": -302.0142822265625,
63
+ "logps/rejected": -254.2772674560547,
64
+ "loss": 0.6851,
65
+ "rewards/accuracies": 0.6499999761581421,
66
+ "rewards/chosen": 0.06097496673464775,
67
+ "rewards/margins": 0.01743171736598015,
68
+ "rewards/rejected": 0.0435432493686676,
69
  "step": 30
70
  },
71
  {
72
  "epoch": 0.0837257980115123,
73
+ "grad_norm": 7.902158374801651,
74
+ "learning_rate": 4.931352528237397e-07,
75
+ "logits/chosen": -2.3256008625030518,
76
+ "logits/rejected": -2.2759265899658203,
77
+ "logps/chosen": -278.6626892089844,
78
+ "logps/rejected": -268.6101379394531,
79
+ "loss": 0.6725,
80
+ "rewards/accuracies": 0.675000011920929,
81
+ "rewards/chosen": 0.06535704433917999,
82
+ "rewards/margins": 0.043393589556217194,
83
+ "rewards/rejected": 0.021963462233543396,
84
  "step": 40
85
  },
86
  {
87
  "epoch": 0.10465724751439037,
88
+ "grad_norm": 8.407115911648058,
89
+ "learning_rate": 4.820092227512735e-07,
90
+ "logits/chosen": -2.383488893508911,
91
+ "logits/rejected": -2.303358554840088,
92
+ "logps/chosen": -293.69610595703125,
93
+ "logps/rejected": -292.30072021484375,
94
+ "loss": 0.658,
95
+ "rewards/accuracies": 0.6937500238418579,
96
+ "rewards/chosen": 0.002248649951070547,
97
+ "rewards/margins": 0.07593157142400742,
98
+ "rewards/rejected": -0.07368291914463043,
99
  "step": 50
100
  },
101
  {
102
  "epoch": 0.12558869701726844,
103
+ "grad_norm": 11.158531298553909,
104
+ "learning_rate": 4.658920803689553e-07,
105
+ "logits/chosen": -2.446989059448242,
106
+ "logits/rejected": -2.3837265968322754,
107
+ "logps/chosen": -269.00372314453125,
108
+ "logps/rejected": -268.125,
109
+ "loss": 0.6402,
110
+ "rewards/accuracies": 0.7250000238418579,
111
+ "rewards/chosen": -0.011755615472793579,
112
+ "rewards/margins": 0.1335989534854889,
113
+ "rewards/rejected": -0.14535458385944366,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.14652014652014653,
118
+ "grad_norm": 11.281874103715333,
119
+ "learning_rate": 4.4513054666826144e-07,
120
+ "logits/chosen": -2.4044275283813477,
121
+ "logits/rejected": -2.3712165355682373,
122
+ "logps/chosen": -324.79327392578125,
123
+ "logps/rejected": -327.00067138671875,
124
+ "loss": 0.6198,
125
+ "rewards/accuracies": 0.6937500238418579,
126
+ "rewards/chosen": -0.1347241848707199,
127
+ "rewards/margins": 0.18882441520690918,
128
+ "rewards/rejected": -0.3235485851764679,
129
  "step": 70
130
  },
131
  {
132
  "epoch": 0.1674515960230246,
133
+ "grad_norm": 12.908325443862251,
134
+ "learning_rate": 4.201712553872657e-07,
135
+ "logits/chosen": -2.5128796100616455,
136
+ "logits/rejected": -2.4123282432556152,
137
+ "logps/chosen": -363.02239990234375,
138
+ "logps/rejected": -309.88299560546875,
139
+ "loss": 0.6084,
140
+ "rewards/accuracies": 0.75,
141
+ "rewards/chosen": -0.14533400535583496,
142
+ "rewards/margins": 0.2766640782356262,
143
+ "rewards/rejected": -0.4219980239868164,
144
  "step": 80
145
  },
146
  {
147
  "epoch": 0.18838304552590268,
148
+ "grad_norm": 13.536324985927438,
149
+ "learning_rate": 3.9155114477557926e-07,
150
+ "logits/chosen": -2.455129623413086,
151
+ "logits/rejected": -2.4033994674682617,
152
+ "logps/chosen": -286.1517639160156,
153
+ "logps/rejected": -317.22894287109375,
154
+ "loss": 0.5962,
155
+ "rewards/accuracies": 0.7250000238418579,
156
+ "rewards/chosen": -0.20300355553627014,
157
+ "rewards/margins": 0.3028048872947693,
158
+ "rewards/rejected": -0.5058084726333618,
159
  "step": 90
160
  },
161
  {
162
  "epoch": 0.20931449502878074,
163
+ "grad_norm": 14.798663112211683,
164
+ "learning_rate": 3.598859066780754e-07,
165
+ "logits/chosen": -2.449500322341919,
166
+ "logits/rejected": -2.4120354652404785,
167
+ "logps/chosen": -336.9562072753906,
168
+ "logps/rejected": -350.5511779785156,
169
+ "loss": 0.595,
170
+ "rewards/accuracies": 0.731249988079071,
171
+ "rewards/chosen": -0.2370326966047287,
172
+ "rewards/margins": 0.3764740824699402,
173
+ "rewards/rejected": -0.6135067939758301,
174
  "step": 100
175
  },
176
  {
177
  "epoch": 0.2302459445316588,
178
+ "grad_norm": 14.361890292809987,
179
+ "learning_rate": 3.2585674142717477e-07,
180
+ "logits/chosen": -2.495793104171753,
181
+ "logits/rejected": -2.4411723613739014,
182
+ "logps/chosen": -330.66552734375,
183
+ "logps/rejected": -342.8299255371094,
184
+ "loss": 0.6005,
185
+ "rewards/accuracies": 0.731249988079071,
186
+ "rewards/chosen": -0.2372889220714569,
187
+ "rewards/margins": 0.3125527799129486,
188
+ "rewards/rejected": -0.5498417019844055,
189
  "step": 110
190
  },
191
  {
192
  "epoch": 0.25117739403453687,
193
+ "grad_norm": 14.525239841150341,
194
+ "learning_rate": 2.9019570347986706e-07,
195
+ "logits/chosen": -2.403376579284668,
196
+ "logits/rejected": -2.4048409461975098,
197
+ "logps/chosen": -321.9924621582031,
198
+ "logps/rejected": -352.308349609375,
199
+ "loss": 0.5896,
200
+ "rewards/accuracies": 0.7124999761581421,
201
+ "rewards/chosen": -0.3897819221019745,
202
+ "rewards/margins": 0.3965305685997009,
203
+ "rewards/rejected": -0.786312460899353,
204
  "step": 120
205
  },
206
  {
207
  "epoch": 0.272108843537415,
208
+ "grad_norm": 12.48897066480979,
209
+ "learning_rate": 2.536699530523291e-07,
210
+ "logits/chosen": -2.447078227996826,
211
+ "logits/rejected": -2.423405170440674,
212
+ "logps/chosen": -341.9205017089844,
213
+ "logps/rejected": -363.8473205566406,
214
+ "loss": 0.5687,
215
+ "rewards/accuracies": 0.6875,
216
+ "rewards/chosen": -0.42893487215042114,
217
+ "rewards/margins": 0.39049193263053894,
218
+ "rewards/rejected": -0.8194268345832825,
219
  "step": 130
220
  },
221
  {
222
  "epoch": 0.29304029304029305,
223
+ "grad_norm": 18.31143310183984,
224
+ "learning_rate": 2.1706525253979534e-07,
225
+ "logits/chosen": -2.4714255332946777,
226
+ "logits/rejected": -2.4288697242736816,
227
+ "logps/chosen": -371.23675537109375,
228
+ "logps/rejected": -369.59149169921875,
229
+ "loss": 0.5698,
230
+ "rewards/accuracies": 0.7749999761581421,
231
+ "rewards/chosen": -0.355832040309906,
232
+ "rewards/margins": 0.5035004615783691,
233
+ "rewards/rejected": -0.8593324422836304,
234
  "step": 140
235
  },
236
  {
237
  "epoch": 0.3139717425431711,
238
+ "grad_norm": 20.116564883995512,
239
+ "learning_rate": 1.8116906275593507e-07,
240
+ "logits/chosen": -2.358585834503174,
241
+ "logits/rejected": -2.3133060932159424,
242
+ "logps/chosen": -350.17510986328125,
243
+ "logps/rejected": -356.8619689941406,
244
+ "loss": 0.5758,
245
  "rewards/accuracies": 0.6875,
246
+ "rewards/chosen": -0.550204873085022,
247
+ "rewards/margins": 0.4276268482208252,
248
+ "rewards/rejected": -0.9778317213058472,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.3349031920460492,
253
+ "grad_norm": 17.263103800422705,
254
+ "learning_rate": 1.4675360263490295e-07,
255
+ "logits/chosen": -2.293246030807495,
256
+ "logits/rejected": -2.204132318496704,
257
+ "logps/chosen": -353.46380615234375,
258
+ "logps/rejected": -375.65008544921875,
259
+ "loss": 0.5731,
260
  "rewards/accuracies": 0.706250011920929,
261
+ "rewards/chosen": -0.6218055486679077,
262
+ "rewards/margins": 0.5060153603553772,
263
+ "rewards/rejected": -1.1278208494186401,
264
  "step": 160
265
  },
266
  {
267
  "epoch": 0.35583464154892724,
268
+ "grad_norm": 21.892050941895622,
269
+ "learning_rate": 1.1455923682523475e-07,
270
+ "logits/chosen": -2.0538814067840576,
271
+ "logits/rejected": -2.005150318145752,
272
+ "logps/chosen": -362.171630859375,
273
+ "logps/rejected": -370.13824462890625,
274
+ "loss": 0.579,
275
+ "rewards/accuracies": 0.6875,
276
+ "rewards/chosen": -0.8733247518539429,
277
+ "rewards/margins": 0.3510749936103821,
278
+ "rewards/rejected": -1.2243998050689697,
279
  "step": 170
280
  },
281
  {
282
  "epoch": 0.37676609105180536,
283
+ "grad_norm": 21.58845651065895,
284
+ "learning_rate": 8.527854855097224e-08,
285
+ "logits/chosen": -2.1525955200195312,
286
+ "logits/rejected": -2.091989517211914,
287
+ "logps/chosen": -361.34674072265625,
288
+ "logps/rejected": -368.7857666015625,
289
+ "loss": 0.5487,
290
+ "rewards/accuracies": 0.668749988079071,
291
+ "rewards/chosen": -0.7656270861625671,
292
+ "rewards/margins": 0.3676658272743225,
293
+ "rewards/rejected": -1.1332929134368896,
294
  "step": 180
295
  },
296
  {
297
  "epoch": 0.3976975405546834,
298
+ "grad_norm": 18.9231423895233,
299
+ "learning_rate": 5.9541440373546445e-08,
300
+ "logits/chosen": -2.0911571979522705,
301
+ "logits/rejected": -2.0631556510925293,
302
+ "logps/chosen": -333.4042053222656,
303
+ "logps/rejected": -363.6320495605469,
304
+ "loss": 0.566,
305
+ "rewards/accuracies": 0.706250011920929,
306
+ "rewards/chosen": -0.6202136278152466,
307
+ "rewards/margins": 0.3467435836791992,
308
+ "rewards/rejected": -0.9669572710990906,
309
  "step": 190
310
  },
311
  {
312
  "epoch": 0.4186289900575615,
313
+ "grad_norm": 56.6391329702711,
314
+ "learning_rate": 3.790158337517127e-08,
315
+ "logits/chosen": -2.0397160053253174,
316
+ "logits/rejected": -2.031644821166992,
317
+ "logps/chosen": -331.656982421875,
318
+ "logps/rejected": -385.3870544433594,
319
+ "loss": 0.5701,
320
+ "rewards/accuracies": 0.7124999761581421,
321
+ "rewards/chosen": -0.6595208644866943,
322
+ "rewards/margins": 0.3587157130241394,
323
+ "rewards/rejected": -1.018236517906189,
324
  "step": 200
325
  },
326
  {
327
  "epoch": 0.43956043956043955,
328
+ "grad_norm": 20.720749008009797,
329
+ "learning_rate": 2.0824506276503894e-08,
330
+ "logits/chosen": -2.1288130283355713,
331
+ "logits/rejected": -2.0033929347991943,
332
+ "logps/chosen": -395.6886901855469,
333
+ "logps/rejected": -377.8100280761719,
334
+ "loss": 0.5716,
335
+ "rewards/accuracies": 0.6812499761581421,
336
+ "rewards/chosen": -0.6847324371337891,
337
+ "rewards/margins": 0.4595041275024414,
338
+ "rewards/rejected": -1.1442365646362305,
339
  "step": 210
340
  },
341
  {
342
  "epoch": 0.4604918890633176,
343
+ "grad_norm": 18.207704353879663,
344
+ "learning_rate": 8.677580722139671e-09,
345
+ "logits/chosen": -2.0156641006469727,
346
+ "logits/rejected": -2.000734806060791,
347
+ "logps/chosen": -328.88897705078125,
348
+ "logps/rejected": -371.46075439453125,
349
+ "loss": 0.5723,
350
+ "rewards/accuracies": 0.6875,
351
+ "rewards/chosen": -0.7237851619720459,
352
+ "rewards/margins": 0.4359474778175354,
353
+ "rewards/rejected": -1.159732699394226,
354
  "step": 220
355
  },
356
  {
357
  "epoch": 0.48142333856619574,
358
+ "grad_norm": 16.046846183278838,
359
+ "learning_rate": 1.722118176089915e-09,
360
+ "logits/chosen": -2.1439056396484375,
361
+ "logits/rejected": -2.052799940109253,
362
+ "logps/chosen": -336.7804870605469,
363
+ "logps/rejected": -378.94598388671875,
364
+ "loss": 0.5646,
365
+ "rewards/accuracies": 0.75,
366
+ "rewards/chosen": -0.6018725633621216,
367
+ "rewards/margins": 0.680876612663269,
368
+ "rewards/rejected": -1.2827491760253906,
369
  "step": 230
370
  },
371
  {
372
+ "epoch": 0.4981684981684982,
373
+ "eval_logits/chosen": -2.077331781387329,
374
+ "eval_logits/rejected": -2.0252277851104736,
375
+ "eval_logps/chosen": -340.7309265136719,
376
+ "eval_logps/rejected": -393.4054260253906,
377
+ "eval_loss": 0.5650774240493774,
378
+ "eval_rewards/accuracies": 0.75,
379
+ "eval_rewards/chosen": -0.614263653755188,
380
+ "eval_rewards/margins": 0.5565669536590576,
381
+ "eval_rewards/rejected": -1.1708307266235352,
382
+ "eval_runtime": 168.9579,
383
+ "eval_samples_per_second": 11.837,
384
+ "eval_steps_per_second": 0.189,
385
+ "step": 238
386
+ },
387
+ {
388
+ "epoch": 0.4981684981684982,
389
+ "step": 238,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
390
  "total_flos": 0.0,
391
+ "train_loss": 0.6031974924712622,
392
+ "train_runtime": 7143.0668,
393
+ "train_samples_per_second": 4.265,
394
+ "train_steps_per_second": 0.033
395
  }
396
  ],
397
  "logging_steps": 10,
398
+ "max_steps": 238,
399
  "num_input_tokens_seen": 0,
400
  "num_train_epochs": 1,
401
  "save_steps": 500,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a917c64a8bc731aa9c248cac3f53adb5e79283709a6de18e8be835bb3be98d41
+ oid sha256:d14e0d3014898aeb2f2673a7410657991ac2ba68b3d48a107a58b78d8283c657
  size 7672