li-muyang committed
Commit acef35f · verified · 1 Parent(s): b7fa9c0

Model save

README.md CHANGED
@@ -16,15 +16,15 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model was trained from scratch on an unknown dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.5765
- - Rewards/chosen: -0.2836
- - Rewards/rejected: -0.7661
- - Rewards/accuracies: 0.75
- - Rewards/margins: 0.4825
- - Logps/rejected: -342.4585
- - Logps/chosen: -295.9262
- - Logits/rejected: -2.6851
- - Logits/chosen: -2.7117
+ - Loss: 0.5669
+ - Rewards/chosen: -0.4034
+ - Rewards/rejected: -0.9628
+ - Rewards/accuracies: 0.7461
+ - Rewards/margins: 0.5594
+ - Logps/rejected: -372.7966
+ - Logps/chosen: -315.5382
+ - Logits/rejected: -2.3014
+ - Logits/chosen: -2.3480
 
 ## Model description
 
@@ -61,7 +61,7 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
 |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.5798 | 0.4982 | 238 | 0.5765 | -0.2836 | -0.7661 | 0.75 | 0.4825 | -342.4585 | -295.9262 | -2.6851 | -2.7117 |
+ | 0.5667 | 0.4982 | 238 | 0.5669 | -0.4034 | -0.9628 | 0.7461 | 0.5594 | -372.7966 | -315.5382 | -2.3014 | -2.3480 |
 
 
 ### Framework versions
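The `Rewards/*` and `Logps/*` names above match the metrics logged by preference-optimization trainers such as TRL's `DPOTrainer`, where the implicit reward of a completion is β times the log-probability ratio between the trained policy and a frozen reference model. The card does not state the trainer or β, so the sketch below is only an illustration of how such metrics are typically aggregated; `beta` and the function name are placeholders, not values from this run.

```python
# Hypothetical sketch of how DPO-style reward metrics are usually derived
# from policy and reference log-probabilities; beta=0.01 is a placeholder.

def dpo_reward_metrics(logps_chosen, logps_rejected,
                       ref_logps_chosen, ref_logps_rejected, beta=0.01):
    """Aggregate rewards/chosen, rewards/rejected, margins and accuracies
    for one batch of (chosen, rejected) completion pairs."""
    rewards_chosen = [beta * (p - r) for p, r in zip(logps_chosen, ref_logps_chosen)]
    rewards_rejected = [beta * (p - r) for p, r in zip(logps_rejected, ref_logps_rejected)]
    margins = [c - r for c, r in zip(rewards_chosen, rewards_rejected)]
    accuracy = sum(c > r for c, r in zip(rewards_chosen, rewards_rejected)) / len(margins)
    return {
        "rewards/chosen": sum(rewards_chosen) / len(rewards_chosen),
        "rewards/rejected": sum(rewards_rejected) / len(rewards_rejected),
        "rewards/margins": sum(margins) / len(margins),
        "rewards/accuracies": accuracy,
    }
```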
all_results.json CHANGED
@@ -1,9 +1,9 @@
 {
   "epoch": 0.4981684981684982,
   "total_flos": 0.0,
-  "train_loss": 0.6047679316095945,
-  "train_runtime": 7201.8834,
+  "train_loss": 0.6061525304778284,
+  "train_runtime": 7204.502,
   "train_samples": 61134,
-  "train_samples_per_second": 4.23,
+  "train_samples_per_second": 4.228,
   "train_steps_per_second": 0.033
 }
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "data/sft/zephyr-7b-sft-1e-every25/checkpoint-900",
+  "_name_or_path": "data/sft/zephyr-7b-sft-1e-every25/checkpoint-600",
   "architectures": [
     "MistralForCausalLM"
   ],
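`config.json` identifies the checkpoint as a `MistralForCausalLM` initialized from a zephyr-7b SFT checkpoint. A minimal loading sketch, assuming the files from this commit have been downloaded to a local directory; the path below is a placeholder:

```python
# Minimal sketch: load the checkpoint saved in this commit with transformers.
# "path/to/checkpoint" is a placeholder for the downloaded repo directory.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "path/to/checkpoint",        # directory containing config.json + *.safetensors shards
    torch_dtype=torch.bfloat16,  # the 7B weights are large; bf16 halves memory
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint")
```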
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:8d93eb6300b877466ea2f4e521cf819b0ef638a9ea09ad57832ad62ea8917b3f
+ oid sha256:e4d2c8e646e5701192e73de8c38e6d4056e9f571ca8e10a64d702d40172ec86e
 size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:09174fa607bcca81805e06045dda478fdf2a5dc93bfb0a52c322bf4fa96de23a
+ oid sha256:7050cc11c07f527e41773fffa439f1cabdeb7522827afd0523198340ec011529
 size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:81fa5d1748aec6d9e1442511af340a774590249d11e7d8f0b2e7d2f38c73ea44
+ oid sha256:8b3a7c01323eff3dc3201152846f798738d11befcbd65f5366c003220d7aa2f6
 size 4540516344
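The `*.safetensors` entries above are Git LFS pointer files: each records only the spec version, the SHA-256 of the real shard, and its size in bytes. A sketch for checking a downloaded shard against its pointer, using the new (`+`) values of the first shard from this commit:

```python
# Sketch: verify a downloaded shard against the LFS pointer shown above.
# Expected digest/size are the "+" values for model-00001-of-00003.safetensors.
import hashlib
import os

path = "model-00001-of-00003.safetensors"
expected_sha256 = "e4d2c8e646e5701192e73de8c38e6d4056e9f571ca8e10a64d702d40172ec86e"
expected_size = 4943162336

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_sha256, "sha256 mismatch"
print("shard matches its LFS pointer")
```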
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
   "epoch": 0.4981684981684982,
   "total_flos": 0.0,
-  "train_loss": 0.6047679316095945,
-  "train_runtime": 7201.8834,
+  "train_loss": 0.6061525304778284,
+  "train_runtime": 7204.502,
   "train_samples": 61134,
-  "train_samples_per_second": 4.23,
+  "train_samples_per_second": 4.228,
   "train_steps_per_second": 0.033
 }
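The throughput figures appear internally consistent: samples per second is roughly the number of samples processed (dataset size × fraction of an epoch) divided by the runtime, and steps per second is the step count divided by the runtime. A quick check with the updated values; the 238 optimizer steps come from `trainer_state.json` below:

```python
# Consistency check for the updated train_results.json values.
train_samples = 61134
epoch = 0.4981684981684982
train_runtime = 7204.502  # seconds
steps = 238               # from trainer_state.json

samples_seen = train_samples * epoch
print(round(samples_seen / train_runtime, 3))  # ~4.227, close to the reported 4.228
print(round(steps / train_runtime, 3))         # ~0.033, matches train_steps_per_second
```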
trainer_state.json CHANGED
@@ -10,12 +10,12 @@
   "log_history": [
     {
       "epoch": 0.0020931449502878076,
-      "grad_norm": 12.610560260880211,
+      "grad_norm": 26.601607092770625,
       "learning_rate": 2.083333333333333e-08,
-      "logits/chosen": -2.99812388420105,
-      "logits/rejected": -2.9638350009918213,
-      "logps/chosen": -315.4977722167969,
-      "logps/rejected": -361.9093322753906,
+      "logits/chosen": -2.9139022827148438,
+      "logits/rejected": -2.8787596225738525,
+      "logps/chosen": -325.8533020019531,
+      "logps/rejected": -372.9187927246094,
       "loss": 0.6931,
       "rewards/accuracies": 0.0,
       "rewards/chosen": 0.0,
@@ -25,372 +25,372 @@
     },
     {
       "epoch": 0.020931449502878074,
-      "grad_norm": 9.936649993419687,
+      "grad_norm": 16.144681025948177,
       "learning_rate": 2.0833333333333333e-07,
-      "logits/chosen": -2.788989543914795,
-      "logits/rejected": -2.7662315368652344,
-      "logps/chosen": -314.0841064453125,
-      "logps/rejected": -282.2810974121094,
-      "loss": 0.693,
-      "rewards/accuracies": 0.4722222089767456,
-      "rewards/chosen": 0.0008651986136101186,
-      "rewards/margins": 0.0003082120092585683,
-      "rewards/rejected": 0.0005569865461438894,
+      "logits/chosen": -2.730161428451538,
+      "logits/rejected": -2.7064931392669678,
+      "logps/chosen": -331.04364013671875,
+      "logps/rejected": -309.8411865234375,
+      "loss": 0.6931,
+      "rewards/accuracies": 0.4861111044883728,
+      "rewards/chosen": 0.001100256573408842,
+      "rewards/margins": 0.0007432710262946784,
+      "rewards/rejected": 0.0003569853724911809,
       "step": 10
     },
     {
       "epoch": 0.04186289900575615,
-      "grad_norm": 8.667356504170359,
+      "grad_norm": 14.575580508073863,
       "learning_rate": 4.1666666666666667e-07,
-      "logits/chosen": -2.8659820556640625,
-      "logits/rejected": -2.8171162605285645,
-      "logps/chosen": -311.6136779785156,
-      "logps/rejected": -284.09893798828125,
-      "loss": 0.6909,
-      "rewards/accuracies": 0.6000000238418579,
-      "rewards/chosen": 0.013467146083712578,
-      "rewards/margins": 0.004726298153400421,
-      "rewards/rejected": 0.008740848861634731,
+      "logits/chosen": -2.798081874847412,
+      "logits/rejected": -2.7473387718200684,
+      "logps/chosen": -321.3909912109375,
+      "logps/rejected": -299.4921569824219,
+      "loss": 0.6918,
+      "rewards/accuracies": 0.5687500238418579,
+      "rewards/chosen": 0.006536015775054693,
+      "rewards/margins": 0.0068586282432079315,
+      "rewards/rejected": -0.0003226128756068647,
       "step": 20
     },
     {
       "epoch": 0.06279434850863422,
-      "grad_norm": 8.034528876736365,
+      "grad_norm": 8.070909879894359,
      "learning_rate": 4.99030821197584e-07,
-      "logits/chosen": -2.8421683311462402,
-      "logits/rejected": -2.7854294776916504,
-      "logps/chosen": -289.38385009765625,
-      "logps/rejected": -247.28732299804688,
-      "loss": 0.6833,
-      "rewards/accuracies": 0.65625,
-      "rewards/chosen": 0.05833645910024643,
-      "rewards/margins": 0.024186396971344948,
-      "rewards/rejected": 0.03415006399154663,
+      "logits/chosen": -2.7842161655426025,
+      "logits/rejected": -2.728454113006592,
+      "logps/chosen": -304.2439880371094,
+      "logps/rejected": -275.6659851074219,
+      "loss": 0.6863,
+      "rewards/accuracies": 0.6937500238418579,
+      "rewards/chosen": 0.04330765828490257,
+      "rewards/margins": 0.05840452387928963,
+      "rewards/rejected": -0.015096860937774181,
       "step": 30
     },
     {
       "epoch": 0.0837257980115123,
-      "grad_norm": 7.833193286645188,
+      "grad_norm": 8.530980057951403,
       "learning_rate": 4.931352528237397e-07,
-      "logits/chosen": -2.7391459941864014,
-      "logits/rejected": -2.716399669647217,
-      "logps/chosen": -269.075927734375,
-      "logps/rejected": -262.9975891113281,
-      "loss": 0.6682,
-      "rewards/accuracies": 0.6625000238418579,
-      "rewards/chosen": 0.052591562271118164,
-      "rewards/margins": 0.0480208583176136,
-      "rewards/rejected": 0.0045707011595368385,
+      "logits/chosen": -2.7036166191101074,
+      "logits/rejected": -2.671600103378296,
+      "logps/chosen": -289.6343078613281,
+      "logps/rejected": -277.79425048828125,
+      "loss": 0.6767,
+      "rewards/accuracies": 0.65625,
+      "rewards/chosen": 0.02837621606886387,
+      "rewards/margins": 0.037141989916563034,
+      "rewards/rejected": -0.008765773847699165,
       "step": 40
     },
     {
       "epoch": 0.10465724751439037,
-      "grad_norm": 8.72813708365478,
+      "grad_norm": 11.721866187252868,
       "learning_rate": 4.820092227512735e-07,
-      "logits/chosen": -2.7570505142211914,
-      "logits/rejected": -2.696572780609131,
-      "logps/chosen": -282.76092529296875,
-      "logps/rejected": -286.132568359375,
-      "loss": 0.6548,
-      "rewards/accuracies": 0.71875,
-      "rewards/chosen": -0.010979737155139446,
-      "rewards/margins": 0.09300607442855835,
-      "rewards/rejected": -0.10398580878973007,
+      "logits/chosen": -2.740696430206299,
+      "logits/rejected": -2.6848952770233154,
+      "logps/chosen": -290.8323059082031,
+      "logps/rejected": -310.22576904296875,
+      "loss": 0.6607,
+      "rewards/accuracies": 0.6875,
+      "rewards/chosen": -0.014719474129378796,
+      "rewards/margins": 0.10523072630167007,
+      "rewards/rejected": -0.11995019763708115,
       "step": 50
     },
     {
       "epoch": 0.12558869701726844,
-      "grad_norm": 13.480613892862241,
+      "grad_norm": 10.915648900872057,
       "learning_rate": 4.658920803689553e-07,
-      "logits/chosen": -2.789520502090454,
-      "logits/rejected": -2.7424087524414062,
-      "logps/chosen": -258.74908447265625,
-      "logps/rejected": -264.25225830078125,
-      "loss": 0.6357,
-      "rewards/accuracies": 0.737500011920929,
-      "rewards/chosen": -0.012783573940396309,
-      "rewards/margins": 0.15775156021118164,
-      "rewards/rejected": -0.1705351322889328,
+      "logits/chosen": -2.785388469696045,
+      "logits/rejected": -2.744814395904541,
+      "logps/chosen": -268.2928466796875,
+      "logps/rejected": -272.32122802734375,
+      "loss": 0.6461,
+      "rewards/accuracies": 0.7562500238418579,
+      "rewards/chosen": -0.02511899545788765,
+      "rewards/margins": 0.14482316374778748,
+      "rewards/rejected": -0.16994217038154602,
       "step": 60
     },
     {
       "epoch": 0.14652014652014653,
-      "grad_norm": 10.234102018070638,
+      "grad_norm": 11.152537838170248,
       "learning_rate": 4.4513054666826144e-07,
-      "logits/chosen": -2.7376105785369873,
-      "logits/rejected": -2.709526300430298,
-      "logps/chosen": -311.8622131347656,
-      "logps/rejected": -316.22576904296875,
-      "loss": 0.6163,
+      "logits/chosen": -2.745926856994629,
+      "logits/rejected": -2.7201263904571533,
+      "logps/chosen": -332.7641296386719,
+      "logps/rejected": -326.02862548828125,
+      "loss": 0.6338,
       "rewards/accuracies": 0.699999988079071,
-      "rewards/chosen": -0.11481525003910065,
-      "rewards/margins": 0.20760241150856018,
-      "rewards/rejected": -0.32241764664649963,
+      "rewards/chosen": -0.12678995728492737,
+      "rewards/margins": 0.15600599348545074,
+      "rewards/rejected": -0.2827959656715393,
       "step": 70
     },
     {
       "epoch": 0.1674515960230246,
-      "grad_norm": 20.552548250652773,
+      "grad_norm": 20.747423551686875,
       "learning_rate": 4.201712553872657e-07,
-      "logits/chosen": -2.816467761993408,
-      "logits/rejected": -2.7381579875946045,
-      "logps/chosen": -341.7183837890625,
-      "logps/rejected": -293.2793884277344,
-      "loss": 0.6055,
-      "rewards/accuracies": 0.7562500238418579,
-      "rewards/chosen": -0.03545045107603073,
-      "rewards/margins": 0.28459519147872925,
-      "rewards/rejected": -0.3200456500053406,
+      "logits/chosen": -2.8221614360809326,
+      "logits/rejected": -2.75636887550354,
+      "logps/chosen": -364.9082336425781,
+      "logps/rejected": -309.4717102050781,
+      "loss": 0.6129,
+      "rewards/accuracies": 0.737500011920929,
+      "rewards/chosen": -0.07667700946331024,
+      "rewards/margins": 0.23987922072410583,
+      "rewards/rejected": -0.3165562152862549,
       "step": 80
     },
     {
       "epoch": 0.18838304552590268,
-      "grad_norm": 13.720188030623374,
+      "grad_norm": 12.643373189764793,
       "learning_rate": 3.9155114477557926e-07,
-      "logits/chosen": -2.7560811042785645,
-      "logits/rejected": -2.7106573581695557,
-      "logps/chosen": -275.1942443847656,
-      "logps/rejected": -306.68206787109375,
-      "loss": 0.5887,
-      "rewards/accuracies": 0.6875,
-      "rewards/chosen": -0.1888897567987442,
-      "rewards/margins": 0.30051952600479126,
-      "rewards/rejected": -0.48940929770469666,
+      "logits/chosen": -2.7849178314208984,
+      "logits/rejected": -2.748291492462158,
+      "logps/chosen": -287.956787109375,
+      "logps/rejected": -340.713134765625,
+      "loss": 0.5971,
+      "rewards/accuracies": 0.699999988079071,
+      "rewards/chosen": -0.24098113179206848,
+      "rewards/margins": 0.3181685507297516,
+      "rewards/rejected": -0.5591496825218201,
      "step": 90
     },
     {
       "epoch": 0.20931449502878074,
-      "grad_norm": 16.303248781308948,
+      "grad_norm": 13.623537820936637,
       "learning_rate": 3.598859066780754e-07,
-      "logits/chosen": -2.7324023246765137,
-      "logits/rejected": -2.7062618732452393,
-      "logps/chosen": -322.99603271484375,
-      "logps/rejected": -339.0654602050781,
-      "loss": 0.5902,
-      "rewards/accuracies": 0.768750011920929,
-      "rewards/chosen": -0.19065451622009277,
-      "rewards/margins": 0.39800626039505005,
-      "rewards/rejected": -0.588660717010498,
+      "logits/chosen": -2.76792311668396,
+      "logits/rejected": -2.7459118366241455,
+      "logps/chosen": -346.0640563964844,
+      "logps/rejected": -348.5198059082031,
+      "loss": 0.6098,
+      "rewards/accuracies": 0.71875,
+      "rewards/chosen": -0.2652357220649719,
+      "rewards/margins": 0.34126365184783936,
+      "rewards/rejected": -0.6064993739128113,
       "step": 100
     },
     {
       "epoch": 0.2302459445316588,
-      "grad_norm": 20.16983103502839,
+      "grad_norm": 15.41884704737969,
       "learning_rate": 3.2585674142717477e-07,
-      "logits/chosen": -2.7619636058807373,
-      "logits/rejected": -2.713339328765869,
-      "logps/chosen": -338.515869140625,
-      "logps/rejected": -351.4220886230469,
-      "loss": 0.5967,
-      "rewards/accuracies": 0.6499999761581421,
-      "rewards/chosen": -0.4326956868171692,
-      "rewards/margins": 0.302705854177475,
-      "rewards/rejected": -0.7354015111923218,
+      "logits/chosen": -2.7856485843658447,
+      "logits/rejected": -2.7417104244232178,
+      "logps/chosen": -335.01116943359375,
+      "logps/rejected": -349.71392822265625,
+      "loss": 0.6003,
+      "rewards/accuracies": 0.6312500238418579,
+      "rewards/chosen": -0.32472461462020874,
+      "rewards/margins": 0.3165324330329895,
+      "rewards/rejected": -0.6412570476531982,
       "step": 110
     },
     {
       "epoch": 0.25117739403453687,
-      "grad_norm": 18.288819495100896,
+      "grad_norm": 15.439444003212843,
       "learning_rate": 2.9019570347986706e-07,
-      "logits/chosen": -2.670757293701172,
-      "logits/rejected": -2.6768569946289062,
-      "logps/chosen": -309.2828674316406,
-      "logps/rejected": -339.7432861328125,
-      "loss": 0.579,
-      "rewards/accuracies": 0.71875,
-      "rewards/chosen": -0.33222970366477966,
-      "rewards/margins": 0.40109533071517944,
-      "rewards/rejected": -0.7333250045776367,
+      "logits/chosen": -2.710737705230713,
+      "logits/rejected": -2.718116283416748,
+      "logps/chosen": -330.0270690917969,
+      "logps/rejected": -354.4695739746094,
+      "loss": 0.5832,
+      "rewards/accuracies": 0.762499988079071,
+      "rewards/chosen": -0.3180462718009949,
+      "rewards/margins": 0.3863913416862488,
+      "rewards/rejected": -0.7044375538825989,
       "step": 120
     },
     {
       "epoch": 0.272108843537415,
-      "grad_norm": 15.354064982647847,
+      "grad_norm": 42.02069053016682,
       "learning_rate": 2.536699530523291e-07,
-      "logits/chosen": -2.722832679748535,
-      "logits/rejected": -2.7066872119903564,
-      "logps/chosen": -318.85833740234375,
-      "logps/rejected": -335.77911376953125,
-      "loss": 0.5666,
-      "rewards/accuracies": 0.6812499761581421,
-      "rewards/chosen": -0.2824149429798126,
-      "rewards/margins": 0.33272355794906616,
-      "rewards/rejected": -0.6151384711265564,
+      "logits/chosen": -2.7691874504089355,
+      "logits/rejected": -2.752965211868286,
+      "logps/chosen": -347.0602722167969,
+      "logps/rejected": -357.85504150390625,
+      "loss": 0.5817,
+      "rewards/accuracies": 0.668749988079071,
+      "rewards/chosen": -0.2763240337371826,
+      "rewards/margins": 0.27957138419151306,
+      "rewards/rejected": -0.5558954477310181,
       "step": 130
     },
     {
       "epoch": 0.29304029304029305,
-      "grad_norm": 17.28336393009731,
+      "grad_norm": 24.79876226607902,
       "learning_rate": 2.1706525253979534e-07,
-      "logits/chosen": -2.7583320140838623,
-      "logits/rejected": -2.729793071746826,
-      "logps/chosen": -346.2691955566406,
-      "logps/rejected": -344.5732116699219,
-      "loss": 0.5763,
-      "rewards/accuracies": 0.7562500238418579,
-      "rewards/chosen": -0.24767926335334778,
-      "rewards/margins": 0.4790892004966736,
-      "rewards/rejected": -0.7267683744430542,
+      "logits/chosen": -2.76993989944458,
+      "logits/rejected": -2.750406265258789,
+      "logps/chosen": -360.31005859375,
+      "logps/rejected": -362.851806640625,
+      "loss": 0.588,
+      "rewards/accuracies": 0.7124999761581421,
+      "rewards/chosen": -0.26679104566574097,
+      "rewards/margins": 0.46773427724838257,
+      "rewards/rejected": -0.7345253229141235,
       "step": 140
     },
     {
       "epoch": 0.3139717425431711,
-      "grad_norm": 19.361596281568826,
+      "grad_norm": 22.26447349378322,
       "learning_rate": 1.8116906275593507e-07,
-      "logits/chosen": -2.7468533515930176,
-      "logits/rejected": -2.7160446643829346,
-      "logps/chosen": -323.2176208496094,
-      "logps/rejected": -324.5989990234375,
-      "loss": 0.5796,
-      "rewards/accuracies": 0.6875,
-      "rewards/chosen": -0.3653566539287567,
-      "rewards/margins": 0.3610517382621765,
-      "rewards/rejected": -0.7264083623886108,
+      "logits/chosen": -2.7364563941955566,
+      "logits/rejected": -2.7079169750213623,
+      "logps/chosen": -351.19189453125,
+      "logps/rejected": -349.318603515625,
+      "loss": 0.5755,
+      "rewards/accuracies": 0.6937500238418579,
+      "rewards/chosen": -0.43332210183143616,
+      "rewards/margins": 0.3800078332424164,
+      "rewards/rejected": -0.8133300542831421,
       "step": 150
     },
     {
       "epoch": 0.3349031920460492,
-      "grad_norm": 16.313995999292402,
+      "grad_norm": 26.67231794642688,
       "learning_rate": 1.4675360263490295e-07,
-      "logits/chosen": -2.7838118076324463,
-      "logits/rejected": -2.7365283966064453,
-      "logps/chosen": -319.6917724609375,
-      "logps/rejected": -340.581787109375,
-      "loss": 0.5759,
-      "rewards/accuracies": 0.7250000238418579,
-      "rewards/chosen": -0.40762004256248474,
-      "rewards/margins": 0.4462064802646637,
-      "rewards/rejected": -0.8538265228271484,
+      "logits/chosen": -2.730668544769287,
+      "logits/rejected": -2.683061122894287,
+      "logps/chosen": -329.1626281738281,
+      "logps/rejected": -350.14288330078125,
+      "loss": 0.5765,
+      "rewards/accuracies": 0.699999988079071,
+      "rewards/chosen": -0.4029548764228821,
+      "rewards/margins": 0.5021006464958191,
+      "rewards/rejected": -0.9050555229187012,
       "step": 160
     },
     {
       "epoch": 0.35583464154892724,
-      "grad_norm": 22.925451744364146,
+      "grad_norm": 20.223571694588983,
       "learning_rate": 1.1455923682523475e-07,
-      "logits/chosen": -2.659465789794922,
-      "logits/rejected": -2.644275188446045,
-      "logps/chosen": -311.614990234375,
-      "logps/rejected": -323.3099670410156,
-      "loss": 0.585,
-      "rewards/accuracies": 0.668749988079071,
-      "rewards/chosen": -0.4685409665107727,
-      "rewards/margins": 0.36185160279273987,
-      "rewards/rejected": -0.8303925395011902,
+      "logits/chosen": -2.532707691192627,
+      "logits/rejected": -2.512026309967041,
+      "logps/chosen": -324.9989929199219,
+      "logps/rejected": -334.66888427734375,
+      "loss": 0.5755,
+      "rewards/accuracies": 0.675000011920929,
+      "rewards/chosen": -0.36262771487236023,
+      "rewards/margins": 0.39737457036972046,
+      "rewards/rejected": -0.7600023150444031,
       "step": 170
     },
     {
       "epoch": 0.37676609105180536,
-      "grad_norm": 13.975706307085083,
+      "grad_norm": 18.84156651008044,
       "learning_rate": 8.527854855097224e-08,
-      "logits/chosen": -2.7547802925109863,
-      "logits/rejected": -2.7258901596069336,
-      "logps/chosen": -313.0289001464844,
-      "logps/rejected": -321.9787902832031,
-      "loss": 0.5538,
-      "rewards/accuracies": 0.706250011920929,
-      "rewards/chosen": -0.37977224588394165,
-      "rewards/margins": 0.3600301146507263,
-      "rewards/rejected": -0.739802360534668,
+      "logits/chosen": -2.5160155296325684,
+      "logits/rejected": -2.4737634658813477,
+      "logps/chosen": -328.61224365234375,
+      "logps/rejected": -344.9612121582031,
+      "loss": 0.5499,
+      "rewards/accuracies": 0.699999988079071,
+      "rewards/chosen": -0.454254150390625,
+      "rewards/margins": 0.40306931734085083,
+      "rewards/rejected": -0.8573234677314758,
       "step": 180
     },
     {
       "epoch": 0.3976975405546834,
-      "grad_norm": 16.933160186335314,
+      "grad_norm": 32.06520543659375,
       "learning_rate": 5.9541440373546445e-08,
-      "logits/chosen": -2.689134120941162,
-      "logits/rejected": -2.664074420928955,
-      "logps/chosen": -296.32672119140625,
-      "logps/rejected": -319.95001220703125,
-      "loss": 0.5755,
-      "rewards/accuracies": 0.668749988079071,
-      "rewards/chosen": -0.34621429443359375,
-      "rewards/margins": 0.2947639226913452,
-      "rewards/rejected": -0.6409782767295837,
+      "logits/chosen": -2.3760242462158203,
+      "logits/rejected": -2.352184772491455,
+      "logps/chosen": -323.0282897949219,
+      "logps/rejected": -362.2243957519531,
+      "loss": 0.5613,
+      "rewards/accuracies": 0.6499999761581421,
+      "rewards/chosen": -0.5423271059989929,
+      "rewards/margins": 0.3743038773536682,
+      "rewards/rejected": -0.9166310429573059,
       "step": 190
     },
     {
       "epoch": 0.4186289900575615,
-      "grad_norm": 21.908273233954745,
+      "grad_norm": 23.15044859855663,
       "learning_rate": 3.790158337517127e-08,
-      "logits/chosen": -2.653926372528076,
-      "logits/rejected": -2.6524300575256348,
-      "logps/chosen": -289.92926025390625,
-      "logps/rejected": -341.435546875,
-      "loss": 0.5823,
+      "logits/chosen": -2.311206817626953,
+      "logits/rejected": -2.2949588298797607,
+      "logps/chosen": -342.80731201171875,
+      "logps/rejected": -394.5885314941406,
+      "loss": 0.5711,
       "rewards/accuracies": 0.6937500238418579,
-      "rewards/chosen": -0.3269258737564087,
-      "rewards/margins": 0.3465423882007599,
-      "rewards/rejected": -0.6734683513641357,
+      "rewards/chosen": -0.5416313409805298,
+      "rewards/margins": 0.41420310735702515,
+      "rewards/rejected": -0.9558345079421997,
       "step": 200
     },
     {
       "epoch": 0.43956043956043955,
-      "grad_norm": 18.994509414379117,
+      "grad_norm": 19.06533234677666,
       "learning_rate": 2.0824506276503894e-08,
-      "logits/chosen": -2.7132248878479004,
-      "logits/rejected": -2.648906946182251,
-      "logps/chosen": -352.23211669921875,
-      "logps/rejected": -327.8628234863281,
-      "loss": 0.5851,
-      "rewards/accuracies": 0.6937500238418579,
-      "rewards/chosen": -0.38004761934280396,
-      "rewards/margins": 0.3670490086078644,
-      "rewards/rejected": -0.7470966577529907,
+      "logits/chosen": -2.3925139904022217,
+      "logits/rejected": -2.268831491470337,
+      "logps/chosen": -395.4692077636719,
+      "logps/rejected": -360.0453186035156,
+      "loss": 0.5805,
+      "rewards/accuracies": 0.7124999761581421,
+      "rewards/chosen": -0.5327693223953247,
+      "rewards/margins": 0.4689061641693115,
+      "rewards/rejected": -1.0016754865646362,
       "step": 210
     },
     {
       "epoch": 0.4604918890633176,
-      "grad_norm": 16.241884556359377,
+      "grad_norm": 24.03298915942009,
       "learning_rate": 8.677580722139671e-09,
-      "logits/chosen": -2.635542154312134,
-      "logits/rejected": -2.6469016075134277,
-      "logps/chosen": -288.9513244628906,
-      "logps/rejected": -323.00494384765625,
-      "loss": 0.573,
-      "rewards/accuracies": 0.6937500238418579,
-      "rewards/chosen": -0.3638390898704529,
-      "rewards/margins": 0.3917424976825714,
-      "rewards/rejected": -0.7555815577507019,
+      "logits/chosen": -2.316939353942871,
+      "logits/rejected": -2.2927820682525635,
+      "logps/chosen": -342.931640625,
+      "logps/rejected": -364.62982177734375,
+      "loss": 0.5674,
+      "rewards/accuracies": 0.699999988079071,
+      "rewards/chosen": -0.501736581325531,
+      "rewards/margins": 0.49570217728614807,
+      "rewards/rejected": -0.9974387884140015,
       "step": 220
     },
     {
       "epoch": 0.48142333856619574,
-      "grad_norm": 22.899672406649472,
+      "grad_norm": 19.987346780573713,
       "learning_rate": 1.722118176089915e-09,
-      "logits/chosen": -2.742950439453125,
-      "logits/rejected": -2.702911376953125,
-      "logps/chosen": -292.0196533203125,
-      "logps/rejected": -330.7127380371094,
-      "loss": 0.5798,
-      "rewards/accuracies": 0.800000011920929,
-      "rewards/chosen": -0.25979113578796387,
-      "rewards/margins": 0.5986371040344238,
-      "rewards/rejected": -0.8584282994270325,
+      "logits/chosen": -2.4182724952697754,
+      "logits/rejected": -2.337088108062744,
+      "logps/chosen": -337.9970703125,
+      "logps/rejected": -381.84320068359375,
+      "loss": 0.5667,
+      "rewards/accuracies": 0.75,
+      "rewards/chosen": -0.39490121603012085,
+      "rewards/margins": 0.7370277643203735,
+      "rewards/rejected": -1.1319290399551392,
       "step": 230
     },
     {
       "epoch": 0.4981684981684982,
-      "eval_logits/chosen": -2.711683750152588,
-      "eval_logits/rejected": -2.68514084815979,
-      "eval_logps/chosen": -295.9261779785156,
-      "eval_logps/rejected": -342.4585266113281,
-      "eval_loss": 0.576471209526062,
-      "eval_rewards/accuracies": 0.75,
-      "eval_rewards/chosen": -0.283584326505661,
-      "eval_rewards/margins": 0.48247623443603516,
-      "eval_rewards/rejected": -0.7660605311393738,
-      "eval_runtime": 168.4238,
-      "eval_samples_per_second": 11.875,
-      "eval_steps_per_second": 0.19,
+      "eval_logits/chosen": -2.3480114936828613,
+      "eval_logits/rejected": -2.3013908863067627,
+      "eval_logps/chosen": -315.5382385253906,
+      "eval_logps/rejected": -372.7966003417969,
+      "eval_loss": 0.5668980479240417,
+      "eval_rewards/accuracies": 0.74609375,
+      "eval_rewards/chosen": -0.4034212827682495,
+      "eval_rewards/margins": 0.5594114065170288,
+      "eval_rewards/rejected": -0.9628326892852783,
+      "eval_runtime": 171.8973,
+      "eval_samples_per_second": 11.635,
+      "eval_steps_per_second": 0.186,
       "step": 238
     },
     {
       "epoch": 0.4981684981684982,
       "step": 238,
       "total_flos": 0.0,
-      "train_loss": 0.6047679316095945,
-      "train_runtime": 7201.8834,
-      "train_samples_per_second": 4.23,
+      "train_loss": 0.6061525304778284,
+      "train_runtime": 7204.502,
+      "train_samples_per_second": 4.228,
       "train_steps_per_second": 0.033
     }
   ],
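`log_history` in `trainer_state.json` is a list of dictionaries, one per logging step, plus an evaluation entry and a final training summary. A small sketch for pulling the training loss and reward margin out of a downloaded copy of the file:

```python
# Sketch: read the log history saved in trainer_state.json and print the
# training loss and reward margin per logged optimizer step.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e and "step" in e]
for entry in train_logs:
    print(entry["step"], entry["loss"], entry.get("rewards/margins"))
```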
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:13885dc2cfddf0eb7080d5bad9f264e574a65e3f8468c00bc6a65382edb01768
+ oid sha256:a917c64a8bc731aa9c248cac3f53adb5e79283709a6de18e8be835bb3be98d41
 size 7672
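`training_args.bin` is typically the `TrainingArguments` object serialized by the Hugging Face `Trainer` with `torch.save`, so the run's hyperparameters can be inspected directly; a hedged sketch, noting that recent PyTorch versions require `weights_only=False` for pickled objects and that the file should only be loaded from a trusted source:

```python
# Sketch: inspect the serialized training arguments from this commit.
import torch

# weights_only=False is needed on PyTorch >= 2.6 because this is a pickled
# object, not a plain tensor file; only load files you trust.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```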