Incomple committed on
Commit ec49c99 · verified · 1 Parent(s): 340b00b

End of training

README.md CHANGED
@@ -3,9 +3,10 @@ library_name: peft
  license: llama3.1
  base_model: meta-llama/Llama-3.1-8B-Instruct
  tags:
+ - llama-factory
+ - lora
  - trl
  - dpo
- - llama-factory
  - generated_from_trainer
  model-index:
  - name: Llama-3.1-8B-Instruct_holistic_20
@@ -17,7 +18,7 @@ should probably proofread and complete it, then remove this comment. -->

  # Llama-3.1-8B-Instruct_holistic_20

- This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on an unknown dataset.
+ This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on the holistic_20 dataset.

  ## Model description

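The README above describes this as a PEFT (LoRA) adapter trained with DPO on top of meta-llama/Llama-3.1-8B-Instruct. A minimal loading sketch under that assumption follows; the adapter repo id is a placeholder built from the model-index name, so substitute the actual repository path for this commit.

```python
# Minimal sketch: attach the LoRA adapter to the base model with PEFT.
# "your-namespace/Llama-3.1-8B-Instruct_holistic_20" is a placeholder repo id.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Llama-3.1-8B-Instruct"
adapter_id = "your-namespace/Llama-3.1-8B-Instruct_holistic_20"  # placeholder

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)

prompt = "Briefly explain what DPO fine-tuning changes about a model."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```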
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 0.9997056226081837,
+   "total_flos": 1.5461894723437855e+18,
+   "train_loss": 0.1253820424179119,
+   "train_runtime": 39602.0987,
+   "train_samples_per_second": 0.515,
+   "train_steps_per_second": 0.064
+ }
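As a quick consistency check on these summary numbers (pure arithmetic on the logged values; the effective batch size is an inference, since per-device batch size, gradient accumulation, and device count are not recorded here):

```python
# Back-of-the-envelope check of the throughput figures in all_results.json.
train_runtime = 39602.0987        # seconds
samples_per_second = 0.515
steps_per_second = 0.064
global_step = 2547                # from trainer_state.json below

approx_samples = samples_per_second * train_runtime   # ~20,395 preference pairs seen
approx_steps = steps_per_second * train_runtime       # ~2,535, close to global_step
effective_batch = approx_samples / global_step         # ~8 samples per optimizer step (estimate)
print(round(approx_samples), round(approx_steps), round(effective_batch, 1))
```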
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 0.9997056226081837,
+   "total_flos": 1.5461894723437855e+18,
+   "train_loss": 0.1253820424179119,
+   "train_runtime": 39602.0987,
+   "train_samples_per_second": 0.515,
+   "train_steps_per_second": 0.064
+ }
trainer_state.json ADDED
@@ -0,0 +1,327 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9997056226081837,
+   "eval_steps": 500,
+   "global_step": 2547,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.05024040820331665,
+       "grad_norm": 6.086460590362549,
+       "learning_rate": 5.019607843137255e-07,
+       "logits/chosen": -0.5333898067474365,
+       "logits/rejected": -0.47814399003982544,
+       "logps/chosen": -75.330810546875,
+       "logps/rejected": -13.868017196655273,
+       "loss": 0.6901,
+       "rewards/accuracies": 0.5107421875,
+       "rewards/chosen": 0.005103742238134146,
+       "rewards/margins": 0.0064795538783073425,
+       "rewards/rejected": -0.0013758119894191623,
+       "step": 128
+     },
+     {
+       "epoch": 0.1004808164066333,
+       "grad_norm": 3.6039109230041504,
+       "learning_rate": 9.995636998254798e-07,
+       "logits/chosen": -0.5320765972137451,
+       "logits/rejected": -0.466244101524353,
+       "logps/chosen": -74.39834594726562,
+       "logps/rejected": -12.819503784179688,
+       "loss": 0.6417,
+       "rewards/accuracies": 0.8037109375,
+       "rewards/chosen": 0.10177542269229889,
+       "rewards/margins": 0.11421097815036774,
+       "rewards/rejected": -0.012435557320713997,
+       "step": 256
+     },
+     {
+       "epoch": 0.15072122460994997,
+       "grad_norm": 2.8107080459594727,
+       "learning_rate": 9.43717277486911e-07,
+       "logits/chosen": -0.5339048504829407,
+       "logits/rejected": -0.475521981716156,
+       "logps/chosen": -67.63614654541016,
+       "logps/rejected": -14.525527954101562,
+       "loss": 0.4265,
+       "rewards/accuracies": 0.83984375,
+       "rewards/chosen": 0.691648006439209,
+       "rewards/margins": 0.9047117233276367,
+       "rewards/rejected": -0.21306368708610535,
+       "step": 384
+     },
+     {
+       "epoch": 0.2009616328132666,
+       "grad_norm": 7.228174686431885,
+       "learning_rate": 8.87870855148342e-07,
+       "logits/chosen": -0.5555779337882996,
+       "logits/rejected": -0.5105082988739014,
+       "logps/chosen": -59.40736770629883,
+       "logps/rejected": -20.558443069458008,
+       "loss": 0.2512,
+       "rewards/accuracies": 0.9453125,
+       "rewards/chosen": 1.2393351793289185,
+       "rewards/margins": 1.9693442583084106,
+       "rewards/rejected": -0.7300090193748474,
+       "step": 512
+     },
+     {
+       "epoch": 0.2512020410165833,
+       "grad_norm": 6.92218542098999,
+       "learning_rate": 8.320244328097731e-07,
+       "logits/chosen": -0.5576226115226746,
+       "logits/rejected": -0.5113691091537476,
+       "logps/chosen": -60.53861618041992,
+       "logps/rejected": -32.28628921508789,
+       "loss": 0.0926,
+       "rewards/accuracies": 0.98828125,
+       "rewards/chosen": 1.4149188995361328,
+       "rewards/margins": 3.2507734298706055,
+       "rewards/rejected": -1.8358546495437622,
+       "step": 640
+     },
+     {
+       "epoch": 0.30144244921989993,
+       "grad_norm": 0.33116015791893005,
+       "learning_rate": 7.761780104712042e-07,
+       "logits/chosen": -0.5273003578186035,
+       "logits/rejected": -0.4682571291923523,
+       "logps/chosen": -59.363250732421875,
+       "logps/rejected": -40.3122444152832,
+       "loss": 0.0343,
+       "rewards/accuracies": 0.9921875,
+       "rewards/chosen": 1.5962406396865845,
+       "rewards/margins": 4.371469020843506,
+       "rewards/rejected": -2.77522873878479,
+       "step": 768
+     },
+     {
+       "epoch": 0.3516828574232166,
+       "grad_norm": 0.6943575143814087,
+       "learning_rate": 7.203315881326351e-07,
+       "logits/chosen": -0.48832786083221436,
+       "logits/rejected": -0.42331841588020325,
+       "logps/chosen": -56.96063232421875,
+       "logps/rejected": -44.680931091308594,
+       "loss": 0.0303,
+       "rewards/accuracies": 0.990234375,
+       "rewards/chosen": 1.5980381965637207,
+       "rewards/margins": 4.763439178466797,
+       "rewards/rejected": -3.165400981903076,
+       "step": 896
+     },
+     {
+       "epoch": 0.4019232656265332,
+       "grad_norm": 0.07658185809850693,
+       "learning_rate": 6.644851657940663e-07,
+       "logits/chosen": -0.4915071725845337,
+       "logits/rejected": -0.4179531931877136,
+       "logps/chosen": -54.62388610839844,
+       "logps/rejected": -47.38861846923828,
+       "loss": 0.0313,
+       "rewards/accuracies": 0.990234375,
+       "rewards/chosen": 1.6683292388916016,
+       "rewards/margins": 5.199033737182617,
+       "rewards/rejected": -3.5307040214538574,
+       "step": 1024
+     },
+     {
+       "epoch": 0.45216367382984984,
+       "grad_norm": 0.13849562406539917,
+       "learning_rate": 6.086387434554974e-07,
+       "logits/chosen": -0.4723713994026184,
+       "logits/rejected": -0.38997870683670044,
+       "logps/chosen": -56.00676727294922,
+       "logps/rejected": -48.90182876586914,
+       "loss": 0.0423,
+       "rewards/accuracies": 0.982421875,
+       "rewards/chosen": 1.662773609161377,
+       "rewards/margins": 5.358356475830078,
+       "rewards/rejected": -3.695582628250122,
+       "step": 1152
+     },
+     {
+       "epoch": 0.5024040820331666,
+       "grad_norm": 0.09821192175149918,
+       "learning_rate": 5.527923211169285e-07,
+       "logits/chosen": -0.4637073874473572,
+       "logits/rejected": -0.3821738064289093,
+       "logps/chosen": -56.77195739746094,
+       "logps/rejected": -53.689002990722656,
+       "loss": 0.0199,
+       "rewards/accuracies": 0.9921875,
+       "rewards/chosen": 1.7342756986618042,
+       "rewards/margins": 5.696838855743408,
+       "rewards/rejected": -3.9625630378723145,
+       "step": 1280
+     },
+     {
+       "epoch": 0.5526444902364832,
+       "grad_norm": 0.7402496933937073,
+       "learning_rate": 4.969458987783594e-07,
+       "logits/chosen": -0.4427691102027893,
+       "logits/rejected": -0.3622562885284424,
+       "logps/chosen": -58.04669189453125,
+       "logps/rejected": -54.428367614746094,
+       "loss": 0.0248,
+       "rewards/accuracies": 0.990234375,
+       "rewards/chosen": 1.715148687362671,
+       "rewards/margins": 5.825463771820068,
+       "rewards/rejected": -4.110315322875977,
+       "step": 1408
+     },
+     {
+       "epoch": 0.6028848984397999,
+       "grad_norm": 0.46330779790878296,
+       "learning_rate": 4.410994764397906e-07,
+       "logits/chosen": -0.43754109740257263,
+       "logits/rejected": -0.33878153562545776,
+       "logps/chosen": -57.78377914428711,
+       "logps/rejected": -54.78404235839844,
+       "loss": 0.0342,
+       "rewards/accuracies": 0.98828125,
+       "rewards/chosen": 1.7349567413330078,
+       "rewards/margins": 5.961076736450195,
+       "rewards/rejected": -4.2261199951171875,
+       "step": 1536
+     },
+     {
+       "epoch": 0.6531253066431165,
+       "grad_norm": 0.07320141792297363,
+       "learning_rate": 3.852530541012216e-07,
+       "logits/chosen": -0.4277573823928833,
+       "logits/rejected": -0.31976205110549927,
+       "logps/chosen": -54.81957244873047,
+       "logps/rejected": -55.96826171875,
+       "loss": 0.0183,
+       "rewards/accuracies": 0.9921875,
+       "rewards/chosen": 1.6823066473007202,
+       "rewards/margins": 6.112739562988281,
+       "rewards/rejected": -4.43043327331543,
+       "step": 1664
+     },
+     {
+       "epoch": 0.7033657148464332,
+       "grad_norm": 0.5408441424369812,
+       "learning_rate": 3.2940663176265273e-07,
+       "logits/chosen": -0.4249654710292816,
+       "logits/rejected": -0.3083575367927551,
+       "logps/chosen": -55.527496337890625,
+       "logps/rejected": -58.188899993896484,
+       "loss": 0.013,
+       "rewards/accuracies": 0.994140625,
+       "rewards/chosen": 1.7380454540252686,
+       "rewards/margins": 6.380348205566406,
+       "rewards/rejected": -4.642302989959717,
+       "step": 1792
+     },
+     {
+       "epoch": 0.7536061230497498,
+       "grad_norm": 0.04812052845954895,
+       "learning_rate": 2.7356020942408376e-07,
+       "logits/chosen": -0.4335082471370697,
+       "logits/rejected": -0.3139256536960602,
+       "logps/chosen": -54.48476028442383,
+       "logps/rejected": -60.73431396484375,
+       "loss": 0.0241,
+       "rewards/accuracies": 0.98828125,
+       "rewards/chosen": 1.6781396865844727,
+       "rewards/margins": 6.47487735748291,
+       "rewards/rejected": -4.7967376708984375,
+       "step": 1920
+     },
+     {
+       "epoch": 0.8038465312530664,
+       "grad_norm": 0.055905986577272415,
+       "learning_rate": 2.1771378708551484e-07,
+       "logits/chosen": -0.3921366333961487,
+       "logits/rejected": -0.2739506959915161,
+       "logps/chosen": -56.08161544799805,
+       "logps/rejected": -62.1962890625,
+       "loss": 0.0297,
+       "rewards/accuracies": 0.986328125,
+       "rewards/chosen": 1.7258753776550293,
+       "rewards/margins": 6.677608489990234,
+       "rewards/rejected": -4.951733589172363,
+       "step": 2048
+     },
+     {
+       "epoch": 0.854086939456383,
+       "grad_norm": 0.1123403012752533,
+       "learning_rate": 1.618673647469459e-07,
+       "logits/chosen": -0.4205915331840515,
+       "logits/rejected": -0.31026044487953186,
+       "logps/chosen": -56.832523345947266,
+       "logps/rejected": -63.64851760864258,
+       "loss": 0.0154,
+       "rewards/accuracies": 0.9921875,
+       "rewards/chosen": 1.7784671783447266,
+       "rewards/margins": 6.823487281799316,
+       "rewards/rejected": -5.04502010345459,
+       "step": 2176
+     },
+     {
+       "epoch": 0.9043273476596997,
+       "grad_norm": 0.17749281227588654,
+       "learning_rate": 1.0602094240837696e-07,
+       "logits/chosen": -0.3997223973274231,
+       "logits/rejected": -0.2778712511062622,
+       "logps/chosen": -55.459022521972656,
+       "logps/rejected": -63.700008392333984,
+       "loss": 0.0313,
+       "rewards/accuracies": 0.9853515625,
+       "rewards/chosen": 1.6784813404083252,
+       "rewards/margins": 6.7875847816467285,
+       "rewards/rejected": -5.109103202819824,
+       "step": 2304
+     },
+     {
+       "epoch": 0.9545677558630163,
+       "grad_norm": 0.04166420176625252,
+       "learning_rate": 5.0174520069808025e-08,
+       "logits/chosen": -0.35927632451057434,
+       "logits/rejected": -0.23961657285690308,
+       "logps/chosen": -55.96198272705078,
+       "logps/rejected": -64.85810089111328,
+       "loss": 0.0223,
+       "rewards/accuracies": 0.9921875,
+       "rewards/chosen": 1.656056523323059,
+       "rewards/margins": 6.852350234985352,
+       "rewards/rejected": -5.196293830871582,
+       "step": 2432
+     },
+     {
+       "epoch": 0.9997056226081837,
+       "step": 2547,
+       "total_flos": 1.5461894723437855e+18,
+       "train_loss": 0.1253820424179119,
+       "train_runtime": 39602.0987,
+       "train_samples_per_second": 0.515,
+       "train_steps_per_second": 0.064
+     }
+   ],
+   "logging_steps": 128,
+   "max_steps": 2547,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.5461894723437855e+18,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
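The per-step entries above follow TRL's DPO logging conventions: rewards/chosen and rewards/rejected are the β-scaled policy-vs-reference log-probability ratios, and with the default sigmoid loss each example contributes -log σ(rewards/chosen - rewards/rejected). The sketch below recomputes that quantity from the batch-mean margin logged at step 2432; it is illustrative only and will not match the logged loss exactly, because the trainer averages per-example losses rather than applying the log-sigmoid to the mean margin.

```python
import math

# Illustrative only: per-example DPO loss (default sigmoid variant, no label smoothing)
# as a function of the reward margin, margin = rewards/chosen - rewards/rejected.
def dpo_loss_from_margin(margin: float) -> float:
    return -math.log(1.0 / (1.0 + math.exp(-margin)))

margin = 6.852350234985352           # batch-mean "rewards/margins" at step 2432
print(dpo_loss_from_margin(margin))  # ~0.001; the logged batch loss (0.0223) is larger
                                     # because it averages per-example losses, and
                                     # low-margin examples dominate that average.
```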
training_loss.png ADDED
training_rewards_accuracies.png ADDED