EleanorZzz committed · Commit 1fd2c9c · verified · 1 Parent(s): 5d5550f

End of training

Files changed (5)
  1. README.md +1 -1
  2. all_results.json +6 -6
  3. train_results.json +6 -6
  4. trainer_state.json +51 -121
  5. training_loss.png +0 -0
README.md CHANGED
@@ -16,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # persuasion_simulation_qwen2.5_7b_sft_w_promp_3epochs
 
-This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) on an unknown dataset.
+This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) on the persuasion_simulation dataset.
 
 ## Model description
 
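As a quick sanity check of the updated model card, a minimal loading sketch (not part of this commit; the repo id below is assumed from the model name and may need adjusting to the actual Hub path):

```python
# Hedged sketch: load the fine-tuned checkpoint with transformers.
# The repo id is assumed from the model name in README.md; adjust if needed.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "EleanorZzz/persuasion_simulation_qwen2.5_7b_sft_w_promp_3epochs"  # assumed repo path

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id, torch_dtype="auto", device_map="auto"  # device_map="auto" requires accelerate
)

messages = [{"role": "user", "content": "Convince me to try a standing desk."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```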
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 4.908108108108108,
-    "total_flos": 8436467998720.0,
-    "train_loss": 0.7619479303774627,
-    "train_runtime": 1795.6366,
-    "train_samples_per_second": 2.058,
-    "train_steps_per_second": 0.128
+    "epoch": 2.9513513513513514,
+    "total_flos": 5071563390976.0,
+    "train_loss": 0.982524781987287,
+    "train_runtime": 1059.4142,
+    "train_samples_per_second": 2.093,
+    "train_steps_per_second": 0.13
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 4.908108108108108,
-    "total_flos": 8436467998720.0,
-    "train_loss": 0.7619479303774627,
-    "train_runtime": 1795.6366,
-    "train_samples_per_second": 2.058,
-    "train_steps_per_second": 0.128
+    "epoch": 2.9513513513513514,
+    "total_flos": 5071563390976.0,
+    "train_loss": 0.982524781987287,
+    "train_runtime": 1059.4142,
+    "train_samples_per_second": 2.093,
+    "train_steps_per_second": 0.13
 }
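The same summary appears in both all_results.json and train_results.json. A small sketch (assuming a local copy of the file) to cross-check the reported throughput against the step count in trainer_state.json:

```python
# Sketch: cross-check the summary metrics in train_results.json.
# The path is assumed; point it at a local copy of the file.
import json

with open("train_results.json") as f:
    r = json.load(f)

approx_samples = r["train_runtime"] * r["train_samples_per_second"]  # ~2217 samples seen
approx_steps = r["train_runtime"] * r["train_steps_per_second"]      # ~138 optimizer steps

print(f"epoch={r['epoch']:.2f}  samples≈{approx_samples:.0f}  steps≈{approx_steps:.0f}")
```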
trainer_state.json CHANGED
@@ -2,188 +2,118 @@
   "best_global_step": null,
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 4.908108108108108,
+  "epoch": 2.9513513513513514,
   "eval_steps": 500,
-  "global_step": 230,
+  "global_step": 138,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.21621621621621623,
-      "grad_norm": 8.280130797797561,
-      "learning_rate": 4.347826086956522e-06,
-      "loss": 2.7155,
+      "grad_norm": 7.197075999482914,
+      "learning_rate": 7.1428571428571436e-06,
+      "loss": 2.5723,
       "step": 10
     },
     {
       "epoch": 0.43243243243243246,
-      "grad_norm": 2.4715524570417173,
-      "learning_rate": 8.695652173913044e-06,
-      "loss": 1.3465,
+      "grad_norm": 2.225334810414002,
+      "learning_rate": 9.942341621640558e-06,
+      "loss": 1.204,
       "step": 20
     },
     {
       "epoch": 0.6486486486486487,
-      "grad_norm": 2.099989136279864,
-      "learning_rate": 9.971810547786794e-06,
-      "loss": 1.0712,
+      "grad_norm": 2.124134707404298,
+      "learning_rate": 9.594789058101154e-06,
+      "loss": 1.0538,
       "step": 30
     },
     {
       "epoch": 0.8648648648648649,
-      "grad_norm": 1.8926473625262032,
-      "learning_rate": 9.834504404631032e-06,
-      "loss": 1.0268,
+      "grad_norm": 1.8972052525289154,
+      "learning_rate": 8.953878684688492e-06,
+      "loss": 1.0201,
       "step": 40
     },
     {
       "epoch": 1.0648648648648649,
-      "grad_norm": 2.064121413621776,
-      "learning_rate": 9.586056507527266e-06,
-      "loss": 0.9506,
+      "grad_norm": 2.050396090669739,
+      "learning_rate": 8.060529912738316e-06,
+      "loss": 0.9428,
       "step": 50
     },
     {
       "epoch": 1.281081081081081,
-      "grad_norm": 1.8094825922438424,
-      "learning_rate": 9.232178493644006e-06,
-      "loss": 0.8562,
+      "grad_norm": 1.711788073870433,
+      "learning_rate": 6.971779275566593e-06,
+      "loss": 0.8491,
       "step": 60
     },
     {
       "epoch": 1.4972972972972973,
-      "grad_norm": 2.050095654745026,
-      "learning_rate": 8.781005762156593e-06,
-      "loss": 0.8412,
+      "grad_norm": 2.0490846868576202,
+      "learning_rate": 5.757138887522884e-06,
+      "loss": 0.8344,
       "step": 70
     },
     {
       "epoch": 1.7135135135135136,
-      "grad_norm": 1.8352865781006955,
-      "learning_rate": 8.24291044731378e-06,
-      "loss": 0.8523,
+      "grad_norm": 1.7041228428337094,
+      "learning_rate": 4.49415839006284e-06,
+      "loss": 0.8397,
       "step": 80
     },
     {
       "epoch": 1.9297297297297298,
-      "grad_norm": 1.9667662631699199,
-      "learning_rate": 7.630262970585355e-06,
-      "loss": 0.8436,
+      "grad_norm": 1.9251426434335601,
+      "learning_rate": 3.2634737357758994e-06,
+      "loss": 0.8296,
       "step": 90
     },
     {
       "epoch": 2.1297297297297297,
-      "grad_norm": 1.7211651987438827,
-      "learning_rate": 6.957147653634198e-06,
-      "loss": 0.7678,
+      "grad_norm": 1.7433702920708603,
+      "learning_rate": 2.1436589245260375e-06,
+      "loss": 0.7802,
       "step": 100
     },
     {
       "epoch": 2.345945945945946,
-      "grad_norm": 1.7813862468072899,
-      "learning_rate": 6.2390389299645e-06,
-      "loss": 0.6588,
+      "grad_norm": 1.8519573766604274,
+      "learning_rate": 1.2062093865360458e-06,
+      "loss": 0.7006,
       "step": 110
     },
     {
       "epoch": 2.562162162162162,
-      "grad_norm": 1.874299390396019,
-      "learning_rate": 5.492445598905843e-06,
-      "loss": 0.657,
+      "grad_norm": 1.6373359351623393,
+      "learning_rate": 5.109773021462921e-07,
+      "loss": 0.6986,
       "step": 120
     },
     {
       "epoch": 2.7783783783783784,
-      "grad_norm": 1.7516461345516408,
-      "learning_rate": 4.7345313002762545e-06,
-      "loss": 0.6495,
+      "grad_norm": 1.6305332102388383,
+      "learning_rate": 1.0235029373752758e-07,
+      "loss": 0.6844,
       "step": 130
     },
     {
-      "epoch": 2.9945945945945946,
-      "grad_norm": 1.700560828577539,
-      "learning_rate": 3.982719934736832e-06,
-      "loss": 0.6521,
-      "step": 140
-    },
-    {
-      "epoch": 3.1945945945945944,
-      "grad_norm": 2.4844933595663345,
-      "learning_rate": 3.2542951009381584e-06,
-      "loss": 0.5145,
-      "step": 150
-    },
-    {
-      "epoch": 3.410810810810811,
-      "grad_norm": 1.788000241563165,
-      "learning_rate": 2.566002758108256e-06,
-      "loss": 0.4596,
-      "step": 160
-    },
-    {
-      "epoch": 3.627027027027027,
-      "grad_norm": 1.7733098076103213,
-      "learning_rate": 1.933666248581418e-06,
-      "loss": 0.447,
-      "step": 170
-    },
-    {
-      "epoch": 3.8432432432432435,
-      "grad_norm": 1.88196318426097,
-      "learning_rate": 1.3718225306210049e-06,
-      "loss": 0.4576,
-      "step": 180
-    },
-    {
-      "epoch": 4.043243243243243,
-      "grad_norm": 1.9793294606414797,
-      "learning_rate": 8.933879842801558e-07,
-      "loss": 0.4278,
-      "step": 190
-    },
-    {
-      "epoch": 4.2594594594594595,
-      "grad_norm": 2.043336421318033,
-      "learning_rate": 5.0936147318152e-07,
-      "loss": 0.3384,
-      "step": 200
-    },
-    {
-      "epoch": 4.475675675675676,
-      "grad_norm": 1.9227579071071994,
-      "learning_rate": 2.2857148861060552e-07,
-      "loss": 0.3365,
-      "step": 210
-    },
-    {
-      "epoch": 4.691891891891892,
-      "grad_norm": 1.8184136523064685,
-      "learning_rate": 5.747318889684883e-08,
-      "loss": 0.3239,
-      "step": 220
-    },
-    {
-      "epoch": 4.908108108108108,
-      "grad_norm": 1.7870900401086147,
-      "learning_rate": 0.0,
-      "loss": 0.3306,
-      "step": 230
-    },
-    {
-      "epoch": 4.908108108108108,
-      "step": 230,
-      "total_flos": 8436467998720.0,
-      "train_loss": 0.7619479303774627,
-      "train_runtime": 1795.6366,
-      "train_samples_per_second": 2.058,
-      "train_steps_per_second": 0.128
+      "epoch": 2.9513513513513514,
+      "step": 138,
+      "total_flos": 5071563390976.0,
+      "train_loss": 0.982524781987287,
+      "train_runtime": 1059.4142,
+      "train_samples_per_second": 2.093,
+      "train_steps_per_second": 0.13
     }
   ],
   "logging_steps": 10,
-  "max_steps": 230,
+  "max_steps": 138,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 5,
+  "num_train_epochs": 3,
   "save_steps": 500,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -197,7 +127,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8436467998720.0,
+  "total_flos": 5071563390976.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
training_loss.png CHANGED
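The loss curve image was regenerated for the shorter 3-epoch run. A sketch along these lines (assuming a local trainer_state.json; the actual plotting script is not included in this commit) reproduces a comparable plot from log_history:

```python
# Sketch: plot training loss from trainer_state.json's log_history.
# Assumes a local copy of the file; the script behind training_loss.png is unknown.
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only logged training entries (the final summary entry has no "loss" key).
points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
steps, losses = zip(*points)

plt.plot(steps, losses, marker="o")
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("persuasion_simulation_qwen2.5_7b_sft_w_promp_3epochs")
plt.savefig("training_loss.png")
```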