{
  "best_metric": 0.06684371829032898,
  "best_model_checkpoint": "saves/psy-course-info-chain/Llama-3.1-8B-Instruct/train/fold1/checkpoint-120",
  "epoch": 4.938271604938271,
  "eval_steps": 10,
  "global_step": 125,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03950617283950617,
      "grad_norm": 1.7534302473068237,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.5276,
      "step": 1
    },
    {
      "epoch": 0.07901234567901234,
      "grad_norm": 1.6594399213790894,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.5322,
      "step": 2
    },
    {
      "epoch": 0.11851851851851852,
      "grad_norm": 1.7843174934387207,
      "learning_rate": 2.307692307692308e-05,
      "loss": 0.5109,
      "step": 3
    },
    {
      "epoch": 0.1580246913580247,
      "grad_norm": 1.7626229524612427,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 0.5394,
      "step": 4
    },
    {
      "epoch": 0.19753086419753085,
      "grad_norm": 1.815011739730835,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.519,
      "step": 5
    },
    {
      "epoch": 0.23703703703703705,
      "grad_norm": 1.5044406652450562,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.483,
      "step": 6
    },
    {
      "epoch": 0.2765432098765432,
      "grad_norm": 1.7126368284225464,
      "learning_rate": 5.384615384615385e-05,
      "loss": 0.424,
      "step": 7
    },
    {
      "epoch": 0.3160493827160494,
      "grad_norm": 1.967699646949768,
      "learning_rate": 6.153846153846155e-05,
      "loss": 0.3509,
      "step": 8
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 1.2087763547897339,
      "learning_rate": 6.923076923076924e-05,
      "loss": 0.193,
      "step": 9
    },
    {
      "epoch": 0.3950617283950617,
      "grad_norm": 0.7323130369186401,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.175,
      "step": 10
    },
    {
      "epoch": 0.3950617283950617,
      "eval_loss": 0.15156826376914978,
      "eval_runtime": 21.0352,
      "eval_samples_per_second": 2.139,
      "eval_steps_per_second": 2.139,
      "step": 10
    },
    {
      "epoch": 0.4345679012345679,
      "grad_norm": 0.5891270637512207,
      "learning_rate": 8.461538461538461e-05,
      "loss": 0.1207,
      "step": 11
    },
    {
      "epoch": 0.4740740740740741,
      "grad_norm": 0.6813968420028687,
      "learning_rate": 9.230769230769232e-05,
      "loss": 0.1954,
      "step": 12
    },
    {
      "epoch": 0.5135802469135803,
      "grad_norm": 0.946879506111145,
      "learning_rate": 0.0001,
      "loss": 0.2548,
      "step": 13
    },
    {
      "epoch": 0.5530864197530864,
      "grad_norm": 0.8726567029953003,
      "learning_rate": 9.998033131915266e-05,
      "loss": 0.1641,
      "step": 14
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.6376297473907471,
      "learning_rate": 9.992134075089084e-05,
      "loss": 0.1747,
      "step": 15
    },
    {
      "epoch": 0.6320987654320988,
      "grad_norm": 0.5921506881713867,
      "learning_rate": 9.982307470588098e-05,
      "loss": 0.1838,
      "step": 16
    },
    {
      "epoch": 0.671604938271605,
      "grad_norm": 0.6308391094207764,
      "learning_rate": 9.968561049466214e-05,
      "loss": 0.1716,
      "step": 17
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 0.4433191418647766,
      "learning_rate": 9.950905626682228e-05,
      "loss": 0.1243,
      "step": 18
    },
    {
      "epoch": 0.7506172839506173,
      "grad_norm": 0.5776513814926147,
      "learning_rate": 9.92935509259118e-05,
      "loss": 0.1369,
      "step": 19
    },
    {
      "epoch": 0.7901234567901234,
      "grad_norm": 0.3834328055381775,
      "learning_rate": 9.903926402016153e-05,
      "loss": 0.099,
      "step": 20
    },
    {
      "epoch": 0.7901234567901234,
      "eval_loss": 0.12395185977220535,
      "eval_runtime": 20.9205,
      "eval_samples_per_second": 2.151,
      "eval_steps_per_second": 2.151,
      "step": 20
    },
    {
      "epoch": 0.8296296296296296,
      "grad_norm": 0.42168161273002625,
      "learning_rate": 9.874639560909117e-05,
      "loss": 0.1307,
      "step": 21
    },
    {
      "epoch": 0.8691358024691358,
      "grad_norm": 0.3687320947647095,
      "learning_rate": 9.841517610611309e-05,
      "loss": 0.0919,
      "step": 22
    },
    {
      "epoch": 0.908641975308642,
      "grad_norm": 0.3716530203819275,
      "learning_rate": 9.804586609725499e-05,
      "loss": 0.0851,
      "step": 23
    },
    {
      "epoch": 0.9481481481481482,
      "grad_norm": 0.5560383796691895,
      "learning_rate": 9.763875613614482e-05,
      "loss": 0.1307,
      "step": 24
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 0.4219168424606323,
      "learning_rate": 9.719416651541839e-05,
      "loss": 0.148,
      "step": 25
    },
    {
      "epoch": 1.0271604938271606,
      "grad_norm": 0.6669564247131348,
      "learning_rate": 9.671244701472999e-05,
      "loss": 0.1859,
      "step": 26
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 0.4102296531200409,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.1018,
      "step": 27
    },
    {
      "epoch": 1.106172839506173,
      "grad_norm": 0.5062835812568665,
      "learning_rate": 9.563916325306594e-05,
      "loss": 0.129,
      "step": 28
    },
    {
      "epoch": 1.145679012345679,
      "grad_norm": 0.45242568850517273,
      "learning_rate": 9.504844339512095e-05,
      "loss": 0.1037,
      "step": 29
    },
    {
      "epoch": 1.1851851851851851,
      "grad_norm": 0.4103507697582245,
      "learning_rate": 9.442228179894362e-05,
      "loss": 0.0955,
      "step": 30
    },
    {
      "epoch": 1.1851851851851851,
      "eval_loss": 0.09509595483541489,
      "eval_runtime": 20.9712,
      "eval_samples_per_second": 2.146,
      "eval_steps_per_second": 2.146,
      "step": 30
    },
    {
      "epoch": 1.2246913580246914,
      "grad_norm": 0.4553764760494232,
      "learning_rate": 9.376117109543769e-05,
      "loss": 0.0872,
      "step": 31
    },
    {
      "epoch": 1.2641975308641975,
      "grad_norm": 0.4687905013561249,
      "learning_rate": 9.306563141162046e-05,
      "loss": 0.1058,
      "step": 32
    },
    {
      "epoch": 1.3037037037037038,
      "grad_norm": 0.48785629868507385,
      "learning_rate": 9.233620996141421e-05,
      "loss": 0.1469,
      "step": 33
    },
    {
      "epoch": 1.34320987654321,
      "grad_norm": 0.3559998571872711,
      "learning_rate": 9.157348061512727e-05,
      "loss": 0.1027,
      "step": 34
    },
    {
      "epoch": 1.382716049382716,
      "grad_norm": 0.30200159549713135,
      "learning_rate": 9.077804344796302e-05,
      "loss": 0.0761,
      "step": 35
    },
    {
      "epoch": 1.4222222222222223,
      "grad_norm": 0.39950889348983765,
      "learning_rate": 8.995052426791247e-05,
      "loss": 0.1153,
      "step": 36
    },
    {
      "epoch": 1.4617283950617284,
      "grad_norm": 0.33219078183174133,
      "learning_rate": 8.90915741234015e-05,
      "loss": 0.0844,
      "step": 37
    },
    {
      "epoch": 1.5012345679012347,
      "grad_norm": 0.42346814274787903,
      "learning_rate": 8.820186879108038e-05,
      "loss": 0.102,
      "step": 38
    },
    {
      "epoch": 1.5407407407407407,
      "grad_norm": 0.4340456426143646,
      "learning_rate": 8.728210824415827e-05,
      "loss": 0.1239,
      "step": 39
    },
    {
      "epoch": 1.5802469135802468,
      "grad_norm": 0.3331095576286316,
      "learning_rate": 8.633301610170135e-05,
      "loss": 0.0741,
      "step": 40
    },
    {
      "epoch": 1.5802469135802468,
      "eval_loss": 0.0872035026550293,
      "eval_runtime": 20.931,
      "eval_samples_per_second": 2.15,
      "eval_steps_per_second": 2.15,
      "step": 40
    },
    {
      "epoch": 1.6197530864197531,
      "grad_norm": 0.4618953764438629,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.1074,
      "step": 41
    },
    {
      "epoch": 1.6592592592592592,
      "grad_norm": 0.29720261693000793,
      "learning_rate": 8.434984630174509e-05,
      "loss": 0.0626,
      "step": 42
    },
    {
      "epoch": 1.6987654320987655,
      "grad_norm": 0.49544522166252136,
      "learning_rate": 8.33173288976002e-05,
      "loss": 0.105,
      "step": 43
    },
    {
      "epoch": 1.7382716049382716,
      "grad_norm": 0.4234393835067749,
      "learning_rate": 8.225859917710439e-05,
      "loss": 0.0895,
      "step": 44
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 0.5573832392692566,
      "learning_rate": 8.117449009293668e-05,
      "loss": 0.1134,
      "step": 45
    },
    {
      "epoch": 1.817283950617284,
      "grad_norm": 0.4032742977142334,
      "learning_rate": 8.006585456492029e-05,
      "loss": 0.0992,
      "step": 46
    },
    {
      "epoch": 1.8567901234567903,
      "grad_norm": 0.29090389609336853,
      "learning_rate": 7.89335648089903e-05,
      "loss": 0.0357,
      "step": 47
    },
    {
      "epoch": 1.8962962962962964,
      "grad_norm": 0.40429532527923584,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.0739,
      "step": 48
    },
    {
      "epoch": 1.9358024691358025,
      "grad_norm": 0.49074608087539673,
      "learning_rate": 7.660160382576683e-05,
      "loss": 0.1451,
      "step": 49
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 0.3378618359565735,
      "learning_rate": 7.540376726232648e-05,
      "loss": 0.0671,
      "step": 50
    },
    {
      "epoch": 1.9753086419753085,
      "eval_loss": 0.07864316552877426,
      "eval_runtime": 20.956,
      "eval_samples_per_second": 2.147,
      "eval_steps_per_second": 2.147,
      "step": 50
    },
    {
      "epoch": 2.0148148148148146,
      "grad_norm": 0.7849867939949036,
      "learning_rate": 7.4185944355262e-05,
      "loss": 0.1505,
      "step": 51
    },
    {
      "epoch": 2.054320987654321,
      "grad_norm": 0.29336991906166077,
      "learning_rate": 7.294909322337689e-05,
      "loss": 0.0623,
      "step": 52
    },
    {
      "epoch": 2.093827160493827,
      "grad_norm": 0.3683113157749176,
      "learning_rate": 7.169418695587791e-05,
      "loss": 0.0722,
      "step": 53
    },
    {
      "epoch": 2.1333333333333333,
      "grad_norm": 0.44332262873649597,
      "learning_rate": 7.042221284679982e-05,
      "loss": 0.1282,
      "step": 54
    },
    {
      "epoch": 2.1728395061728394,
      "grad_norm": 0.3715544044971466,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.0848,
      "step": 55
    },
    {
      "epoch": 2.212345679012346,
      "grad_norm": 0.4191085696220398,
      "learning_rate": 6.783107663311565e-05,
      "loss": 0.0554,
      "step": 56
    },
    {
      "epoch": 2.251851851851852,
      "grad_norm": 0.3467198610305786,
      "learning_rate": 6.651395309775837e-05,
      "loss": 0.0548,
      "step": 57
    },
    {
      "epoch": 2.291358024691358,
      "grad_norm": 0.24871347844600677,
      "learning_rate": 6.518383725548074e-05,
      "loss": 0.0416,
      "step": 58
    },
    {
      "epoch": 2.330864197530864,
      "grad_norm": 0.44350406527519226,
      "learning_rate": 6.384177557124247e-05,
      "loss": 0.0542,
      "step": 59
    },
    {
      "epoch": 2.3703703703703702,
      "grad_norm": 0.27462178468704224,
      "learning_rate": 6.248882390836135e-05,
      "loss": 0.0422,
      "step": 60
    },
    {
      "epoch": 2.3703703703703702,
      "eval_loss": 0.07898063957691193,
      "eval_runtime": 20.9796,
      "eval_samples_per_second": 2.145,
      "eval_steps_per_second": 2.145,
      "step": 60
    },
    {
      "epoch": 2.4098765432098768,
      "grad_norm": 0.3473226726055145,
      "learning_rate": 6.112604669781572e-05,
      "loss": 0.0541,
      "step": 61
    },
    {
      "epoch": 2.449382716049383,
      "grad_norm": 0.45636582374572754,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.0886,
      "step": 62
    },
    {
      "epoch": 2.488888888888889,
      "grad_norm": 0.47652554512023926,
      "learning_rate": 5.837531116523682e-05,
      "loss": 0.0751,
      "step": 63
    },
    {
      "epoch": 2.528395061728395,
      "grad_norm": 0.3730301856994629,
      "learning_rate": 5.698951697677498e-05,
      "loss": 0.0651,
      "step": 64
    },
    {
      "epoch": 2.567901234567901,
      "grad_norm": 0.4122215807437897,
      "learning_rate": 5.559822380516539e-05,
      "loss": 0.0681,
      "step": 65
    },
    {
      "epoch": 2.6074074074074076,
      "grad_norm": 0.506348729133606,
      "learning_rate": 5.420252624646238e-05,
      "loss": 0.044,
      "step": 66
    },
    {
      "epoch": 2.6469135802469137,
      "grad_norm": 0.47149717807769775,
      "learning_rate": 5.2803522361859594e-05,
      "loss": 0.0497,
      "step": 67
    },
    {
      "epoch": 2.68641975308642,
      "grad_norm": 0.4436272382736206,
      "learning_rate": 5.140231281379345e-05,
      "loss": 0.0678,
      "step": 68
    },
    {
      "epoch": 2.725925925925926,
      "grad_norm": 0.5666233897209167,
      "learning_rate": 5e-05,
      "loss": 0.0608,
      "step": 69
    },
    {
      "epoch": 2.765432098765432,
      "grad_norm": 0.5969477891921997,
      "learning_rate": 4.859768718620656e-05,
      "loss": 0.0758,
      "step": 70
    },
    {
      "epoch": 2.765432098765432,
      "eval_loss": 0.07115509361028671,
      "eval_runtime": 20.9773,
      "eval_samples_per_second": 2.145,
      "eval_steps_per_second": 2.145,
      "step": 70
    },
    {
      "epoch": 2.8049382716049385,
      "grad_norm": 0.5112955570220947,
      "learning_rate": 4.7196477638140404e-05,
      "loss": 0.0819,
      "step": 71
    },
    {
      "epoch": 2.8444444444444446,
      "grad_norm": 0.6357370615005493,
      "learning_rate": 4.579747375353763e-05,
      "loss": 0.1089,
      "step": 72
    },
    {
      "epoch": 2.8839506172839506,
      "grad_norm": 0.6567676663398743,
      "learning_rate": 4.4401776194834613e-05,
      "loss": 0.1079,
      "step": 73
    },
    {
      "epoch": 2.9234567901234567,
      "grad_norm": 0.3676830232143402,
      "learning_rate": 4.3010483023225045e-05,
      "loss": 0.0751,
      "step": 74
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 0.4251662492752075,
      "learning_rate": 4.162468883476319e-05,
      "loss": 0.0773,
      "step": 75
    },
    {
      "epoch": 3.0024691358024693,
      "grad_norm": 0.8321242928504944,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.1391,
      "step": 76
    },
    {
      "epoch": 3.0419753086419754,
      "grad_norm": 0.32981082797050476,
      "learning_rate": 3.887395330218429e-05,
      "loss": 0.0483,
      "step": 77
    },
    {
      "epoch": 3.0814814814814815,
      "grad_norm": 0.41706305742263794,
      "learning_rate": 3.7511176091638653e-05,
      "loss": 0.0563,
      "step": 78
    },
    {
      "epoch": 3.1209876543209876,
      "grad_norm": 0.4880906641483307,
      "learning_rate": 3.6158224428757535e-05,
      "loss": 0.0564,
      "step": 79
    },
    {
      "epoch": 3.1604938271604937,
      "grad_norm": 0.4189813733100891,
      "learning_rate": 3.4816162744519263e-05,
      "loss": 0.0359,
      "step": 80
    },
    {
      "epoch": 3.1604938271604937,
      "eval_loss": 0.07390694320201874,
      "eval_runtime": 21.022,
      "eval_samples_per_second": 2.141,
      "eval_steps_per_second": 2.141,
      "step": 80
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.32041606307029724,
      "learning_rate": 3.3486046902241664e-05,
      "loss": 0.0444,
      "step": 81
    },
    {
      "epoch": 3.2395061728395063,
      "grad_norm": 0.6350606679916382,
      "learning_rate": 3.216892336688435e-05,
      "loss": 0.0665,
      "step": 82
    },
    {
      "epoch": 3.2790123456790123,
      "grad_norm": 0.38409528136253357,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.0521,
      "step": 83
    },
    {
      "epoch": 3.3185185185185184,
      "grad_norm": 0.4244583249092102,
      "learning_rate": 2.9577787153200197e-05,
      "loss": 0.0896,
      "step": 84
    },
    {
      "epoch": 3.3580246913580245,
      "grad_norm": 0.3476863503456116,
      "learning_rate": 2.8305813044122097e-05,
      "loss": 0.0526,
      "step": 85
    },
    {
      "epoch": 3.397530864197531,
      "grad_norm": 0.37426993250846863,
      "learning_rate": 2.705090677662311e-05,
      "loss": 0.0486,
      "step": 86
    },
    {
      "epoch": 3.437037037037037,
      "grad_norm": 0.5348115563392639,
      "learning_rate": 2.581405564473801e-05,
      "loss": 0.065,
      "step": 87
    },
    {
      "epoch": 3.476543209876543,
      "grad_norm": 0.38502976298332214,
      "learning_rate": 2.459623273767354e-05,
      "loss": 0.0453,
      "step": 88
    },
    {
      "epoch": 3.5160493827160493,
      "grad_norm": 0.34430432319641113,
      "learning_rate": 2.3398396174233178e-05,
      "loss": 0.0582,
      "step": 89
    },
    {
      "epoch": 3.5555555555555554,
      "grad_norm": 0.40302205085754395,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.0385,
      "step": 90
    },
    {
      "epoch": 3.5555555555555554,
      "eval_loss": 0.07315154373645782,
      "eval_runtime": 21.0309,
      "eval_samples_per_second": 2.14,
      "eval_steps_per_second": 2.14,
      "step": 90
    },
    {
      "epoch": 3.595061728395062,
      "grad_norm": 0.6493139266967773,
      "learning_rate": 2.1066435191009715e-05,
      "loss": 0.1011,
      "step": 91
    },
    {
      "epoch": 3.634567901234568,
      "grad_norm": 0.6032883524894714,
      "learning_rate": 1.9934145435079702e-05,
      "loss": 0.0751,
      "step": 92
    },
    {
      "epoch": 3.674074074074074,
      "grad_norm": 0.4316764771938324,
      "learning_rate": 1.8825509907063327e-05,
      "loss": 0.0458,
      "step": 93
    },
    {
      "epoch": 3.71358024691358,
      "grad_norm": 0.3671962022781372,
      "learning_rate": 1.774140082289563e-05,
      "loss": 0.0589,
      "step": 94
    },
    {
      "epoch": 3.753086419753086,
      "grad_norm": 0.3559502065181732,
      "learning_rate": 1.6682671102399805e-05,
      "loss": 0.0429,
      "step": 95
    },
    {
      "epoch": 3.7925925925925927,
      "grad_norm": 0.3240104615688324,
      "learning_rate": 1.5650153698254916e-05,
      "loss": 0.0474,
      "step": 96
    },
    {
      "epoch": 3.832098765432099,
      "grad_norm": 0.31719568371772766,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.0477,
      "step": 97
    },
    {
      "epoch": 3.871604938271605,
      "grad_norm": 0.40934890508651733,
      "learning_rate": 1.3666983898298657e-05,
      "loss": 0.0504,
      "step": 98
    },
    {
      "epoch": 3.911111111111111,
      "grad_norm": 0.45837289094924927,
      "learning_rate": 1.2717891755841722e-05,
      "loss": 0.0657,
      "step": 99
    },
    {
      "epoch": 3.950617283950617,
      "grad_norm": 0.41513946652412415,
      "learning_rate": 1.1798131208919627e-05,
      "loss": 0.0507,
      "step": 100
    },
    {
      "epoch": 3.950617283950617,
      "eval_loss": 0.0681799054145813,
      "eval_runtime": 21.0348,
      "eval_samples_per_second": 2.139,
      "eval_steps_per_second": 2.139,
      "step": 100
    },
    {
      "epoch": 3.9901234567901236,
      "grad_norm": 0.562179684638977,
      "learning_rate": 1.090842587659851e-05,
      "loss": 0.0934,
      "step": 101
    },
    {
      "epoch": 4.029629629629629,
      "grad_norm": 1.0771077871322632,
      "learning_rate": 1.004947573208756e-05,
      "loss": 0.1265,
      "step": 102
    },
    {
      "epoch": 4.069135802469136,
      "grad_norm": 0.38524681329727173,
      "learning_rate": 9.221956552036992e-06,
      "loss": 0.043,
      "step": 103
    },
    {
      "epoch": 4.108641975308642,
      "grad_norm": 0.49528777599334717,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.0521,
      "step": 104
    },
    {
      "epoch": 4.148148148148148,
      "grad_norm": 0.40660879015922546,
      "learning_rate": 7.663790038585793e-06,
      "loss": 0.0451,
      "step": 105
    },
    {
      "epoch": 4.187654320987654,
      "grad_norm": 0.3841019868850708,
      "learning_rate": 6.934368588379553e-06,
      "loss": 0.0434,
      "step": 106
    },
    {
      "epoch": 4.22716049382716,
      "grad_norm": 0.2953658103942871,
      "learning_rate": 6.238828904562316e-06,
      "loss": 0.0347,
      "step": 107
    },
    {
      "epoch": 4.266666666666667,
      "grad_norm": 0.36496424674987793,
      "learning_rate": 5.577718201056392e-06,
      "loss": 0.0546,
      "step": 108
    },
    {
      "epoch": 4.306172839506173,
      "grad_norm": 0.5377872586250305,
      "learning_rate": 4.951556604879048e-06,
      "loss": 0.0643,
      "step": 109
    },
    {
      "epoch": 4.345679012345679,
      "grad_norm": 0.26345789432525635,
      "learning_rate": 4.360836746934055e-06,
      "loss": 0.0352,
      "step": 110
    },
    {
      "epoch": 4.345679012345679,
      "eval_loss": 0.0669243261218071,
      "eval_runtime": 21.0024,
      "eval_samples_per_second": 2.143,
      "eval_steps_per_second": 2.143,
      "step": 110
    },
    {
      "epoch": 4.385185185185185,
      "grad_norm": 0.3408642113208771,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.045,
      "step": 111
    },
    {
      "epoch": 4.424691358024692,
      "grad_norm": 0.34921032190322876,
      "learning_rate": 3.2875529852700147e-06,
      "loss": 0.0592,
      "step": 112
    },
    {
      "epoch": 4.4641975308641975,
      "grad_norm": 0.2842159867286682,
      "learning_rate": 2.8058334845816213e-06,
      "loss": 0.0307,
      "step": 113
    },
    {
      "epoch": 4.503703703703704,
      "grad_norm": 0.38709065318107605,
      "learning_rate": 2.361243863855184e-06,
      "loss": 0.0423,
      "step": 114
    },
    {
      "epoch": 4.54320987654321,
      "grad_norm": 0.2887662649154663,
      "learning_rate": 1.9541339027450256e-06,
      "loss": 0.03,
      "step": 115
    },
    {
      "epoch": 4.582716049382716,
      "grad_norm": 0.3436429500579834,
      "learning_rate": 1.584823893886933e-06,
      "loss": 0.0361,
      "step": 116
    },
    {
      "epoch": 4.622222222222222,
      "grad_norm": 0.38178807497024536,
      "learning_rate": 1.2536043909088191e-06,
      "loss": 0.0458,
      "step": 117
    },
    {
      "epoch": 4.661728395061728,
      "grad_norm": 0.33953651785850525,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.0538,
      "step": 118
    },
    {
      "epoch": 4.701234567901235,
      "grad_norm": 0.4527745246887207,
      "learning_rate": 7.064490740882057e-07,
      "loss": 0.0635,
      "step": 119
    },
    {
      "epoch": 4.7407407407407405,
      "grad_norm": 0.41231685876846313,
      "learning_rate": 4.909437331777179e-07,
      "loss": 0.0603,
      "step": 120
    },
    {
      "epoch": 4.7407407407407405,
      "eval_loss": 0.06684371829032898,
      "eval_runtime": 21.0307,
      "eval_samples_per_second": 2.14,
      "eval_steps_per_second": 2.14,
      "step": 120
    },
    {
      "epoch": 4.780246913580247,
      "grad_norm": 0.30335646867752075,
      "learning_rate": 3.143895053378698e-07,
      "loss": 0.0231,
      "step": 121
    },
    {
      "epoch": 4.8197530864197535,
      "grad_norm": 0.36105185747146606,
      "learning_rate": 1.7692529411904578e-07,
      "loss": 0.0625,
      "step": 122
    },
    {
      "epoch": 4.859259259259259,
      "grad_norm": 0.319932758808136,
      "learning_rate": 7.865924910916977e-08,
      "loss": 0.0508,
      "step": 123
    },
    {
      "epoch": 4.898765432098766,
      "grad_norm": 0.31719672679901123,
      "learning_rate": 1.9668680847356735e-08,
      "loss": 0.0383,
      "step": 124
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 0.33628955483436584,
      "learning_rate": 0.0,
      "loss": 0.0472,
      "step": 125
    },
    {
      "epoch": 4.938271604938271,
      "step": 125,
      "total_flos": 1.3285867935773491e+17,
      "train_loss": 0.1088795803785324,
      "train_runtime": 3712.9103,
      "train_samples_per_second": 0.545,
      "train_steps_per_second": 0.034
    }
  ],
  "logging_steps": 1,
  "max_steps": 125,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3285867935773491e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}