{ "best_global_step": null, "best_metric": null, "best_model_checkpoint": null, "epoch": 1.0, "eval_steps": 500, "global_step": 170, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.029411764705882353, "grad_norm": 4.553732872009277, "learning_rate": 2.777777777777778e-05, "loss": 0.7808, "num_tokens": 163560.0, "step": 5 }, { "epoch": 0.058823529411764705, "grad_norm": 2.3360044956207275, "learning_rate": 4.9995716618706634e-05, "loss": 0.1826, "num_tokens": 327236.0, "step": 10 }, { "epoch": 0.08823529411764706, "grad_norm": 0.659660279750824, "learning_rate": 4.9845969445888354e-05, "loss": 0.0853, "num_tokens": 490970.0, "step": 15 }, { "epoch": 0.11764705882352941, "grad_norm": 0.6894950866699219, "learning_rate": 4.948368129547296e-05, "loss": 0.0705, "num_tokens": 654151.0, "step": 20 }, { "epoch": 0.14705882352941177, "grad_norm": 0.47198617458343506, "learning_rate": 4.891229802725401e-05, "loss": 0.0624, "num_tokens": 817991.0, "step": 25 }, { "epoch": 0.17647058823529413, "grad_norm": 0.4311853051185608, "learning_rate": 4.8137254283872696e-05, "loss": 0.0577, "num_tokens": 981831.0, "step": 30 }, { "epoch": 0.20588235294117646, "grad_norm": 0.37233826518058777, "learning_rate": 4.7165921799873716e-05, "loss": 0.0542, "num_tokens": 1145572.0, "step": 35 }, { "epoch": 0.23529411764705882, "grad_norm": 0.32413461804389954, "learning_rate": 4.6007539286346375e-05, "loss": 0.0539, "num_tokens": 1309412.0, "step": 40 }, { "epoch": 0.2647058823529412, "grad_norm": 0.27661165595054626, "learning_rate": 4.467312455804482e-05, "loss": 0.0528, "num_tokens": 1473224.0, "step": 45 }, { "epoch": 0.29411764705882354, "grad_norm": 0.25189208984375, "learning_rate": 4.317536973877955e-05, "loss": 0.0528, "num_tokens": 1637064.0, "step": 50 }, { "epoch": 0.3235294117647059, "grad_norm": 0.258693665266037, "learning_rate": 4.1528520541821506e-05, "loss": 0.0506, "num_tokens": 1800823.0, "step": 55 }, { "epoch": 0.35294117647058826, "grad_norm": 0.22541610896587372, "learning_rate": 3.974824077352845e-05, "loss": 0.0504, "num_tokens": 1964243.0, "step": 60 }, { "epoch": 0.38235294117647056, "grad_norm": 0.31295299530029297, "learning_rate": 3.785146334895093e-05, "loss": 0.0504, "num_tokens": 2127639.0, "step": 65 }, { "epoch": 0.4117647058823529, "grad_norm": 0.24782125651836395, "learning_rate": 3.58562292364649e-05, "loss": 0.0489, "num_tokens": 2291479.0, "step": 70 }, { "epoch": 0.4411764705882353, "grad_norm": 0.20237773656845093, "learning_rate": 3.378151586328963e-05, "loss": 0.0489, "num_tokens": 2455319.0, "step": 75 }, { "epoch": 0.47058823529411764, "grad_norm": 0.15972378849983215, "learning_rate": 3.164705661399079e-05, "loss": 0.0489, "num_tokens": 2619081.0, "step": 80 }, { "epoch": 0.5, "grad_norm": 0.18197670578956604, "learning_rate": 2.947315313878701e-05, "loss": 0.0496, "num_tokens": 2782547.0, "step": 85 }, { "epoch": 0.5294117647058824, "grad_norm": 0.6986818909645081, "learning_rate": 2.7280482256866697e-05, "loss": 0.0478, "num_tokens": 2946387.0, "step": 90 }, { "epoch": 0.5588235294117647, "grad_norm": 0.22870764136314392, "learning_rate": 2.508989929133051e-05, "loss": 0.0474, "num_tokens": 3109604.0, "step": 95 }, { "epoch": 0.5882352941176471, "grad_norm": 0.17916245758533478, "learning_rate": 2.2922239706315745e-05, "loss": 0.0461, "num_tokens": 3273444.0, "step": 100 }, { "epoch": 0.6176470588235294, "grad_norm": 0.3453664779663086, "learning_rate": 2.079812093300668e-05, "loss": 0.046, 
"num_tokens": 3435972.0, "step": 105 }, { "epoch": 0.6470588235294118, "grad_norm": 0.26898708939552307, "learning_rate": 1.8737746269439006e-05, "loss": 0.0452, "num_tokens": 3599812.0, "step": 110 }, { "epoch": 0.6764705882352942, "grad_norm": 0.2884495258331299, "learning_rate": 1.6760712719281375e-05, "loss": 0.0459, "num_tokens": 3763587.0, "step": 115 }, { "epoch": 0.7058823529411765, "grad_norm": 0.25366732478141785, "learning_rate": 1.4885824597312362e-05, "loss": 0.0461, "num_tokens": 3927092.0, "step": 120 }, { "epoch": 0.7352941176470589, "grad_norm": 0.21009443700313568, "learning_rate": 1.313091467446158e-05, "loss": 0.0444, "num_tokens": 4090932.0, "step": 125 }, { "epoch": 0.7647058823529411, "grad_norm": 0.19418346881866455, "learning_rate": 1.1512674563572253e-05, "loss": 0.0459, "num_tokens": 4254483.0, "step": 130 }, { "epoch": 0.7941176470588235, "grad_norm": 0.19523853063583374, "learning_rate": 1.0046495959150554e-05, "loss": 0.0446, "num_tokens": 4418323.0, "step": 135 }, { "epoch": 0.8235294117647058, "grad_norm": 0.1927725076675415, "learning_rate": 8.746324241130455e-06, "loss": 0.0434, "num_tokens": 4581587.0, "step": 140 }, { "epoch": 0.8529411764705882, "grad_norm": 0.1469401717185974, "learning_rate": 7.624525835084185e-06, "loss": 0.0436, "num_tokens": 4745427.0, "step": 145 }, { "epoch": 0.8823529411764706, "grad_norm": 0.1357996016740799, "learning_rate": 6.691770590465606e-06, "loss": 0.0432, "num_tokens": 4909267.0, "step": 150 }, { "epoch": 0.9117647058823529, "grad_norm": 0.16276375949382782, "learning_rate": 5.95693029563144e-06, "loss": 0.0429, "num_tokens": 5073107.0, "step": 155 }, { "epoch": 0.9411764705882353, "grad_norm": 0.17533668875694275, "learning_rate": 5.426994294902611e-06, "loss": 0.0431, "num_tokens": 5236905.0, "step": 160 }, { "epoch": 0.9705882352941176, "grad_norm": 0.1326180398464203, "learning_rate": 5.10700301026355e-06, "loss": 0.0434, "num_tokens": 5400452.0, "step": 165 }, { "epoch": 1.0, "grad_norm": 0.19520561397075653, "learning_rate": 5e-06, "loss": 0.0428, "num_tokens": 5563792.0, "step": 170 }, { "epoch": 1.0, "step": 170, "total_flos": 9.27422622990336e+16, "train_loss": 0.0753641640438753, "train_runtime": 542.7298, "train_samples_per_second": 19.957, "train_steps_per_second": 0.313 } ], "logging_steps": 5, "max_steps": 170, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 100, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 9.27422622990336e+16, "train_batch_size": 16, "trial_name": null, "trial_params": null }