{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9963706750544397,
  "eval_steps": 100,
  "global_step": 387,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03871279941930801,
      "grad_norm": 132.01553344726562,
      "learning_rate": 2.5e-06,
      "loss": 12.7967,
      "step": 5
    },
    {
      "epoch": 0.07742559883861602,
      "grad_norm": 132.22860717773438,
      "learning_rate": 5e-06,
      "loss": 12.7632,
      "step": 10
    },
    {
      "epoch": 0.11613839825792402,
      "grad_norm": 127.19441986083984,
      "learning_rate": 7.500000000000001e-06,
      "loss": 10.7405,
      "step": 15
    },
    {
      "epoch": 0.15485119767723204,
      "grad_norm": 131.84519958496094,
      "learning_rate": 1e-05,
      "loss": 7.2418,
      "step": 20
    },
    {
      "epoch": 0.19356399709654004,
      "grad_norm": 45.38945007324219,
      "learning_rate": 9.863760217983652e-06,
      "loss": 3.0355,
      "step": 25
    },
    {
      "epoch": 0.23227679651584804,
      "grad_norm": 52.082942962646484,
      "learning_rate": 9.727520435967303e-06,
      "loss": 1.3251,
      "step": 30
    },
    {
      "epoch": 0.27098959593515604,
      "grad_norm": 11.499299049377441,
      "learning_rate": 9.591280653950955e-06,
      "loss": 0.6864,
      "step": 35
    },
    {
      "epoch": 0.3097023953544641,
      "grad_norm": 18.093923568725586,
      "learning_rate": 9.455040871934606e-06,
      "loss": 0.652,
      "step": 40
    },
    {
      "epoch": 0.3484151947737721,
      "grad_norm": 3.510300397872925,
      "learning_rate": 9.318801089918257e-06,
      "loss": 0.6286,
      "step": 45
    },
    {
      "epoch": 0.3871279941930801,
      "grad_norm": 5.945786952972412,
      "learning_rate": 9.182561307901908e-06,
      "loss": 0.4977,
      "step": 50
    },
    {
      "epoch": 0.4258407936123881,
      "grad_norm": 37.19482421875,
      "learning_rate": 9.04632152588556e-06,
      "loss": 0.6305,
      "step": 55
    },
    {
      "epoch": 0.4645535930316961,
      "grad_norm": 5.250940799713135,
      "learning_rate": 8.91008174386921e-06,
      "loss": 0.693,
      "step": 60
    },
    {
      "epoch": 0.5032663924510041,
      "grad_norm": 15.312704086303711,
      "learning_rate": 8.773841961852862e-06,
      "loss": 0.6779,
      "step": 65
    },
    {
      "epoch": 0.5419791918703121,
      "grad_norm": 14.1633882522583,
      "learning_rate": 8.637602179836513e-06,
      "loss": 0.6297,
      "step": 70
    },
    {
      "epoch": 0.5806919912896201,
      "grad_norm": 16.27649688720703,
      "learning_rate": 8.501362397820165e-06,
      "loss": 0.5674,
      "step": 75
    },
    {
      "epoch": 0.6194047907089282,
      "grad_norm": 18.839599609375,
      "learning_rate": 8.365122615803816e-06,
      "loss": 0.735,
      "step": 80
    },
    {
      "epoch": 0.6581175901282361,
      "grad_norm": 4.531907081604004,
      "learning_rate": 8.228882833787467e-06,
      "loss": 0.4994,
      "step": 85
    },
    {
      "epoch": 0.6968303895475442,
      "grad_norm": 3.412825345993042,
      "learning_rate": 8.092643051771117e-06,
      "loss": 0.6938,
      "step": 90
    },
    {
      "epoch": 0.7355431889668521,
      "grad_norm": 11.98742389678955,
      "learning_rate": 7.95640326975477e-06,
      "loss": 0.6628,
      "step": 95
    },
    {
      "epoch": 0.7742559883861602,
      "grad_norm": 7.5615081787109375,
      "learning_rate": 7.82016348773842e-06,
      "loss": 0.5611,
      "step": 100
    },
    {
      "epoch": 0.7742559883861602,
      "eval_loss": 0.6137279868125916,
      "eval_mse": 0.6137279992816091,
      "eval_runtime": 12.4934,
      "eval_samples_per_second": 34.818,
      "eval_steps_per_second": 17.449,
      "step": 100
    },
    {
      "epoch": 0.8129687878054682,
      "grad_norm": 5.634002208709717,
      "learning_rate": 7.683923705722072e-06,
      "loss": 0.5883,
      "step": 105
    },
    {
      "epoch": 0.8516815872247762,
      "grad_norm": 8.587458610534668,
      "learning_rate": 7.547683923705723e-06,
      "loss": 0.5996,
      "step": 110
    },
    {
      "epoch": 0.8903943866440842,
      "grad_norm": 6.751793384552002,
      "learning_rate": 7.411444141689374e-06,
      "loss": 0.5587,
      "step": 115
    },
    {
      "epoch": 0.9291071860633922,
      "grad_norm": 3.9238288402557373,
      "learning_rate": 7.275204359673025e-06,
      "loss": 0.66,
      "step": 120
    },
    {
      "epoch": 0.9678199854827002,
      "grad_norm": 2.3443965911865234,
      "learning_rate": 7.138964577656676e-06,
      "loss": 0.523,
      "step": 125
    },
    {
      "epoch": 1.0065327849020083,
      "grad_norm": 9.468040466308594,
      "learning_rate": 7.002724795640327e-06,
      "loss": 0.6157,
      "step": 130
    },
    {
      "epoch": 1.0452455843213162,
      "grad_norm": 10.286754608154297,
      "learning_rate": 6.8664850136239795e-06,
      "loss": 0.5813,
      "step": 135
    },
    {
      "epoch": 1.0839583837406241,
      "grad_norm": 1.447935700416565,
      "learning_rate": 6.730245231607629e-06,
      "loss": 0.5564,
      "step": 140
    },
    {
      "epoch": 1.1226711831599323,
      "grad_norm": 4.448881149291992,
      "learning_rate": 6.594005449591281e-06,
      "loss": 0.5708,
      "step": 145
    },
    {
      "epoch": 1.1613839825792402,
      "grad_norm": 0.6417679190635681,
      "learning_rate": 6.457765667574932e-06,
      "loss": 0.6145,
      "step": 150
    },
    {
      "epoch": 1.2000967819985482,
      "grad_norm": 1.2481282949447632,
      "learning_rate": 6.321525885558584e-06,
      "loss": 0.5985,
      "step": 155
    },
    {
      "epoch": 1.2388095814178564,
      "grad_norm": 8.027632713317871,
      "learning_rate": 6.185286103542235e-06,
      "loss": 0.6178,
      "step": 160
    },
    {
      "epoch": 1.2775223808371643,
      "grad_norm": 7.15224027633667,
      "learning_rate": 6.049046321525886e-06,
      "loss": 0.7308,
      "step": 165
    },
    {
      "epoch": 1.3162351802564722,
      "grad_norm": 10.559642791748047,
      "learning_rate": 5.9128065395095365e-06,
      "loss": 0.6057,
      "step": 170
    },
    {
      "epoch": 1.3549479796757802,
      "grad_norm": 3.033139705657959,
      "learning_rate": 5.776566757493189e-06,
      "loss": 0.5388,
      "step": 175
    },
    {
      "epoch": 1.3936607790950883,
      "grad_norm": 9.217206954956055,
      "learning_rate": 5.64032697547684e-06,
      "loss": 0.4886,
      "step": 180
    },
    {
      "epoch": 1.4323735785143963,
      "grad_norm": 8.655097007751465,
      "learning_rate": 5.504087193460491e-06,
      "loss": 0.5852,
      "step": 185
    },
    {
      "epoch": 1.4710863779337044,
      "grad_norm": 14.92773723602295,
      "learning_rate": 5.367847411444142e-06,
      "loss": 0.615,
      "step": 190
    },
    {
      "epoch": 1.5097991773530124,
      "grad_norm": 6.135091781616211,
      "learning_rate": 5.231607629427793e-06,
      "loss": 0.6194,
      "step": 195
    },
    {
      "epoch": 1.5485119767723203,
      "grad_norm": 14.746587753295898,
      "learning_rate": 5.095367847411444e-06,
      "loss": 0.6542,
      "step": 200
    },
    {
      "epoch": 1.5485119767723203,
      "eval_loss": 0.6139479875564575,
      "eval_mse": 0.6139480064655173,
      "eval_runtime": 12.9867,
      "eval_samples_per_second": 33.496,
      "eval_steps_per_second": 16.786,
      "step": 200
    },
    {
      "epoch": 1.5872247761916283,
      "grad_norm": 6.090452194213867,
      "learning_rate": 4.959128065395096e-06,
      "loss": 0.5262,
      "step": 205
    },
    {
      "epoch": 1.6259375756109362,
      "grad_norm": 9.420876502990723,
      "learning_rate": 4.822888283378747e-06,
      "loss": 0.5827,
      "step": 210
    },
    {
      "epoch": 1.6646503750302444,
      "grad_norm": 5.127752304077148,
      "learning_rate": 4.686648501362398e-06,
      "loss": 0.5495,
      "step": 215
    },
    {
      "epoch": 1.7033631744495525,
      "grad_norm": 13.193638801574707,
      "learning_rate": 4.55040871934605e-06,
      "loss": 0.7189,
      "step": 220
    },
    {
      "epoch": 1.7420759738688605,
      "grad_norm": 6.717041492462158,
      "learning_rate": 4.414168937329701e-06,
      "loss": 0.6618,
      "step": 225
    },
    {
      "epoch": 1.7807887732881684,
      "grad_norm": 3.5996382236480713,
      "learning_rate": 4.2779291553133515e-06,
      "loss": 0.5368,
      "step": 230
    },
    {
      "epoch": 1.8195015727074764,
      "grad_norm": 13.474601745605469,
      "learning_rate": 4.141689373297003e-06,
      "loss": 0.5235,
      "step": 235
    },
    {
      "epoch": 1.8582143721267843,
      "grad_norm": 4.9880852699279785,
      "learning_rate": 4.005449591280654e-06,
      "loss": 0.6603,
      "step": 240
    },
    {
      "epoch": 1.8969271715460925,
      "grad_norm": 18.385284423828125,
      "learning_rate": 3.869209809264305e-06,
      "loss": 0.5856,
      "step": 245
    },
    {
      "epoch": 1.9356399709654004,
      "grad_norm": 8.442005157470703,
      "learning_rate": 3.732970027247957e-06,
      "loss": 0.6516,
      "step": 250
    },
    {
      "epoch": 1.9743527703847086,
      "grad_norm": 7.920212268829346,
      "learning_rate": 3.5967302452316077e-06,
      "loss": 0.6733,
      "step": 255
    },
    {
      "epoch": 2.0130655698040165,
      "grad_norm": 5.7283430099487305,
      "learning_rate": 3.460490463215259e-06,
      "loss": 0.6598,
      "step": 260
    },
    {
      "epoch": 2.0517783692233245,
      "grad_norm": 9.823247909545898,
      "learning_rate": 3.3242506811989107e-06,
      "loss": 0.5799,
      "step": 265
    },
    {
      "epoch": 2.0904911686426324,
      "grad_norm": 10.975919723510742,
      "learning_rate": 3.1880108991825615e-06,
      "loss": 0.6606,
      "step": 270
    },
    {
      "epoch": 2.1292039680619403,
      "grad_norm": 12.75275707244873,
      "learning_rate": 3.0517711171662127e-06,
      "loss": 0.5614,
      "step": 275
    },
    {
      "epoch": 2.1679167674812483,
      "grad_norm": 6.546001434326172,
      "learning_rate": 2.9155313351498636e-06,
      "loss": 0.5947,
      "step": 280
    },
    {
      "epoch": 2.2066295669005567,
      "grad_norm": 6.290523052215576,
      "learning_rate": 2.7792915531335152e-06,
      "loss": 0.6092,
      "step": 285
    },
    {
      "epoch": 2.2453423663198646,
      "grad_norm": 2.127336025238037,
      "learning_rate": 2.6430517711171665e-06,
      "loss": 0.5901,
      "step": 290
    },
    {
      "epoch": 2.2840551657391726,
      "grad_norm": 5.803460121154785,
      "learning_rate": 2.5068119891008173e-06,
      "loss": 0.5735,
      "step": 295
    },
    {
      "epoch": 2.3227679651584805,
      "grad_norm": 3.169969081878662,
      "learning_rate": 2.370572207084469e-06,
      "loss": 0.5106,
      "step": 300
    },
    {
      "epoch": 2.3227679651584805,
      "eval_loss": 0.6125330924987793,
      "eval_mse": 0.6125331133261495,
      "eval_runtime": 13.1593,
      "eval_samples_per_second": 33.057,
      "eval_steps_per_second": 16.566,
      "step": 300
    },
    {
      "epoch": 2.3614807645777884,
      "grad_norm": 3.102710485458374,
      "learning_rate": 2.2343324250681202e-06,
      "loss": 0.5843,
      "step": 305
    },
    {
      "epoch": 2.4001935639970964,
      "grad_norm": 2.053990602493286,
      "learning_rate": 2.098092643051771e-06,
      "loss": 0.6371,
      "step": 310
    },
    {
      "epoch": 2.4389063634164048,
      "grad_norm": 10.483580589294434,
      "learning_rate": 1.9618528610354227e-06,
      "loss": 0.5718,
      "step": 315
    },
    {
      "epoch": 2.4776191628357127,
      "grad_norm": 3.1228301525115967,
      "learning_rate": 1.8256130790190738e-06,
      "loss": 0.5231,
      "step": 320
    },
    {
      "epoch": 2.5163319622550206,
      "grad_norm": 5.304292678833008,
      "learning_rate": 1.689373297002725e-06,
      "loss": 0.6116,
      "step": 325
    },
    {
      "epoch": 2.5550447616743286,
      "grad_norm": 6.294493675231934,
      "learning_rate": 1.553133514986376e-06,
      "loss": 0.6674,
      "step": 330
    },
    {
      "epoch": 2.5937575610936365,
      "grad_norm": 3.730954647064209,
      "learning_rate": 1.4168937329700275e-06,
      "loss": 0.6522,
      "step": 335
    },
    {
      "epoch": 2.6324703605129445,
      "grad_norm": 1.1117771863937378,
      "learning_rate": 1.2806539509536785e-06,
      "loss": 0.7333,
      "step": 340
    },
    {
      "epoch": 2.6711831599322524,
      "grad_norm": 2.0916991233825684,
      "learning_rate": 1.1444141689373298e-06,
      "loss": 0.6361,
      "step": 345
    },
    {
      "epoch": 2.7098959593515604,
      "grad_norm": 1.0104436874389648,
      "learning_rate": 1.008174386920981e-06,
      "loss": 0.621,
      "step": 350
    },
    {
      "epoch": 2.7486087587708687,
      "grad_norm": 10.215568542480469,
      "learning_rate": 8.719346049046322e-07,
      "loss": 0.5874,
      "step": 355
    },
    {
      "epoch": 2.7873215581901767,
      "grad_norm": 5.2254557609558105,
      "learning_rate": 7.356948228882835e-07,
      "loss": 0.621,
      "step": 360
    },
    {
      "epoch": 2.8260343576094846,
      "grad_norm": 5.230953693389893,
      "learning_rate": 5.994550408719347e-07,
      "loss": 0.5851,
      "step": 365
    },
    {
      "epoch": 2.8647471570287926,
      "grad_norm": 1.5109760761260986,
      "learning_rate": 4.6321525885558585e-07,
      "loss": 0.5377,
      "step": 370
    },
    {
      "epoch": 2.9034599564481005,
      "grad_norm": 5.254393100738525,
      "learning_rate": 3.2697547683923705e-07,
      "loss": 0.6476,
      "step": 375
    },
    {
      "epoch": 2.942172755867409,
      "grad_norm": 7.337405681610107,
      "learning_rate": 1.907356948228883e-07,
      "loss": 0.5664,
      "step": 380
    },
    {
      "epoch": 2.980885555286717,
      "grad_norm": 4.144803524017334,
      "learning_rate": 5.449591280653951e-08,
      "loss": 0.5508,
      "step": 385
    },
    {
      "epoch": 2.9963706750544397,
      "step": 387,
      "total_flos": 2.606459925823488e+16,
      "train_loss": 1.1786008090627902,
      "train_runtime": 1155.1053,
      "train_samples_per_second": 21.466,
      "train_steps_per_second": 0.335
    }
  ],
  "logging_steps": 5,
  "max_steps": 387,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.606459925823488e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}