{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 802,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012468827930174564,
      "grad_norm": 0.0348743200302124,
      "learning_rate": 4.9995205016168205e-05,
      "loss": 0.4047,
      "step": 5
    },
    {
      "epoch": 0.02493765586034913,
      "grad_norm": 0.052354972809553146,
      "learning_rate": 4.998082190402241e-05,
      "loss": 0.3855,
      "step": 10
    },
    {
      "epoch": 0.03740648379052369,
      "grad_norm": 0.02390989102423191,
      "learning_rate": 4.995685618090584e-05,
      "loss": 0.3818,
      "step": 15
    },
    {
      "epoch": 0.04987531172069826,
      "grad_norm": 0.01846301183104515,
      "learning_rate": 4.9923317040038885e-05,
      "loss": 0.3884,
      "step": 20
    },
    {
      "epoch": 0.06234413965087282,
      "grad_norm": 0.018641460686922073,
      "learning_rate": 4.988021734699259e-05,
      "loss": 0.3713,
      "step": 25
    },
    {
      "epoch": 0.07481296758104738,
      "grad_norm": 0.016644474118947983,
      "learning_rate": 4.982757363475346e-05,
      "loss": 0.3492,
      "step": 30
    },
    {
      "epoch": 0.08728179551122195,
      "grad_norm": 0.01785973273217678,
      "learning_rate": 4.9765406097381426e-05,
      "loss": 0.3596,
      "step": 35
    },
    {
      "epoch": 0.09975062344139651,
      "grad_norm": 0.01776760071516037,
      "learning_rate": 4.96937385822634e-05,
      "loss": 0.3792,
      "step": 40
    },
    {
      "epoch": 0.11221945137157108,
      "grad_norm": 0.019663693383336067,
      "learning_rate": 4.96125985809655e-05,
      "loss": 0.3393,
      "step": 45
    },
    {
      "epoch": 0.12468827930174564,
      "grad_norm": 0.036602046340703964,
      "learning_rate": 4.952201721868727e-05,
      "loss": 0.3433,
      "step": 50
    },
    {
      "epoch": 0.1371571072319202,
      "grad_norm": 0.020088447257876396,
      "learning_rate": 4.94220292423221e-05,
      "loss": 0.3563,
      "step": 55
    },
    {
      "epoch": 0.14962593516209477,
      "grad_norm": 0.023927291855216026,
      "learning_rate": 4.9312673007128405e-05,
      "loss": 0.3543,
      "step": 60
    },
    {
      "epoch": 0.16209476309226933,
      "grad_norm": 0.023712489753961563,
      "learning_rate": 4.919399046201656e-05,
      "loss": 0.3442,
      "step": 65
    },
    {
      "epoch": 0.1745635910224439,
      "grad_norm": 0.02708962745964527,
      "learning_rate": 4.906602713345735e-05,
      "loss": 0.344,
      "step": 70
    },
    {
      "epoch": 0.18703241895261846,
      "grad_norm": 0.026062602177262306,
      "learning_rate": 4.8928832108018106e-05,
      "loss": 0.3426,
      "step": 75
    },
    {
      "epoch": 0.19950124688279303,
      "grad_norm": 0.025727244094014168,
      "learning_rate": 4.878245801353313e-05,
      "loss": 0.3677,
      "step": 80
    },
    {
      "epoch": 0.2119700748129676,
      "grad_norm": 0.03201605752110481,
      "learning_rate": 4.862696099891573e-05,
      "loss": 0.333,
      "step": 85
    },
    {
      "epoch": 0.22443890274314215,
      "grad_norm": 0.03317998722195625,
      "learning_rate": 4.846240071261959e-05,
      "loss": 0.3596,
      "step": 90
    },
    {
      "epoch": 0.23690773067331672,
      "grad_norm": 0.02743816375732422,
      "learning_rate": 4.828884027975768e-05,
      "loss": 0.3156,
      "step": 95
    },
    {
      "epoch": 0.24937655860349128,
      "grad_norm": 0.030033418908715248,
      "learning_rate": 4.810634627788756e-05,
      "loss": 0.3355,
      "step": 100
    },
    {
      "epoch": 0.26184538653366585,
      "grad_norm": 0.031716831028461456,
      "learning_rate": 4.791498871147229e-05,
      "loss": 0.3518,
      "step": 105
    },
    {
      "epoch": 0.2743142144638404,
      "grad_norm": 0.03049326501786709,
      "learning_rate": 4.7714840985026834e-05,
      "loss": 0.3031,
      "step": 110
    },
    {
      "epoch": 0.286783042394015,
      "grad_norm": 0.03361716866493225,
      "learning_rate": 4.7505979874960184e-05,
      "loss": 0.3452,
      "step": 115
    },
    {
      "epoch": 0.29925187032418954,
      "grad_norm": 0.0355195626616478,
      "learning_rate": 4.728848550012399e-05,
      "loss": 0.343,
      "step": 120
    },
    {
      "epoch": 0.3117206982543641,
      "grad_norm": 0.04162430390715599,
      "learning_rate": 4.706244129107914e-05,
      "loss": 0.3234,
      "step": 125
    },
    {
      "epoch": 0.32418952618453867,
      "grad_norm": 0.04150069132447243,
      "learning_rate": 4.682793395809184e-05,
      "loss": 0.3238,
      "step": 130
    },
    {
      "epoch": 0.33665835411471323,
      "grad_norm": 0.04490313306450844,
      "learning_rate": 4.658505345787169e-05,
      "loss": 0.3337,
      "step": 135
    },
    {
      "epoch": 0.3491271820448878,
      "grad_norm": 0.03981255367398262,
      "learning_rate": 4.633389295906443e-05,
      "loss": 0.3422,
      "step": 140
    },
    {
      "epoch": 0.36159600997506236,
      "grad_norm": 0.04022540897130966,
      "learning_rate": 4.607454880651253e-05,
      "loss": 0.315,
      "step": 145
    },
    {
      "epoch": 0.3740648379052369,
      "grad_norm": 0.03891991823911667,
      "learning_rate": 4.580712048429746e-05,
      "loss": 0.3213,
      "step": 150
    },
    {
      "epoch": 0.3865336658354115,
      "grad_norm": 0.04148748144507408,
      "learning_rate": 4.553171057757772e-05,
      "loss": 0.3326,
      "step": 155
    },
    {
      "epoch": 0.39900249376558605,
      "grad_norm": 0.04601826146245003,
      "learning_rate": 4.5248424733237294e-05,
      "loss": 0.3264,
      "step": 160
    },
    {
      "epoch": 0.4114713216957606,
      "grad_norm": 0.04266240820288658,
      "learning_rate": 4.4957371619359644e-05,
      "loss": 0.3422,
      "step": 165
    },
    {
      "epoch": 0.4239401496259352,
      "grad_norm": 0.04361332580447197,
      "learning_rate": 4.46586628835428e-05,
      "loss": 0.3136,
      "step": 170
    },
    {
      "epoch": 0.43640897755610975,
      "grad_norm": 0.051424313336610794,
      "learning_rate": 4.435241311007146e-05,
      "loss": 0.335,
      "step": 175
    },
    {
      "epoch": 0.4488778054862843,
      "grad_norm": 0.04484390467405319,
      "learning_rate": 4.403873977596258e-05,
      "loss": 0.3188,
      "step": 180
    },
    {
      "epoch": 0.4613466334164589,
      "grad_norm": 0.04609343037009239,
      "learning_rate": 4.371776320590143e-05,
      "loss": 0.3355,
      "step": 185
    },
    {
      "epoch": 0.47381546134663344,
      "grad_norm": 0.052802663296461105,
      "learning_rate": 4.3389606526085115e-05,
      "loss": 0.3358,
      "step": 190
    },
    {
      "epoch": 0.486284289276808,
      "grad_norm": 0.0500580295920372,
      "learning_rate": 4.305439561699154e-05,
      "loss": 0.2961,
      "step": 195
    },
    {
      "epoch": 0.49875311720698257,
      "grad_norm": 0.05000691115856171,
      "learning_rate": 4.271225906509186e-05,
      "loss": 0.3014,
      "step": 200
    },
    {
      "epoch": 0.5112219451371571,
      "grad_norm": 0.049051977694034576,
      "learning_rate": 4.236332811352485e-05,
      "loss": 0.3182,
      "step": 205
    },
    {
      "epoch": 0.5236907730673317,
      "grad_norm": 0.05655088275671005,
      "learning_rate": 4.200773661175219e-05,
      "loss": 0.3117,
      "step": 210
    },
    {
      "epoch": 0.5361596009975063,
      "grad_norm": 0.056144360452890396,
      "learning_rate": 4.164562096421403e-05,
      "loss": 0.2916,
      "step": 215
    },
    {
      "epoch": 0.5486284289276808,
      "grad_norm": 0.04913180693984032,
      "learning_rate": 4.127712007800438e-05,
      "loss": 0.3342,
      "step": 220
    },
    {
      "epoch": 0.5610972568578554,
      "grad_norm": 0.06693854182958603,
      "learning_rate": 4.090237530958656e-05,
      "loss": 0.3049,
      "step": 225
    },
    {
      "epoch": 0.57356608478803,
      "grad_norm": 0.056180231273174286,
      "learning_rate": 4.052153041056901e-05,
      "loss": 0.3349,
      "step": 230
    },
    {
      "epoch": 0.5860349127182045,
      "grad_norm": 0.06079794839024544,
      "learning_rate": 4.0134731472562384e-05,
      "loss": 0.3266,
      "step": 235
    },
    {
      "epoch": 0.5985037406483791,
      "grad_norm": 0.05693957582116127,
      "learning_rate": 3.9742126871138995e-05,
      "loss": 0.3163,
      "step": 240
    },
    {
      "epoch": 0.6109725685785536,
      "grad_norm": 0.056326717138290405,
      "learning_rate": 3.934386720891614e-05,
      "loss": 0.2934,
      "step": 245
    },
    {
      "epoch": 0.6234413965087282,
      "grad_norm": 0.05838495492935181,
      "learning_rate": 3.894010525778511e-05,
      "loss": 0.2749,
      "step": 250
    },
    {
      "epoch": 0.6359102244389028,
      "grad_norm": 0.0631040409207344,
      "learning_rate": 3.853099590030811e-05,
      "loss": 0.3367,
      "step": 255
    },
    {
      "epoch": 0.6483790523690773,
      "grad_norm": 0.062413156032562256,
      "learning_rate": 3.81166960703055e-05,
      "loss": 0.3205,
      "step": 260
    },
    {
      "epoch": 0.6608478802992519,
      "grad_norm": 0.06148769333958626,
      "learning_rate": 3.769736469265621e-05,
      "loss": 0.3159,
      "step": 265
    },
    {
      "epoch": 0.6733167082294265,
      "grad_norm": 0.06495541334152222,
      "learning_rate": 3.727316262233429e-05,
      "loss": 0.2995,
      "step": 270
    },
    {
      "epoch": 0.685785536159601,
      "grad_norm": 0.05486743152141571,
      "learning_rate": 3.684425258270525e-05,
      "loss": 0.3094,
      "step": 275
    },
    {
      "epoch": 0.6982543640897756,
      "grad_norm": 0.05586089566349983,
      "learning_rate": 3.64107991031055e-05,
      "loss": 0.3321,
      "step": 280
    },
    {
      "epoch": 0.7107231920199502,
      "grad_norm": 0.06062714010477066,
      "learning_rate": 3.597296845572917e-05,
      "loss": 0.331,
      "step": 285
    },
    {
      "epoch": 0.7231920199501247,
      "grad_norm": 0.06520465761423111,
      "learning_rate": 3.553092859184629e-05,
      "loss": 0.3199,
      "step": 290
    },
    {
      "epoch": 0.7356608478802993,
      "grad_norm": 0.059496622532606125,
      "learning_rate": 3.508484907737687e-05,
      "loss": 0.3087,
      "step": 295
    },
    {
      "epoch": 0.7481296758104738,
      "grad_norm": 0.06234793737530708,
      "learning_rate": 3.463490102784568e-05,
      "loss": 0.2914,
      "step": 300
    },
    {
      "epoch": 0.7605985037406484,
      "grad_norm": 0.060752127319574356,
      "learning_rate": 3.418125704274253e-05,
      "loss": 0.3059,
      "step": 305
    },
    {
      "epoch": 0.773067331670823,
      "grad_norm": 0.06881830096244812,
      "learning_rate": 3.372409113931334e-05,
      "loss": 0.311,
      "step": 310
    },
    {
      "epoch": 0.7855361596009975,
      "grad_norm": 0.0633009597659111,
      "learning_rate": 3.326357868580734e-05,
      "loss": 0.3262,
      "step": 315
    },
    {
      "epoch": 0.7980049875311721,
      "grad_norm": 0.06173081696033478,
      "learning_rate": 3.2799896334206045e-05,
      "loss": 0.307,
      "step": 320
    },
    {
      "epoch": 0.8104738154613467,
      "grad_norm": 0.0721065104007721,
      "learning_rate": 3.233322195245977e-05,
      "loss": 0.3225,
      "step": 325
    },
    {
      "epoch": 0.8229426433915212,
      "grad_norm": 0.06812290102243423,
      "learning_rate": 3.186373455625774e-05,
      "loss": 0.3427,
      "step": 330
    },
    {
      "epoch": 0.8354114713216958,
      "grad_norm": 0.0630379170179367,
      "learning_rate": 3.139161424035786e-05,
      "loss": 0.332,
      "step": 335
    },
    {
      "epoch": 0.8478802992518704,
      "grad_norm": 0.08122845739126205,
      "learning_rate": 3.091704210950266e-05,
      "loss": 0.3031,
      "step": 340
    },
    {
      "epoch": 0.8603491271820449,
      "grad_norm": 0.07106555998325348,
      "learning_rate": 3.0440200208947694e-05,
      "loss": 0.314,
      "step": 345
    },
    {
      "epoch": 0.8728179551122195,
      "grad_norm": 0.06012728437781334,
      "learning_rate": 2.9961271454629235e-05,
      "loss": 0.2994,
      "step": 350
    },
    {
      "epoch": 0.885286783042394,
      "grad_norm": 0.07092411816120148,
      "learning_rate": 2.9480439562997965e-05,
      "loss": 0.2904,
      "step": 355
    },
    {
      "epoch": 0.8977556109725686,
      "grad_norm": 0.0659562423825264,
      "learning_rate": 2.8997888980545583e-05,
      "loss": 0.3259,
      "step": 360
    },
    {
      "epoch": 0.9102244389027432,
      "grad_norm": 0.06206708773970604,
      "learning_rate": 2.8513804813051363e-05,
      "loss": 0.2975,
      "step": 365
    },
    {
      "epoch": 0.9226932668329177,
      "grad_norm": 0.07387180626392365,
      "learning_rate": 2.8028372754575805e-05,
      "loss": 0.3248,
      "step": 370
    },
    {
      "epoch": 0.9351620947630923,
      "grad_norm": 0.06683710217475891,
      "learning_rate": 2.7541779016228664e-05,
      "loss": 0.3043,
      "step": 375
    },
    {
      "epoch": 0.9476309226932669,
      "grad_norm": 0.09706446528434753,
      "learning_rate": 2.7054210254738572e-05,
      "loss": 0.3101,
      "step": 380
    },
    {
      "epoch": 0.9600997506234414,
      "grad_norm": 0.06651865690946579,
      "learning_rate": 2.6565853500851802e-05,
      "loss": 0.2902,
      "step": 385
    },
    {
      "epoch": 0.972568578553616,
      "grad_norm": 0.07901949435472488,
      "learning_rate": 2.607689608758746e-05,
      "loss": 0.3329,
      "step": 390
    },
    {
      "epoch": 0.9850374064837906,
      "grad_norm": 0.06636166572570801,
      "learning_rate": 2.558752557837684e-05,
      "loss": 0.3212,
      "step": 395
    },
    {
      "epoch": 0.9975062344139651,
      "grad_norm": 0.06299249827861786,
      "learning_rate": 2.5097929695114297e-05,
      "loss": 0.288,
      "step": 400
    },
    {
      "epoch": 1.0099750623441397,
      "grad_norm": 0.06922150403261185,
      "learning_rate": 2.4608296246147373e-05,
      "loss": 0.2973,
      "step": 405
    },
    {
      "epoch": 1.0224438902743143,
      "grad_norm": 0.06634873151779175,
      "learning_rate": 2.4118813054233774e-05,
      "loss": 0.2869,
      "step": 410
    },
    {
      "epoch": 1.0349127182044888,
      "grad_norm": 0.07161210477352142,
      "learning_rate": 2.3629667884492798e-05,
      "loss": 0.3053,
      "step": 415
    },
    {
      "epoch": 1.0473815461346634,
      "grad_norm": 0.06999760866165161,
      "learning_rate": 2.3141048372378864e-05,
      "loss": 0.2907,
      "step": 420
    },
    {
      "epoch": 1.059850374064838,
      "grad_norm": 0.07221878319978714,
      "learning_rate": 2.2653141951704814e-05,
      "loss": 0.2691,
      "step": 425
    },
    {
      "epoch": 1.0723192019950125,
      "grad_norm": 0.07585354894399643,
      "learning_rate": 2.2166135782742527e-05,
      "loss": 0.2969,
      "step": 430
    },
    {
      "epoch": 1.084788029925187,
      "grad_norm": 0.08216829597949982,
      "learning_rate": 2.16802166804285e-05,
      "loss": 0.3006,
      "step": 435
    },
    {
      "epoch": 1.0972568578553616,
      "grad_norm": 0.06559521704912186,
      "learning_rate": 2.119557104270187e-05,
      "loss": 0.2777,
      "step": 440
    },
    {
      "epoch": 1.1097256857855362,
      "grad_norm": 0.06534233689308167,
      "learning_rate": 2.0712384779002393e-05,
      "loss": 0.3153,
      "step": 445
    },
    {
      "epoch": 1.1221945137157108,
      "grad_norm": 0.07653407752513885,
      "learning_rate": 2.0230843238955855e-05,
      "loss": 0.3023,
      "step": 450
    },
    {
      "epoch": 1.1346633416458853,
      "grad_norm": 0.06991739571094513,
      "learning_rate": 1.9751131141274148e-05,
      "loss": 0.3058,
      "step": 455
    },
    {
      "epoch": 1.14713216957606,
      "grad_norm": 0.07048314809799194,
      "learning_rate": 1.927343250289747e-05,
      "loss": 0.3056,
      "step": 460
    },
    {
      "epoch": 1.1596009975062345,
      "grad_norm": 0.0698205828666687,
      "learning_rate": 1.8797930568405614e-05,
      "loss": 0.2994,
      "step": 465
    },
    {
      "epoch": 1.172069825436409,
      "grad_norm": 0.0764131247997284,
      "learning_rate": 1.8324807739725613e-05,
      "loss": 0.3146,
      "step": 470
    },
    {
      "epoch": 1.1845386533665836,
      "grad_norm": 0.07224885374307632,
      "learning_rate": 1.7854245506162582e-05,
      "loss": 0.3107,
      "step": 475
    },
    {
      "epoch": 1.1970074812967582,
      "grad_norm": 0.07462743669748306,
      "learning_rate": 1.738642437478067e-05,
      "loss": 0.3146,
      "step": 480
    },
    {
      "epoch": 1.2094763092269327,
      "grad_norm": 0.08045542985200882,
      "learning_rate": 1.6921523801160757e-05,
      "loss": 0.3102,
      "step": 485
    },
    {
      "epoch": 1.2219451371571073,
      "grad_norm": 0.06614966690540314,
      "learning_rate": 1.645972212056157e-05,
      "loss": 0.2748,
      "step": 490
    },
    {
      "epoch": 1.2344139650872819,
      "grad_norm": 0.07284315675497055,
      "learning_rate": 1.6001196479510448e-05,
      "loss": 0.3133,
      "step": 495
    },
    {
      "epoch": 1.2468827930174564,
      "grad_norm": 0.07468894124031067,
      "learning_rate": 1.5546122767850234e-05,
      "loss": 0.3062,
      "step": 500
    },
    {
      "epoch": 1.259351620947631,
      "grad_norm": 0.06674832105636597,
      "learning_rate": 1.5094675551268095e-05,
      "loss": 0.2781,
      "step": 505
    },
    {
      "epoch": 1.2718204488778055,
      "grad_norm": 0.0756010115146637,
      "learning_rate": 1.4647028004332381e-05,
      "loss": 0.2935,
      "step": 510
    },
    {
      "epoch": 1.28428927680798,
      "grad_norm": 0.07455847412347794,
      "learning_rate": 1.420335184406309e-05,
      "loss": 0.3056,
      "step": 515
    },
    {
      "epoch": 1.2967581047381547,
      "grad_norm": 0.07162386924028397,
      "learning_rate": 1.3763817264061424e-05,
      "loss": 0.2948,
      "step": 520
    },
    {
      "epoch": 1.3092269326683292,
      "grad_norm": 0.07474777102470398,
      "learning_rate": 1.3328592869223746e-05,
      "loss": 0.3089,
      "step": 525
    },
    {
      "epoch": 1.3216957605985038,
      "grad_norm": 0.07821279019117355,
      "learning_rate": 1.2897845611064991e-05,
      "loss": 0.3152,
      "step": 530
    },
    {
      "epoch": 1.3341645885286784,
      "grad_norm": 0.08288177102804184,
      "learning_rate": 1.2471740723676213e-05,
      "loss": 0.3105,
      "step": 535
    },
    {
      "epoch": 1.346633416458853,
      "grad_norm": 0.09601106494665146,
      "learning_rate": 1.2050441660341075e-05,
      "loss": 0.3053,
      "step": 540
    },
    {
      "epoch": 1.3591022443890275,
      "grad_norm": 0.0705566480755806,
      "learning_rate": 1.163411003083534e-05,
      "loss": 0.2892,
      "step": 545
    },
    {
      "epoch": 1.371571072319202,
      "grad_norm": 0.07577934861183167,
      "learning_rate": 1.1222905539433592e-05,
      "loss": 0.2919,
      "step": 550
    },
    {
      "epoch": 1.3840399002493766,
      "grad_norm": 0.0770437940955162,
      "learning_rate": 1.0816985923646839e-05,
      "loss": 0.2886,
      "step": 555
    },
    {
      "epoch": 1.3965087281795512,
      "grad_norm": 0.082821324467659,
      "learning_rate": 1.0416506893714662e-05,
      "loss": 0.3008,
      "step": 560
    },
    {
      "epoch": 1.4089775561097257,
      "grad_norm": 0.08322034776210785,
      "learning_rate": 1.0021622072874948e-05,
      "loss": 0.2882,
      "step": 565
    },
    {
      "epoch": 1.4214463840399003,
      "grad_norm": 0.08221346884965897,
      "learning_rate": 9.632482938434198e-06,
      "loss": 0.3124,
      "step": 570
    },
    {
      "epoch": 1.4339152119700749,
      "grad_norm": 0.07795987278223038,
      "learning_rate": 9.249238763661061e-06,
      "loss": 0.3374,
      "step": 575
    },
    {
      "epoch": 1.4463840399002494,
      "grad_norm": 0.07835546880960464,
      "learning_rate": 8.872036560525255e-06,
      "loss": 0.3153,
      "step": 580
    },
    {
      "epoch": 1.458852867830424,
      "grad_norm": 0.09244232624769211,
      "learning_rate": 8.501021023304009e-06,
      "loss": 0.2844,
      "step": 585
    },
    {
      "epoch": 1.4713216957605986,
      "grad_norm": 0.09036150574684143,
      "learning_rate": 8.13633447307752e-06,
      "loss": 0.2733,
      "step": 590
    },
    {
      "epoch": 1.4837905236907731,
      "grad_norm": 0.08004344999790192,
      "learning_rate": 7.77811680313475e-06,
      "loss": 0.3047,
      "step": 595
    },
    {
      "epoch": 1.4962593516209477,
      "grad_norm": 0.07639230787754059,
      "learning_rate": 7.426505425310531e-06,
      "loss": 0.3215,
      "step": 600
    },
    {
      "epoch": 1.508728179551122,
      "grad_norm": 0.07058786600828171,
      "learning_rate": 7.0816352172746185e-06,
      "loss": 0.2956,
      "step": 605
    },
    {
      "epoch": 1.5211970074812968,
      "grad_norm": 0.08036904782056808,
      "learning_rate": 6.743638470792735e-06,
      "loss": 0.3137,
      "step": 610
    },
    {
      "epoch": 1.5336658354114712,
      "grad_norm": 0.083469919860363,
      "learning_rate": 6.4126448409796565e-06,
      "loss": 0.3049,
      "step": 615
    },
    {
      "epoch": 1.546134663341646,
      "grad_norm": 0.08417661488056183,
      "learning_rate": 6.088781296563637e-06,
      "loss": 0.2976,
      "step": 620
    },
    {
      "epoch": 1.5586034912718203,
      "grad_norm": 0.07743105292320251,
      "learning_rate": 5.77217207118142e-06,
      "loss": 0.3089,
      "step": 625
    },
    {
      "epoch": 1.571072319201995,
      "grad_norm": 0.08944889158010483,
      "learning_rate": 5.462938615722344e-06,
      "loss": 0.2935,
      "step": 630
    },
    {
      "epoch": 1.5835411471321694,
      "grad_norm": 0.07214126735925674,
      "learning_rate": 5.161199551739942e-06,
      "loss": 0.2789,
      "step": 635
    },
    {
      "epoch": 1.5960099750623442,
      "grad_norm": 0.07316874712705612,
      "learning_rate": 4.867070625948867e-06,
      "loss": 0.3113,
      "step": 640
    },
    {
      "epoch": 1.6084788029925186,
      "grad_norm": 0.08915118128061295,
      "learning_rate": 4.58066466582461e-06,
      "loss": 0.298,
      "step": 645
    },
    {
      "epoch": 1.6209476309226933,
      "grad_norm": 0.07792936265468597,
      "learning_rate": 4.3020915363230275e-06,
      "loss": 0.2944,
      "step": 650
    },
    {
      "epoch": 1.6334164588528677,
      "grad_norm": 0.07877308130264282,
      "learning_rate": 4.031458097736265e-06,
      "loss": 0.3276,
      "step": 655
    },
    {
      "epoch": 1.6458852867830425,
      "grad_norm": 0.08033192902803421,
      "learning_rate": 3.7688681647013252e-06,
      "loss": 0.2884,
      "step": 660
    },
    {
      "epoch": 1.6583541147132168,
      "grad_norm": 0.06960167735815048,
      "learning_rate": 3.514422466376857e-06,
      "loss": 0.3036,
      "step": 665
    },
    {
      "epoch": 1.6708229426433916,
      "grad_norm": 0.07343805581331253,
      "learning_rate": 3.2682186078036303e-06,
      "loss": 0.2983,
      "step": 670
    },
    {
      "epoch": 1.683291770573566,
      "grad_norm": 0.08403794467449188,
      "learning_rate": 3.030351032463341e-06,
      "loss": 0.3029,
      "step": 675
    },
    {
      "epoch": 1.6957605985037407,
      "grad_norm": 0.07004518061876297,
      "learning_rate": 2.8009109860502176e-06,
      "loss": 0.3161,
      "step": 680
    },
    {
      "epoch": 1.708229426433915,
      "grad_norm": 0.08551249653100967,
      "learning_rate": 2.5799864814692904e-06,
      "loss": 0.2799,
      "step": 685
    },
    {
      "epoch": 1.7206982543640899,
      "grad_norm": 0.08994453400373459,
      "learning_rate": 2.3676622650747604e-06,
      "loss": 0.3199,
      "step": 690
    },
    {
      "epoch": 1.7331670822942642,
      "grad_norm": 0.07652537524700165,
      "learning_rate": 2.1640197841614086e-06,
      "loss": 0.2765,
      "step": 695
    },
    {
      "epoch": 1.745635910224439,
      "grad_norm": 0.07537612318992615,
      "learning_rate": 1.9691371557215096e-06,
      "loss": 0.2986,
      "step": 700
    },
    {
      "epoch": 1.7581047381546133,
      "grad_norm": 0.08738798648118973,
      "learning_rate": 1.7830891364792573e-06,
      "loss": 0.3137,
      "step": 705
    },
    {
      "epoch": 1.770573566084788,
      "grad_norm": 0.08610476553440094,
      "learning_rate": 1.6059470942141912e-06,
      "loss": 0.3068,
      "step": 710
    },
    {
      "epoch": 1.7830423940149625,
      "grad_norm": 0.07852538675069809,
      "learning_rate": 1.4377789803845964e-06,
      "loss": 0.2971,
      "step": 715
    },
    {
      "epoch": 1.7955112219451372,
      "grad_norm": 0.08171375095844269,
      "learning_rate": 1.2786493040614245e-06,
      "loss": 0.281,
      "step": 720
    },
    {
      "epoch": 1.8079800498753116,
      "grad_norm": 0.07712515443563461,
      "learning_rate": 1.1286191071826824e-06,
      "loss": 0.3003,
      "step": 725
    },
    {
      "epoch": 1.8204488778054864,
      "grad_norm": 0.07640130817890167,
      "learning_rate": 9.877459411378326e-07,
      "loss": 0.2982,
      "step": 730
    },
    {
      "epoch": 1.8329177057356607,
      "grad_norm": 0.09415088593959808,
      "learning_rate": 8.560838446911607e-07,
      "loss": 0.2919,
      "step": 735
    },
    {
      "epoch": 1.8453865336658355,
      "grad_norm": 0.07435153424739838,
      "learning_rate": 7.336833232525625e-07,
      "loss": 0.2787,
      "step": 740
    },
    {
      "epoch": 1.8578553615960098,
      "grad_norm": 0.07659658789634705,
      "learning_rate": 6.205913295037475e-07,
      "loss": 0.2953,
      "step": 745
    },
    {
      "epoch": 1.8703241895261846,
      "grad_norm": 0.08028612285852432,
      "learning_rate": 5.168512453872288e-07,
      "loss": 0.3198,
      "step": 750
    },
    {
      "epoch": 1.882793017456359,
      "grad_norm": 0.07494588941335678,
      "learning_rate": 4.2250286546509366e-07,
      "loss": 0.2757,
      "step": 755
    },
    {
      "epoch": 1.8952618453865338,
      "grad_norm": 0.08031202107667923,
      "learning_rate": 3.375823816538476e-07,
      "loss": 0.2687,
      "step": 760
    },
    {
      "epoch": 1.907730673316708,
      "grad_norm": 0.1463918387889862,
      "learning_rate": 2.6212236934124166e-07,
      "loss": 0.2904,
      "step": 765
    },
    {
      "epoch": 1.9201995012468829,
      "grad_norm": 0.0829106792807579,
      "learning_rate": 1.9615177489038793e-07,
      "loss": 0.2968,
      "step": 770
    },
    {
      "epoch": 1.9326683291770572,
      "grad_norm": 0.07514077425003052,
      "learning_rate": 1.3969590453598858e-07,
      "loss": 0.282,
      "step": 775
    },
    {
      "epoch": 1.945137157107232,
      "grad_norm": 0.07502910494804382,
      "learning_rate": 9.27764146768928e-08,
      "loss": 0.31,
      "step": 780
    },
    {
      "epoch": 1.9576059850374063,
      "grad_norm": 0.09159159660339355,
      "learning_rate": 5.54113035687226e-08,
      "loss": 0.2805,
      "step": 785
    },
    {
      "epoch": 1.9700748129675811,
      "grad_norm": 0.07086782157421112,
      "learning_rate": 2.761490441976211e-08,
      "loss": 0.2869,
      "step": 790
    },
    {
      "epoch": 1.9825436408977555,
      "grad_norm": 0.08850576728582382,
      "learning_rate": 9.397879892777961e-09,
      "loss": 0.3079,
      "step": 795
    },
    {
      "epoch": 1.9950124688279303,
      "grad_norm": 0.07842981815338135,
      "learning_rate": 7.672180148132757e-10,
      "loss": 0.311,
      "step": 800
    },
    {
      "epoch": 2.0,
      "step": 802,
      "total_flos": 6.289874027179672e+18,
      "train_loss": 0.31329592913760806,
      "train_runtime": 9614.2134,
      "train_samples_per_second": 8.008,
      "train_steps_per_second": 0.083
    }
  ],
  "logging_steps": 5,
  "max_steps": 802,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.289874027179672e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}