diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/config.json b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..bbac9faf6fe6f5c31d66b774a6c3e3a506e45fb9 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 7, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.001, + "adam_lr": 0.002, + "base_dir": "logs_qa_muon_gated/diff_mode", + "sgd_lr": 0.01, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "81b05022-4380-4a27-af4c-a6f032bc1f83", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/fixed_eval_indices.json b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 
664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 
980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 
357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 
987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..745498b7da206c2872197b55b00fa4a57f7abfb2 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c28175bcb21d3cfdd241541f33d08aa60a546ab0ca2b6ebbf5d7c355e336bd88 +size 324072 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..4a5436bed8a59086a5be040cb7078d9d8087041d --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e408b6ab111862e2c5f9cd3b3c81883f0ac83b16f85e5e489da48573e0298cf5 +size 415293 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..ee519510eb56148a5efc0e7b4a553d790903aa68 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4c5bd37000d8bb1697b8d450b8385b1df052cd30bf6841652f5a84e059f1f6d +size 93944 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..62e35d03ea3038e7bc2a0f79e716df932ef49a62 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7348955475febf14709b490ad021ad9e107cfc9cb04d0c122f58bf65db7360a3 +size 111308 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/training_log_81b05022-4380-4a27-af4c-a6f032bc1f83.txt b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/training_log_81b05022-4380-4a27-af4c-a6f032bc1f83.txt new file mode 100644 index 0000000000000000000000000000000000000000..4e749cf42a610f4c3b8a1bbb9f7894ef7e86a443 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/training_log_81b05022-4380-4a27-af4c-a6f032bc1f83.txt @@ -0,0 +1,5618 @@ +[2025-09-09 
13:02:51] [Rank 0] PRINT: --- Script Start: Tue Sep 9 13:02:51 2025 --- +[2025-09-09 13:02:51] [Rank 0] PRINT: --- Script Start: Tue Sep 9 13:02:51 2025 --- +[2025-09-09 13:02:51] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=7, model_parameterization='gated', per_group_k=100, muon_lr=0.001, adam_lr=0.002, base_dir='logs_qa_muon_gated/diff_mode', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-09 13:02:51] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=7, model_parameterization='gated', per_group_k=100, muon_lr=0.001, adam_lr=0.002, base_dir='logs_qa_muon_gated/diff_mode', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-09 13:02:51] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-09 13:02:51] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-09 13:02:51] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-09 13:02:51] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-09 13:02:51] [Rank 0] PRINT: Run directory: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42 +[2025-09-09 13:02:51] [Rank 0] PRINT: Run directory: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42 +[2025-09-09 13:02:51] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, 
"magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup
+logfile = None
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir) # Base log directory for this experiment family
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ...
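+# Worked example, for illustration, of the power-law class distribution that
+# generate_powerlaw_selection_counts below constructs (here with m=3):
+#   group 0: 1 class,   2**3 = 8 samples per class
+#   group 1: 1 class,   2**2 = 4 samples per class
+#   group 2: 2 classes, 2**1 = 2 samples per class
+#   group 3: 4 classes, 2**0 = 1 sample per class
+# i.e. selection_counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+# and class_groups     == [0, 1, 2, 2, 3, 3, 3, 3]
+# ...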
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
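Evaluation loop. (Illustrative note: "FTA" below is greedy first-token
+    # accuracy -- a sample counts as correct when the argmax of the logits at
+    # the last prompt position equals the first token id of " " + answer.)
+    # 3.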
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
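Aggregate results. (For reference: total_acc_weighted = sum_g correct_g / sum_g n_g
+    # pools all samples, while total_acc_unweighted = mean_g(correct_g / n_g)
+    # gives every group equal weight; the two differ whenever group sizes are
+    # skewed. total_acc is set to the unweighted variant.)
+    # 4.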
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
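+    If num_samples is given, a stratified sample is drawn: each class keeps
+    max(1, int(len(items) * num_samples / len(qa_data))) items, so the class
+    proportions of the full set are approximately preserved.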
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK Attn, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
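+    # Quick-reference summary of the matrix split across the modes above and
+    # below (embeddings, lm_head and scalar params always go to Adam):
+    #   mode  Muon                 Adam (extra matrices)
+    #   0     QK, VO, MLP          -
+    #   1     QK                   VO, MLP
+    #   2     VO                   QK, MLP
+    #   3     QK, VO               MLP
+    #   4     MLP                  QK, VO
+    #   5     -                    QK, VO, MLP
+    #   6     W_2                  QK, VO, W_1
+    #   7     VO, MLP              QK
+    #   8     VO, W_2              QK, W_1
+    #   9     (pure SGD+momentum on all parameters)
+    #   10    W_O, MLP             QK, V
+    #   13    W_O, W_2             QK, V, W_1
+    #   14    W_O                  QK, V, MLP
+    #   15    W_V                  QK, W_O, MLP
+    #   16    QK, V                W_O, MLP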
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 here if Adam regularization is needed
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + muon_lr = exp_args.muon_lr + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 here if Adam weight decay is desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
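    # The elif chain above can be cross-checked against a compact mode table; a
    # minimal sketch reusing the group lists defined in this branch. The table is
    # illustrative only and is not used by the run:
    _mode_table = {
        0: (all_attn_matrices + all_mlp_matrices, []),
        1: (attn_qk_group, attn_vo_group + all_mlp_matrices),
        2: (attn_vo_group, attn_qk_group + all_mlp_matrices),
        3: (all_attn_matrices, all_mlp_matrices),
        4: (all_mlp_matrices, all_attn_matrices),
        5: ([], all_attn_matrices + all_mlp_matrices),
        6: (mlp_w2_group, all_attn_matrices + mlp_w1_group),
        7: (attn_vo_group + all_mlp_matrices, attn_qk_group),
        8: (attn_vo_group + mlp_w2_group, attn_qk_group + mlp_w1_group),
        10: (attn_o_params + all_mlp_matrices, attn_v_params + attn_qk_group),
        13: (attn_o_params + mlp_w2_group, attn_qk_group + attn_v_params + mlp_w1_group),
        14: (attn_o_params, attn_qk_group + attn_v_params + all_mlp_matrices),
        15: (attn_v_params, attn_qk_group + attn_o_params + all_mlp_matrices),
        16: (attn_v_params + attn_qk_group, attn_o_params + all_mlp_matrices),
    }
    if current_optimizer_mode in _mode_table:
        _muon_ref, _adam_ref = _mode_table[current_optimizer_mode]
        assert [id(p) for p in muon_params_target_list] == [id(p) for p in _muon_ref]
        assert [id(p) for p in adam_matrix_target_list] == [id(p) for p in _adam_ref]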
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
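# With num_iterations = 10000 and cooldown_frac = 0.8, the multiplier stays at 1.0
# for the first 20% of steps, then decays linearly to 0.1 at the final step. A
# minimal sanity sketch of that shape (pure arithmetic over get_lr and args above;
# illustrative, not part of the original run):
assert get_lr(0) == 1.0
assert get_lr(int(0.2 * args.num_iterations)) == 1.0
assert math.isclose(get_lr(int(0.6 * args.num_iterations)), 0.55, rel_tol=1e-6)
assert math.isclose(get_lr(args.num_iterations), 0.1, rel_tol=1e-6)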
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile the eager 'model'; 'model_compiled' is the handle used from here on
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter for building the fixed eval set: keep only samples parseable as "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, keeping only samples that can be parsed for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-09 13:02:51] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message 
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 here if Adam weight decay is desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
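    # Muon orthogonalizes each momentum-buffered update before applying it, typically
    # via a few Newton-Schulz iterations (ns_steps=5 above). A minimal sketch of that
    # step, using the quintic coefficients from the public modded-nanogpt Muon; the
    # local optimizers/MUON.py may differ in detail, so this is illustrative only:
    def _ns_orthogonalize(G: Tensor, steps: int = 5) -> Tensor:
        # Drive the singular values of G toward 1, approximating U @ V^T from its SVD.
        a, b, c = 3.4445, -4.7750, 2.0315
        X = G.bfloat16()
        if G.size(-2) > G.size(-1):
            X = X.mT  # iterate on the wide orientation
        X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7)  # scale so the top singular value is <= 1
        for _ in range(steps):
            A = X @ X.mT
            B = b * A + c * A @ A
            X = a * X + B @ X
        if G.size(-2) > G.size(-1):
            X = X.mT
        return X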
+    print0(f"PRINT: Optimizers configured. 
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+    muon_lr = exp_args.muon_lr
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in the CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0("PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0("PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0("PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0("PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # SGD + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O Attn, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O Attn only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V Attn only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # consider adding weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
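+# The flatten-and-dedupe logic above is repeated in both parameterization
+# branches; an equivalent shared helper would look like this (illustrative
+# sketch only, not part of the logged run):
+#
+# def flatten_unique(groups):
+#     """Flatten nested parameter lists, dropping Nones and duplicates by id."""
+#     seen, flat = set(), []
+#     for g in groups:
+#         for p in (g if isinstance(g, list) else [g]):
+#             if p is not None and id(p) not in seen:
+#                 seen.add(id(p))
+#                 flat.append(p)
+#     return flat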
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert; would fail on the last step when step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x = 1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
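+# Worked example for get_lr, assuming this run's settings (num_iterations=10000,
+# cooldown_frac=0.8): the multiplier stays at 1.0 while x = step/10000 < 0.2,
+# then decays linearly toward 0.1:
+#   get_lr(0)     == 1.0
+#   get_lr(2000)  == 1.0   (w = 1 at the cooldown boundary)
+#   get_lr(6000)  == 0.55  (w = 0.5)
+#   get_lr(10000) == 0.1   (w = 0)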
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile 'model' here; 'model_compiled' does not exist before this line
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter for building the fixed eval set: require a parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, keeping only samples that can be parsed for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # QA_JSONL_PATH / M_FOR_POWERLAW are already defined above
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
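+    # Shape of fixed_idx (illustrative): {"<group_id>": [line_idx, ...], ...},
+    # with at most per_group_k indices per group. Assuming this run's
+    # per_group_k=100 and the 16 power-law groups (0..15) seen in the log
+    # below, this yields the "Fixed-eval set loaded with 1600 samples"
+    # reported at each detailed evaluation.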
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0("PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    # Muon momentum warmup: ramps linearly from 0.85 at step 0 to 0.95 by step 300
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
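+# The remainder of this file is the rank-0 runtime log. A minimal sketch
+# (illustrative only; "log.txt" is a hypothetical path) for pulling
+# (step, train_time_ms, step_avg_ms) out of the step lines below:
+#
+# import re
+# step_re = re.compile(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms")
+# with open("log.txt") as f:
+#     points = [(int(m[1]), int(m[2]), float(m[3])) for m in step_re.finditer(f.read())]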
+[2025-09-09 13:02:51] [Rank 0] PRINT: Constructing model...
+[2025-09-09 13:02:53] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-09 13:02:53] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-09 13:02:53] [Rank 0] PRINT: Testing model forward function:
+[2025-09-09 13:03:01] [Rank 0] PRINT: Model test - Result type:
+[2025-09-09 13:03:01] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-09 13:03:01] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-09 13:03:01] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-09 13:03:01] [Rank 0] PRINT: Model returns:
+[2025-09-09 13:03:01] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-09 13:03:01] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 7
+[2025-09-09 13:03:02] [Rank 0] PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: 0.002).
+[2025-09-09 13:03:02] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-09-09 13:03:02] [Rank 0] PRINT: Muon optimizer is active with 48 parameters.
+[2025-09-09 13:03:02] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-09 13:03:11] [Rank 0] PRINT: Model compilation complete.
+[2025-09-09 13:03:11] [Rank 0] PRINT: Starting warmup...
+[2025-09-09 13:09:34] [Rank 0] PRINT: Warmup complete.
+[2025-09-09 13:09:34] [Rank 0] PRINT: Starting training...
+[2025-09-09 13:09:41] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/fixed_eval_indices.json
+[2025-09-09 13:09:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:12:45] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-09 13:13:27] [Rank 0] step:21/10000 train_time:42398ms step_avg:2018.96ms
+[2025-09-09 13:13:28] [Rank 0] step:41/10000 train_time:43215ms step_avg:1054.02ms
+[2025-09-09 13:13:29] [Rank 0] step:61/10000 train_time:44030ms step_avg:721.80ms
+[2025-09-09 13:13:29] [Rank 0] step:81/10000 train_time:44844ms step_avg:553.63ms
+[2025-09-09 13:13:30] [Rank 0] step:101/10000 train_time:45659ms step_avg:452.07ms
+[2025-09-09 13:13:31] [Rank 0] step:121/10000 train_time:46474ms step_avg:384.09ms
+[2025-09-09 13:13:32] [Rank 0] step:141/10000 train_time:47290ms step_avg:335.39ms
+[2025-09-09 13:13:33] [Rank 0] step:161/10000 train_time:48105ms step_avg:298.79ms
+[2025-09-09 13:13:34] [Rank 0] step:181/10000 train_time:48920ms step_avg:270.28ms
+[2025-09-09 13:13:34] [Rank 0] step:201/10000 train_time:49736ms step_avg:247.44ms
+[2025-09-09 13:13:35] [Rank 0] step:221/10000 train_time:50552ms step_avg:228.74ms
+[2025-09-09 13:13:36] [Rank 0] step:241/10000 train_time:51368ms step_avg:213.14ms
+[2025-09-09 13:13:37] [Rank 0] step:261/10000 train_time:52184ms step_avg:199.94ms
+[2025-09-09 13:13:38] [Rank 0] step:281/10000 train_time:53000ms step_avg:188.61ms
+[2025-09-09 13:13:38] [Rank 0] step:301/10000 train_time:53815ms step_avg:178.79ms
+[2025-09-09 13:13:39] [Rank 0] step:321/10000 train_time:54631ms step_avg:170.19ms
+[2025-09-09 13:13:40] [Rank 0] step:341/10000 train_time:55446ms step_avg:162.60ms
+[2025-09-09 13:13:41] [Rank 0] step:361/10000 train_time:56261ms step_avg:155.85ms
+[2025-09-09 13:13:42] [Rank 0] step:381/10000 train_time:57077ms step_avg:149.81ms
+[2025-09-09 13:13:43] [Rank 0] step:401/10000 train_time:57894ms step_avg:144.37ms
+[2025-09-09 13:13:43] [Rank 0] step:421/10000 train_time:58710ms step_avg:139.45ms
+[2025-09-09 13:13:44] [Rank 0] step:441/10000 train_time:59526ms step_avg:134.98ms
+[2025-09-09 13:13:45] [Rank 0] step:461/10000 train_time:60342ms step_avg:130.89ms
+[2025-09-09 13:13:46] [Rank 0] step:481/10000 train_time:61159ms step_avg:127.15ms
+[2025-09-09 13:13:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:13:47] [Rank 0] PRINT: step:500/10000 train_loss:2.9260 val_loss:1.0558 train_time:61978ms step_avg:123.96ms
+[2025-09-09 13:13:47] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:13:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:15:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:15:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:15:09] [Rank 0] Total Loss: 3.7311
+[2025-09-09 13:15:09] [Rank 0] Total FTA (Unweighted): 0.5244
+[2025-09-09 13:15:09] [Rank 0] Total FTA (Weighted): 0.5244
+[2025-09-09 13:15:09] [Rank 0] Group 0 Loss: 3.2243
+[2025-09-09 13:15:09] [Rank 0] Group 1 Loss: 3.1185
+[2025-09-09 13:15:09] [Rank 0] Group 2 Loss: 3.0458
+[2025-09-09 13:15:09] [Rank 0] Group 3 Loss: 3.2831
+[2025-09-09 13:15:09] [Rank 0] Group 4 Loss: 3.3483
+[2025-09-09 13:15:09] [Rank 0] Group 5 Loss: 3.4593
+[2025-09-09 13:15:09] [Rank 0] Group 6 Loss: 3.4631
+[2025-09-09 13:15:09] [Rank 0] Group 7 Loss: 3.5470
+[2025-09-09 13:15:09] [Rank 0] Group 8 Loss: 3.7908
+[2025-09-09 13:15:09] [Rank 0] Group 9 Loss: 3.8862
+[2025-09-09 13:15:09] [Rank 0] Group 10 Loss: 4.1027
+[2025-09-09 13:15:09] [Rank 0] Group 11 Loss: 4.1692
+[2025-09-09 13:15:09] [Rank 0] Group 12 Loss: 4.2465
+[2025-09-09 13:15:09] [Rank 0] Group 13 Loss: 4.3114
+[2025-09-09 13:15:09] [Rank 0] Group 14 Loss: 4.3421
+[2025-09-09 13:15:09] [Rank 0] Group 15 Loss: 4.3595
+[2025-09-09 13:15:09] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:15:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:15:09] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:15:09] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:15:09] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:15:09] [Rank 0] Group 5 FTA: 0.8600
+[2025-09-09 13:15:09] [Rank 0] Group 6 FTA: 0.5500
+[2025-09-09 13:15:09] [Rank 0] Group 7 FTA: 0.5300
+[2025-09-09 13:15:09] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-09 13:15:09] [Rank 0] Group 9 FTA: 0.2900
+[2025-09-09 13:15:09] [Rank 0] Group 10 FTA: 0.2400
+[2025-09-09 13:15:09] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-09 13:15:09] [Rank 0] Group 12 FTA: 0.0700
+[2025-09-09 13:15:09] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-09 13:15:09] [Rank 0] Group 14 FTA: 0.0700
+[2025-09-09 13:15:09] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-09 13:15:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:15:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:15:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:15:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:15:10] [Rank 0] step:501/10000 train_time:61996ms step_avg:123.74ms
+[2025-09-09 13:15:11] [Rank 0] step:521/10000 train_time:62823ms step_avg:120.58ms
+[2025-09-09 13:15:12] [Rank 0] step:541/10000 train_time:63639ms step_avg:117.63ms
+[2025-09-09 13:15:13] [Rank 0] step:561/10000 train_time:64708ms step_avg:115.34ms
+[2025-09-09 13:15:14] [Rank 0] step:581/10000 train_time:65799ms step_avg:113.25ms
+[2025-09-09 13:15:15] [Rank 0] step:601/10000 train_time:66614ms step_avg:110.84ms
+[2025-09-09 13:15:16] [Rank 0] step:621/10000 train_time:67429ms step_avg:108.58ms
+[2025-09-09 13:15:17] [Rank 0] step:641/10000 train_time:68243ms step_avg:106.46ms
+[2025-09-09 13:15:17] [Rank 0] step:661/10000 train_time:69058ms step_avg:104.48ms
+[2025-09-09 13:15:18] [Rank 0] step:681/10000 train_time:69873ms step_avg:102.60ms
+[2025-09-09 13:15:19] [Rank 0] step:701/10000 train_time:70689ms step_avg:100.84ms
+[2025-09-09 13:15:20] [Rank 0] step:721/10000 train_time:71504ms step_avg:99.17ms
+[2025-09-09 13:15:21] [Rank 0] step:741/10000 train_time:72318ms step_avg:97.60ms
+[2025-09-09 13:15:22] [Rank 0] step:761/10000 train_time:73137ms step_avg:96.11ms
+[2025-09-09 13:15:22] [Rank 0] step:781/10000 train_time:73957ms step_avg:94.69ms
+[2025-09-09 13:15:23] [Rank 0] step:801/10000 train_time:74776ms step_avg:93.35ms
+[2025-09-09 13:15:24] [Rank 0] step:821/10000 train_time:75868ms step_avg:92.41ms
+[2025-09-09 13:15:25] [Rank 0] step:841/10000 train_time:76690ms step_avg:91.19ms
+[2025-09-09 13:15:26] [Rank 0] step:861/10000 train_time:77508ms step_avg:90.02ms
+[2025-09-09 13:15:27] [Rank 0] step:881/10000 train_time:78330ms step_avg:88.91ms
+[2025-09-09 13:15:28] [Rank 0] step:901/10000 train_time:79149ms step_avg:87.85ms
+[2025-09-09 13:15:28] [Rank 0] step:921/10000 train_time:79969ms step_avg:86.83ms
+[2025-09-09 13:15:29] [Rank 0] step:941/10000 train_time:80788ms step_avg:85.85ms
+[2025-09-09 13:15:30] [Rank 0] step:961/10000 train_time:81608ms step_avg:84.92ms
+[2025-09-09 13:15:31] [Rank 0] step:981/10000 train_time:82428ms step_avg:84.02ms
+[2025-09-09 13:15:32] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:15:32] [Rank 0] PRINT: step:1000/10000 train_loss:0.9243 val_loss:0.8252 train_time:83252ms step_avg:83.25ms
+[2025-09-09 13:15:32] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:15:32] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:16:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:16:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:16:55] [Rank 0] Total Loss: 3.9932
+[2025-09-09 13:16:55] [Rank 0] Total FTA (Unweighted): 0.7131
+[2025-09-09 13:16:55] [Rank 0] Total FTA (Weighted): 0.7131
+[2025-09-09 13:16:55] [Rank 0] Group 0 Loss: 3.7328
+[2025-09-09 13:16:55] [Rank 0] Group 1 Loss: 3.5551
+[2025-09-09 13:16:55] [Rank 0] Group 2 Loss: 3.4327
+[2025-09-09 13:16:55] [Rank 0] Group 3 Loss: 3.7960
+[2025-09-09 13:16:55] [Rank 0] Group 4 Loss: 3.7897
+[2025-09-09 13:16:55] [Rank 0] Group 5 Loss: 3.8310
+[2025-09-09 13:16:55] [Rank 0] Group 6 Loss: 3.7497
+[2025-09-09 13:16:55] [Rank 0] Group 7 Loss: 3.7918
+[2025-09-09 13:16:55] [Rank 0] Group 8 Loss: 3.9739
+[2025-09-09 13:16:55] [Rank 0] Group 9 Loss: 3.9593
+[2025-09-09 13:16:55] [Rank 0] Group 10 Loss: 4.1611
+[2025-09-09 13:16:55] [Rank 0] Group 11 Loss: 4.2506
+[2025-09-09 13:16:55] [Rank 0] Group 12 Loss: 4.3389
+[2025-09-09 13:16:55] [Rank 0] Group 13 Loss: 4.4342
+[2025-09-09 13:16:55] [Rank 0] Group 14 Loss: 4.4913
+[2025-09-09 13:16:55] [Rank 0] Group 15 Loss: 4.6031
+[2025-09-09 13:16:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:16:55] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:16:55] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:16:55] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:16:55] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:16:55] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:16:55] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:16:55] [Rank 0] Group 7 FTA: 0.9600
+[2025-09-09 13:16:55] [Rank 0] Group 8 FTA: 0.8400
+[2025-09-09 13:16:55] [Rank 0] Group 9 FTA: 0.6800
+[2025-09-09 13:16:55] [Rank 0] Group 10 FTA: 0.7100
+[2025-09-09 13:16:55] [Rank 0] Group 11 FTA: 0.4700
+[2025-09-09 13:16:55] [Rank 0] Group 12 FTA: 0.3000
+[2025-09-09 13:16:55] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-09 13:16:55] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-09 13:16:55] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-09 13:16:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:16:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:16:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:16:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:16:56] [Rank 0] step:1001/10000 train_time:83269ms step_avg:83.19ms
+[2025-09-09 13:16:57] [Rank 0] step:1021/10000 train_time:84091ms step_avg:82.36ms
+[2025-09-09 13:16:58] [Rank 0] step:1041/10000 train_time:84910ms step_avg:81.57ms
+[2025-09-09 13:16:59] [Rank 0] step:1061/10000 train_time:85731ms step_avg:80.80ms
+[2025-09-09 13:17:00] [Rank 0] step:1081/10000 train_time:86550ms step_avg:80.07ms
+[2025-09-09 13:17:01] [Rank 0] step:1101/10000 train_time:87370ms step_avg:79.36ms
+[2025-09-09 13:17:01] [Rank 0] step:1121/10000 train_time:88190ms step_avg:78.67ms
+[2025-09-09 13:17:02] [Rank 0] step:1141/10000 train_time:89011ms step_avg:78.01ms
+[2025-09-09 13:17:03] [Rank 0] step:1161/10000 train_time:89831ms step_avg:77.37ms
+[2025-09-09 13:17:04] [Rank 0] step:1181/10000 train_time:90652ms step_avg:76.76ms
+[2025-09-09 13:17:05] [Rank 0] step:1201/10000 train_time:91473ms step_avg:76.16ms
+[2025-09-09 13:17:05] [Rank 0] step:1221/10000 train_time:92293ms step_avg:75.59ms
+[2025-09-09 13:17:06] [Rank 0] step:1241/10000 train_time:93113ms step_avg:75.03ms
+[2025-09-09 13:17:07] [Rank 0] step:1261/10000 train_time:93935ms step_avg:74.49ms
+[2025-09-09 13:17:08] [Rank 0] step:1281/10000 train_time:94755ms step_avg:73.97ms
+[2025-09-09 13:17:09] [Rank 0] step:1301/10000 train_time:95574ms step_avg:73.46ms
+[2025-09-09 13:17:10] [Rank 0] step:1321/10000 train_time:96394ms step_avg:72.97ms
+[2025-09-09 13:17:10] [Rank 0] step:1341/10000 train_time:97214ms step_avg:72.49ms
+[2025-09-09 13:17:11] [Rank 0] step:1361/10000 train_time:98034ms step_avg:72.03ms
+[2025-09-09 13:17:12] [Rank 0] step:1381/10000 train_time:98855ms step_avg:71.58ms
+[2025-09-09 13:17:13] [Rank 0] step:1401/10000 train_time:99676ms step_avg:71.15ms
+[2025-09-09 13:17:14] [Rank 0] step:1421/10000 train_time:100497ms step_avg:70.72ms
+[2025-09-09 13:17:14] [Rank 0] step:1441/10000 train_time:101317ms step_avg:70.31ms
+[2025-09-09 13:17:15] [Rank 0] step:1461/10000 train_time:102137ms step_avg:69.91ms
+[2025-09-09 13:17:16] [Rank 0] step:1481/10000 train_time:102958ms step_avg:69.52ms
+[2025-09-09 13:17:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:17:17] [Rank 0] PRINT: step:1500/10000 train_loss:0.7938 val_loss:0.7484 train_time:103780ms step_avg:69.19ms
+[2025-09-09 13:17:17] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:17:18] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:18:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:18:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:18:39] [Rank 0] Total Loss: 4.2748
+[2025-09-09 13:18:39] [Rank 0] Total FTA (Unweighted): 0.7950
+[2025-09-09 13:18:39] [Rank 0] Total FTA (Weighted): 0.7950
+[2025-09-09 13:18:39] [Rank 0] Group 0 Loss: 4.0856
+[2025-09-09 13:18:39] [Rank 0] Group 1 Loss: 3.9430
+[2025-09-09 13:18:39] [Rank 0] Group 2 Loss: 3.7838
+[2025-09-09 13:18:39] [Rank 0] Group 3 Loss: 4.1151
+[2025-09-09 13:18:39] [Rank 0] Group 4 Loss: 4.1129
+[2025-09-09 13:18:39] [Rank 0] Group 5 Loss: 4.1464
+[2025-09-09 13:18:39] [Rank 0] Group 6 Loss: 4.0761
+[2025-09-09 13:18:39] [Rank 0] Group 7 Loss: 4.1354
+[2025-09-09 13:18:39] [Rank 0] Group 8 Loss: 4.2710
+[2025-09-09 13:18:39] [Rank 0] Group 9 Loss: 4.2285
+[2025-09-09 13:18:39] [Rank 0] Group 10 Loss: 4.3902
+[2025-09-09 13:18:39] [Rank 0] Group 11 Loss: 4.4530
+[2025-09-09 13:18:39] [Rank 0] Group 12 Loss: 4.5132
+[2025-09-09 13:18:39] [Rank 0] Group 13 Loss: 4.6335
+[2025-09-09 13:18:39] [Rank 0] Group 14 Loss: 4.7129
+[2025-09-09 13:18:39] [Rank 0] Group 15 Loss: 4.7966
+[2025-09-09 13:18:39] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:18:39] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:18:39] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:18:39] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:18:39] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:18:39] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:18:39] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:18:39] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:18:39] [Rank 0] Group 8 FTA: 0.9900
+[2025-09-09 13:18:39] [Rank 0] Group 9 FTA: 0.8700
+[2025-09-09 13:18:39] [Rank 0] Group 10 FTA: 0.9200
+[2025-09-09 13:18:39] [Rank 0] Group 11 FTA: 0.7700
+[2025-09-09 13:18:39] [Rank 0] Group 12 FTA: 0.6200
+[2025-09-09 13:18:39] [Rank 0] Group 13 FTA: 0.3000
+[2025-09-09 13:18:39] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-09 13:18:39] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-09 13:18:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:18:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:18:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:18:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:18:41] [Rank 0] step:1501/10000 train_time:103796ms step_avg:69.15ms
+[2025-09-09 13:18:41] [Rank 0] step:1521/10000 train_time:104615ms step_avg:68.78ms
+[2025-09-09 13:18:42] [Rank 0] step:1541/10000 train_time:105435ms step_avg:68.42ms
+[2025-09-09 13:18:43] [Rank 0] step:1561/10000 train_time:106256ms step_avg:68.07ms
+[2025-09-09 13:18:44] [Rank 0] step:1581/10000 train_time:107076ms step_avg:67.73ms
step_avg:67.73ms +[2025-09-09 13:18:45] [Rank 0] step:1601/10000 train_time:107897ms step_avg:67.39ms +[2025-09-09 13:18:45] [Rank 0] step:1601/10000 train_time:107897ms step_avg:67.39ms +[2025-09-09 13:18:46] [Rank 0] step:1621/10000 train_time:108719ms step_avg:67.07ms +[2025-09-09 13:18:46] [Rank 0] step:1621/10000 train_time:108719ms step_avg:67.07ms +[2025-09-09 13:18:47] [Rank 0] step:1641/10000 train_time:109809ms step_avg:66.92ms +[2025-09-09 13:18:47] [Rank 0] step:1641/10000 train_time:109809ms step_avg:66.92ms +[2025-09-09 13:18:47] [Rank 0] step:1661/10000 train_time:110630ms step_avg:66.60ms +[2025-09-09 13:18:47] [Rank 0] step:1661/10000 train_time:110630ms step_avg:66.60ms +[2025-09-09 13:18:48] [Rank 0] step:1681/10000 train_time:111450ms step_avg:66.30ms +[2025-09-09 13:18:48] [Rank 0] step:1681/10000 train_time:111450ms step_avg:66.30ms +[2025-09-09 13:18:49] [Rank 0] step:1701/10000 train_time:112271ms step_avg:66.00ms +[2025-09-09 13:18:49] [Rank 0] step:1701/10000 train_time:112271ms step_avg:66.00ms +[2025-09-09 13:18:50] [Rank 0] step:1721/10000 train_time:113091ms step_avg:65.71ms +[2025-09-09 13:18:50] [Rank 0] step:1721/10000 train_time:113091ms step_avg:65.71ms +[2025-09-09 13:18:51] [Rank 0] step:1741/10000 train_time:113914ms step_avg:65.43ms +[2025-09-09 13:18:51] [Rank 0] step:1741/10000 train_time:113914ms step_avg:65.43ms +[2025-09-09 13:18:52] [Rank 0] step:1761/10000 train_time:114733ms step_avg:65.15ms +[2025-09-09 13:18:52] [Rank 0] step:1761/10000 train_time:114733ms step_avg:65.15ms +[2025-09-09 13:18:52] [Rank 0] step:1781/10000 train_time:115553ms step_avg:64.88ms +[2025-09-09 13:18:52] [Rank 0] step:1781/10000 train_time:115553ms step_avg:64.88ms +[2025-09-09 13:18:53] [Rank 0] step:1801/10000 train_time:116373ms step_avg:64.62ms +[2025-09-09 13:18:53] [Rank 0] step:1801/10000 train_time:116373ms step_avg:64.62ms +[2025-09-09 13:18:54] [Rank 0] step:1821/10000 train_time:117192ms step_avg:64.36ms +[2025-09-09 13:18:54] [Rank 0] step:1821/10000 train_time:117192ms step_avg:64.36ms +[2025-09-09 13:18:55] [Rank 0] step:1841/10000 train_time:118012ms step_avg:64.10ms +[2025-09-09 13:18:55] [Rank 0] step:1841/10000 train_time:118012ms step_avg:64.10ms +[2025-09-09 13:18:56] [Rank 0] step:1861/10000 train_time:118833ms step_avg:63.85ms +[2025-09-09 13:18:56] [Rank 0] step:1861/10000 train_time:118833ms step_avg:63.85ms +[2025-09-09 13:18:56] [Rank 0] step:1881/10000 train_time:119653ms step_avg:63.61ms +[2025-09-09 13:18:56] [Rank 0] step:1881/10000 train_time:119653ms step_avg:63.61ms +[2025-09-09 13:18:57] [Rank 0] step:1901/10000 train_time:120473ms step_avg:63.37ms +[2025-09-09 13:18:57] [Rank 0] step:1901/10000 train_time:120473ms step_avg:63.37ms +[2025-09-09 13:18:58] [Rank 0] step:1921/10000 train_time:121294ms step_avg:63.14ms +[2025-09-09 13:18:58] [Rank 0] step:1921/10000 train_time:121294ms step_avg:63.14ms +[2025-09-09 13:18:59] [Rank 0] step:1941/10000 train_time:122115ms step_avg:62.91ms +[2025-09-09 13:18:59] [Rank 0] step:1941/10000 train_time:122115ms step_avg:62.91ms +[2025-09-09 13:19:00] [Rank 0] step:1961/10000 train_time:122936ms step_avg:62.69ms +[2025-09-09 13:19:00] [Rank 0] step:1961/10000 train_time:122936ms step_avg:62.69ms +[2025-09-09 13:19:01] [Rank 0] step:1981/10000 train_time:123766ms step_avg:62.48ms +[2025-09-09 13:19:01] [Rank 0] step:1981/10000 train_time:123766ms step_avg:62.48ms +[2025-09-09 13:19:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). 
Some tokens might be missed. +[2025-09-09 13:19:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 13:19:02] [Rank 0] PRINT: step:2000/10000 train_loss:0.7406 val_loss:0.7086 train_time:124590ms step_avg:62.30ms +[2025-09-09 13:19:02] [Rank 0] PRINT: step:2000/10000 train_loss:0.7406 val_loss:0.7086 train_time:124590ms step_avg:62.30ms +[2025-09-09 13:19:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 13:19:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 13:19:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 13:19:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 13:20:23] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 13:20:23] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 13:20:23] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 13:20:23] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 13:20:23] [Rank 0] Total Loss: 4.4459 +[2025-09-09 13:20:23] [Rank 0] Total Loss: 4.4459 +[2025-09-09 13:20:23] [Rank 0] Total FTA (Unweighted): 0.8394 +[2025-09-09 13:20:23] [Rank 0] Total FTA (Unweighted): 0.8394 +[2025-09-09 13:20:23] [Rank 0] Total FTA (Weighted): 0.8394 +[2025-09-09 13:20:23] [Rank 0] Total FTA (Weighted): 0.8394 +[2025-09-09 13:20:23] [Rank 0] Group 0 Loss: 4.3363 +[2025-09-09 13:20:23] [Rank 0] Group 0 Loss: 4.3363 +[2025-09-09 13:20:23] [Rank 0] Group 1 Loss: 4.1684 +[2025-09-09 13:20:23] [Rank 0] Group 1 Loss: 4.1684 +[2025-09-09 13:20:23] [Rank 0] Group 2 Loss: 3.9554 +[2025-09-09 13:20:23] [Rank 0] Group 2 Loss: 3.9554 +[2025-09-09 13:20:23] [Rank 0] Group 3 Loss: 4.3310 +[2025-09-09 13:20:23] [Rank 0] Group 3 Loss: 4.3310 +[2025-09-09 13:20:23] [Rank 0] Group 4 Loss: 4.3196 +[2025-09-09 13:20:23] [Rank 0] Group 4 Loss: 4.3196 +[2025-09-09 13:20:23] [Rank 0] Group 5 Loss: 4.3370 +[2025-09-09 13:20:23] [Rank 0] Group 5 Loss: 4.3370 +[2025-09-09 13:20:23] [Rank 0] Group 6 Loss: 4.2683 +[2025-09-09 13:20:23] [Rank 0] Group 6 Loss: 4.2683 +[2025-09-09 13:20:23] [Rank 0] Group 7 Loss: 4.3068 +[2025-09-09 13:20:23] [Rank 0] Group 7 Loss: 4.3068 +[2025-09-09 13:20:23] [Rank 0] Group 8 Loss: 4.4744 +[2025-09-09 13:20:23] [Rank 0] Group 8 Loss: 4.4744 +[2025-09-09 13:20:23] [Rank 0] Group 9 Loss: 4.4293 +[2025-09-09 13:20:23] [Rank 0] Group 9 Loss: 4.4293 +[2025-09-09 13:20:23] [Rank 0] Group 10 Loss: 4.5538 +[2025-09-09 13:20:23] [Rank 0] Group 10 Loss: 4.5538 +[2025-09-09 13:20:23] [Rank 0] Group 11 Loss: 4.5971 +[2025-09-09 13:20:23] [Rank 0] Group 11 Loss: 4.5971 +[2025-09-09 13:20:23] [Rank 0] Group 12 Loss: 4.6104 +[2025-09-09 13:20:23] [Rank 0] Group 12 Loss: 4.6104 +[2025-09-09 13:20:23] [Rank 0] Group 13 Loss: 4.7604 +[2025-09-09 13:20:23] [Rank 0] Group 13 Loss: 4.7604 +[2025-09-09 13:20:23] [Rank 0] Group 14 Loss: 4.7760 +[2025-09-09 13:20:23] [Rank 0] Group 14 Loss: 4.7760 +[2025-09-09 13:20:23] [Rank 0] Group 15 Loss: 4.9106 +[2025-09-09 13:20:23] [Rank 0] Group 15 Loss: 4.9106 +[2025-09-09 13:20:23] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 13:20:23] 
[Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 13:20:23] [Rank 0] Group 9 FTA: 0.9600 +[2025-09-09 13:20:23] [Rank 0] Group 9 FTA: 0.9600 +[2025-09-09 13:20:23] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-09 13:20:23] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-09 13:20:23] [Rank 0] Group 11 FTA: 0.8900 +[2025-09-09 13:20:23] [Rank 0] Group 11 FTA: 0.8900 +[2025-09-09 13:20:23] [Rank 0] Group 12 FTA: 0.7800 +[2025-09-09 13:20:23] [Rank 0] Group 12 FTA: 0.7800 +[2025-09-09 13:20:23] [Rank 0] Group 13 FTA: 0.4400 +[2025-09-09 13:20:23] [Rank 0] Group 13 FTA: 0.4400 +[2025-09-09 13:20:23] [Rank 0] Group 14 FTA: 0.2200 +[2025-09-09 13:20:23] [Rank 0] Group 14 FTA: 0.2200 +[2025-09-09 13:20:23] [Rank 0] Group 15 FTA: 0.1700 +[2025-09-09 13:20:23] [Rank 0] Group 15 FTA: 0.1700 +[2025-09-09 13:20:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png +[2025-09-09 13:20:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png +[2025-09-09 13:20:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png +[2025-09-09 13:20:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png +[2025-09-09 13:20:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png +[2025-09-09 13:20:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png +[2025-09-09 13:20:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png +[2025-09-09 13:20:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png +[2025-09-09 13:20:25] [Rank 0] step:2001/10000 train_time:124607ms step_avg:62.27ms +[2025-09-09 13:20:25] [Rank 0] step:2001/10000 train_time:124607ms step_avg:62.27ms +[2025-09-09 13:20:26] [Rank 0] step:2021/10000 train_time:126112ms step_avg:62.40ms +[2025-09-09 13:20:26] [Rank 0] step:2021/10000 train_time:126112ms step_avg:62.40ms +[2025-09-09 13:20:27] [Rank 0] step:2041/10000 train_time:126930ms step_avg:62.19ms +[2025-09-09 13:20:27] [Rank 0] step:2041/10000 train_time:126930ms step_avg:62.19ms +[2025-09-09 13:20:28] [Rank 0] step:2061/10000 train_time:127751ms step_avg:61.98ms +[2025-09-09 13:20:28] [Rank 0] step:2061/10000 train_time:127751ms step_avg:61.98ms +[2025-09-09 13:20:29] [Rank 0] step:2081/10000 train_time:128704ms step_avg:61.85ms +[2025-09-09 13:20:29] [Rank 0] step:2081/10000 train_time:128704ms step_avg:61.85ms +[2025-09-09 13:20:30] [Rank 0] step:2101/10000 train_time:129859ms step_avg:61.81ms +[2025-09-09 13:20:30] [Rank 0] step:2101/10000 train_time:129859ms step_avg:61.81ms +[2025-09-09 
13:20:31] [Rank 0] step:2121/10000 train_time:130680ms step_avg:61.61ms +[2025-09-09 13:20:31] [Rank 0] step:2121/10000 train_time:130680ms step_avg:61.61ms +[2025-09-09 13:20:32] [Rank 0] step:2141/10000 train_time:131502ms step_avg:61.42ms +[2025-09-09 13:20:32] [Rank 0] step:2141/10000 train_time:131502ms step_avg:61.42ms +[2025-09-09 13:20:33] [Rank 0] step:2161/10000 train_time:132324ms step_avg:61.23ms +[2025-09-09 13:20:33] [Rank 0] step:2161/10000 train_time:132324ms step_avg:61.23ms +[2025-09-09 13:20:33] [Rank 0] step:2181/10000 train_time:133145ms step_avg:61.05ms +[2025-09-09 13:20:33] [Rank 0] step:2181/10000 train_time:133145ms step_avg:61.05ms +[2025-09-09 13:20:34] [Rank 0] step:2201/10000 train_time:133963ms step_avg:60.86ms +[2025-09-09 13:20:34] [Rank 0] step:2201/10000 train_time:133963ms step_avg:60.86ms +[2025-09-09 13:20:35] [Rank 0] step:2221/10000 train_time:134784ms step_avg:60.69ms +[2025-09-09 13:20:35] [Rank 0] step:2221/10000 train_time:134784ms step_avg:60.69ms +[2025-09-09 13:20:36] [Rank 0] step:2241/10000 train_time:135611ms step_avg:60.51ms +[2025-09-09 13:20:36] [Rank 0] step:2241/10000 train_time:135611ms step_avg:60.51ms +[2025-09-09 13:20:37] [Rank 0] step:2261/10000 train_time:136438ms step_avg:60.34ms +[2025-09-09 13:20:37] [Rank 0] step:2261/10000 train_time:136438ms step_avg:60.34ms +[2025-09-09 13:20:38] [Rank 0] step:2281/10000 train_time:137264ms step_avg:60.18ms +[2025-09-09 13:20:38] [Rank 0] step:2281/10000 train_time:137264ms step_avg:60.18ms +[2025-09-09 13:20:38] [Rank 0] step:2301/10000 train_time:138091ms step_avg:60.01ms +[2025-09-09 13:20:38] [Rank 0] step:2301/10000 train_time:138091ms step_avg:60.01ms +[2025-09-09 13:20:39] [Rank 0] step:2321/10000 train_time:138919ms step_avg:59.85ms +[2025-09-09 13:20:39] [Rank 0] step:2321/10000 train_time:138919ms step_avg:59.85ms +[2025-09-09 13:20:40] [Rank 0] step:2341/10000 train_time:139746ms step_avg:59.69ms +[2025-09-09 13:20:40] [Rank 0] step:2341/10000 train_time:139746ms step_avg:59.69ms +[2025-09-09 13:20:41] [Rank 0] step:2361/10000 train_time:140573ms step_avg:59.54ms +[2025-09-09 13:20:41] [Rank 0] step:2361/10000 train_time:140573ms step_avg:59.54ms +[2025-09-09 13:20:42] [Rank 0] step:2381/10000 train_time:141402ms step_avg:59.39ms +[2025-09-09 13:20:42] [Rank 0] step:2381/10000 train_time:141402ms step_avg:59.39ms +[2025-09-09 13:20:42] [Rank 0] step:2401/10000 train_time:142227ms step_avg:59.24ms +[2025-09-09 13:20:42] [Rank 0] step:2401/10000 train_time:142227ms step_avg:59.24ms +[2025-09-09 13:20:43] [Rank 0] step:2421/10000 train_time:143054ms step_avg:59.09ms +[2025-09-09 13:20:43] [Rank 0] step:2421/10000 train_time:143054ms step_avg:59.09ms +[2025-09-09 13:20:44] [Rank 0] step:2441/10000 train_time:143881ms step_avg:58.94ms +[2025-09-09 13:20:44] [Rank 0] step:2441/10000 train_time:143881ms step_avg:58.94ms +[2025-09-09 13:20:45] [Rank 0] step:2461/10000 train_time:144709ms step_avg:58.80ms +[2025-09-09 13:20:45] [Rank 0] step:2461/10000 train_time:144709ms step_avg:58.80ms +[2025-09-09 13:20:46] [Rank 0] step:2481/10000 train_time:145536ms step_avg:58.66ms +[2025-09-09 13:20:46] [Rank 0] step:2481/10000 train_time:145536ms step_avg:58.66ms +[2025-09-09 13:20:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 13:20:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-09 13:20:47] [Rank 0] PRINT: step:2500/10000 train_loss:0.7097 val_loss:0.6828 train_time:146367ms step_avg:58.55ms
+[2025-09-09 13:20:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:20:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:22:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:22:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:22:09] [Rank 0] Total Loss: 4.5237
+[2025-09-09 13:22:09] [Rank 0] Total FTA (Unweighted): 0.8687
+[2025-09-09 13:22:09] [Rank 0] Total FTA (Weighted): 0.8688
+[2025-09-09 13:22:09] [Rank 0] Group 0 Loss: 4.3726
+[2025-09-09 13:22:09] [Rank 0] Group 1 Loss: 4.2814
+[2025-09-09 13:22:09] [Rank 0] Group 2 Loss: 4.0886
+[2025-09-09 13:22:09] [Rank 0] Group 3 Loss: 4.4380
+[2025-09-09 13:22:09] [Rank 0] Group 4 Loss: 4.3976
+[2025-09-09 13:22:09] [Rank 0] Group 5 Loss: 4.4289
+[2025-09-09 13:22:09] [Rank 0] Group 6 Loss: 4.3600
+[2025-09-09 13:22:09] [Rank 0] Group 7 Loss: 4.3831
+[2025-09-09 13:22:09] [Rank 0] Group 8 Loss: 4.5859
+[2025-09-09 13:22:09] [Rank 0] Group 9 Loss: 4.5002
+[2025-09-09 13:22:09] [Rank 0] Group 10 Loss: 4.6733
+[2025-09-09 13:22:09] [Rank 0] Group 11 Loss: 4.6675
+[2025-09-09 13:22:09] [Rank 0] Group 12 Loss: 4.6693
+[2025-09-09 13:22:09] [Rank 0] Group 13 Loss: 4.7918
+[2025-09-09 13:22:09] [Rank 0] Group 14 Loss: 4.8348
+[2025-09-09 13:22:09] [Rank 0] Group 15 Loss: 4.9067
+[2025-09-09 13:22:09] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:22:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:22:09] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:22:09] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:22:09] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:22:09] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:22:09] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:22:09] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:22:09] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:22:09] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:22:09] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-09 13:22:09] [Rank 0] Group 11 FTA: 0.9500
+[2025-09-09 13:22:09] [Rank 0] Group 12 FTA: 0.8600
+[2025-09-09 13:22:09] [Rank 0] Group 13 FTA: 0.6400
+[2025-09-09 13:22:09] [Rank 0] Group 14 FTA: 0.2900
+[2025-09-09 13:22:09] [Rank 0] Group 15 FTA: 0.1800
+[2025-09-09 13:22:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:22:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:22:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:22:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:22:11] [Rank 0] step:2501/10000 train_time:146384ms step_avg:58.53ms
+[2025-09-09 13:22:12] [Rank 0] step:2521/10000 train_time:147206ms step_avg:58.39ms
+[2025-09-09 13:22:13] [Rank 0] step:2541/10000 train_time:148032ms step_avg:58.26ms
+[2025-09-09 13:22:13] [Rank 0] step:2561/10000 train_time:148859ms step_avg:58.13ms
+[2025-09-09 13:22:14] [Rank 0] step:2581/10000 train_time:149687ms step_avg:58.00ms
+[2025-09-09 13:22:15] [Rank 0] step:2601/10000 train_time:150516ms step_avg:57.87ms
+[2025-09-09 13:22:16] [Rank 0] step:2621/10000 train_time:151340ms step_avg:57.74ms
+[2025-09-09 13:22:17] [Rank 0] step:2641/10000 train_time:152167ms step_avg:57.62ms
+[2025-09-09 13:22:17] [Rank 0] step:2661/10000 train_time:152994ms step_avg:57.49ms
+[2025-09-09 13:22:18] [Rank 0] step:2681/10000 train_time:153822ms step_avg:57.37ms
+[2025-09-09 13:22:19] [Rank 0] step:2701/10000 train_time:154650ms step_avg:57.26ms
+[2025-09-09 13:22:20] [Rank 0] step:2721/10000 train_time:155476ms step_avg:57.14ms
+[2025-09-09 13:22:21] [Rank 0] step:2741/10000 train_time:156303ms step_avg:57.02ms
+[2025-09-09 13:22:22] [Rank 0] step:2761/10000 train_time:157128ms step_avg:56.91ms
+[2025-09-09 13:22:22] [Rank 0] step:2781/10000 train_time:157954ms step_avg:56.80ms
+[2025-09-09 13:22:23] [Rank 0] step:2801/10000 train_time:158784ms step_avg:56.69ms
+[2025-09-09 13:22:25] [Rank 0] step:2821/10000 train_time:160286ms step_avg:56.82ms
+[2025-09-09 13:22:26] [Rank 0] step:2841/10000 train_time:161114ms step_avg:56.71ms
+[2025-09-09 13:22:26] [Rank 0] step:2861/10000 train_time:161941ms step_avg:56.60ms
+[2025-09-09 13:22:27] [Rank 0] step:2881/10000 train_time:162767ms step_avg:56.50ms
+[2025-09-09 13:22:28] [Rank 0] step:2901/10000 train_time:163594ms step_avg:56.39ms
+[2025-09-09 13:22:29] [Rank 0] step:2921/10000 train_time:164422ms step_avg:56.29ms
+[2025-09-09 13:22:30] [Rank 0] step:2941/10000 train_time:165248ms step_avg:56.19ms
+[2025-09-09 13:22:31] [Rank 0] step:2961/10000 train_time:166074ms step_avg:56.09ms
+[2025-09-09 13:22:31] [Rank 0] step:2981/10000 train_time:166900ms step_avg:55.99ms
+[2025-09-09 13:22:32] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:22:33] [Rank 0] PRINT: step:3000/10000 train_loss:0.6879 val_loss:0.6665 train_time:167729ms step_avg:55.91ms
+[2025-09-09 13:22:33] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:22:33] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:23:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:23:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:23:54] [Rank 0] Total Loss: 4.6241
+[2025-09-09 13:23:54] [Rank 0] Total FTA (Unweighted): 0.8894
+[2025-09-09 13:23:54] [Rank 0] Total FTA (Weighted): 0.8894
+[2025-09-09 13:23:54] [Rank 0] Group 0 Loss: 4.4789
+[2025-09-09 13:23:54] [Rank 0] Group 1 Loss: 4.3314
+[2025-09-09 13:23:54] [Rank 0] Group 2 Loss: 4.1590
+[2025-09-09 13:23:54] [Rank 0] Group 3 Loss: 4.5315
+[2025-09-09 13:23:54] [Rank 0] Group 4 Loss: 4.5080
+[2025-09-09 13:23:54] [Rank 0] Group 5 Loss: 4.5400
+[2025-09-09 13:23:54] [Rank 0] Group 6 Loss: 4.4923
+[2025-09-09 13:23:54] [Rank 0] Group 7 Loss: 4.5067
+[2025-09-09 13:23:54] [Rank 0] Group 8 Loss: 4.6881
+[2025-09-09 13:23:54] [Rank 0] Group 9 Loss: 4.6534
+[2025-09-09 13:23:54] [Rank 0] Group 10 Loss: 4.8001
+[2025-09-09 13:23:54] [Rank 0] Group 11 Loss: 4.7568
+[2025-09-09 13:23:54] [Rank 0] Group 12 Loss: 4.7913
+[2025-09-09 13:23:54] [Rank 0] Group 13 Loss: 4.8994
+[2025-09-09 13:23:54] [Rank 0] Group 14 Loss: 4.9156
+[2025-09-09 13:23:54] [Rank 0] Group 15 Loss: 4.9338
+[2025-09-09 13:23:54] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:23:54] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:23:54] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:23:54] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:23:54] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:23:54] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:23:54] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:23:54] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:23:54] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:23:54] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:23:54] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-09 13:23:54] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-09 13:23:54] [Rank 0] Group 12 FTA: 0.9300
+[2025-09-09 13:23:54] [Rank 0] Group 13 FTA: 0.7500
+[2025-09-09 13:23:54] [Rank 0] Group 14 FTA: 0.4000
+[2025-09-09 13:23:54] [Rank 0] Group 15 FTA: 0.1800
+[2025-09-09 13:23:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:23:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:23:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:23:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:23:56] [Rank 0] step:3001/10000 train_time:167746ms step_avg:55.90ms
+[2025-09-09 13:23:57] [Rank 0] step:3021/10000 train_time:168593ms step_avg:55.81ms
+[2025-09-09 13:23:57] [Rank 0] step:3041/10000 train_time:169421ms step_avg:55.71ms
+[2025-09-09 13:23:58] [Rank 0] step:3061/10000 train_time:170247ms step_avg:55.62ms
+[2025-09-09 13:23:59] [Rank 0] step:3081/10000 train_time:171074ms step_avg:55.53ms
+[2025-09-09 13:24:00] [Rank 0] step:3101/10000 train_time:171902ms step_avg:55.43ms
+[2025-09-09 13:24:01] [Rank 0] step:3121/10000 train_time:172729ms step_avg:55.34ms
+[2025-09-09 13:24:02] [Rank 0] step:3141/10000 train_time:173558ms step_avg:55.26ms
+[2025-09-09 13:24:02] [Rank 0] step:3161/10000 train_time:174388ms step_avg:55.17ms
+[2025-09-09 13:24:03] [Rank 0] step:3181/10000 train_time:175214ms step_avg:55.08ms
+[2025-09-09 13:24:04] [Rank 0] step:3201/10000 train_time:176042ms step_avg:55.00ms
+[2025-09-09 13:24:05] [Rank 0] step:3221/10000 train_time:176870ms step_avg:54.91ms
+[2025-09-09 13:24:06] [Rank 0] step:3241/10000 train_time:177697ms step_avg:54.83ms
+[2025-09-09 13:24:07] [Rank 0] step:3261/10000 train_time:178524ms step_avg:54.75ms
+[2025-09-09 13:24:07] [Rank 0] step:3281/10000 train_time:179352ms step_avg:54.66ms
+[2025-09-09 13:24:08] [Rank 0] step:3301/10000 train_time:180180ms step_avg:54.58ms
+[2025-09-09 13:24:09] [Rank 0] step:3321/10000 train_time:181010ms step_avg:54.50ms
+[2025-09-09 13:24:10] [Rank 0] step:3341/10000 train_time:181835ms step_avg:54.43ms
+[2025-09-09 13:24:11] [Rank 0] step:3361/10000 train_time:182661ms step_avg:54.35ms
+[2025-09-09 13:24:12] [Rank 0] step:3381/10000 train_time:183489ms step_avg:54.27ms
+[2025-09-09 13:24:12] [Rank 0] step:3401/10000 train_time:184316ms step_avg:54.19ms
+[2025-09-09 13:24:13] [Rank 0] step:3421/10000 train_time:185143ms step_avg:54.12ms
+[2025-09-09 13:24:14] [Rank 0] step:3441/10000 train_time:185970ms step_avg:54.05ms
+[2025-09-09 13:24:15] [Rank 0] step:3461/10000 train_time:186797ms step_avg:53.97ms
+[2025-09-09 13:24:16] [Rank 0] step:3481/10000 train_time:187623ms step_avg:53.90ms
+[2025-09-09 13:24:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:24:17] [Rank 0] PRINT: step:3500/10000 train_loss:0.6736 val_loss:0.6545 train_time:188460ms step_avg:53.85ms
+[2025-09-09 13:24:17] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:24:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:25:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:25:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:25:39] [Rank 0] Total Loss: 4.6430
+[2025-09-09 13:25:39] [Rank 0] Total FTA (Unweighted): 0.9094
+[2025-09-09 13:25:39] [Rank 0] Total FTA (Weighted): 0.9094
+[2025-09-09 13:25:39] [Rank 0] Group 0 Loss: 4.5441
+[2025-09-09 13:25:39] [Rank 0] Group 1 Loss: 4.3393
+[2025-09-09 13:25:39] [Rank 0] Group 2 Loss: 4.1622
+[2025-09-09 13:25:39] [Rank 0] Group 3 Loss: 4.5378
+[2025-09-09 13:25:39] [Rank 0] Group 4 Loss: 4.5170
+[2025-09-09 13:25:39] [Rank 0] Group 5 Loss: 4.5973
+[2025-09-09 13:25:39] [Rank 0] Group 6 Loss: 4.5015
+[2025-09-09 13:25:39] [Rank 0] Group 7 Loss: 4.5288
+[2025-09-09 13:25:39] [Rank 0] Group 8 Loss: 4.7124
+[2025-09-09 13:25:39] [Rank 0] Group 9 Loss: 4.6733
+[2025-09-09 13:25:39] [Rank 0] Group 10 Loss: 4.8149
+[2025-09-09 13:25:39] [Rank 0] Group 11 Loss: 4.8193
+[2025-09-09 13:25:39] [Rank 0] Group 12 Loss: 4.7947
+[2025-09-09 13:25:39] [Rank 0] Group 13 Loss: 4.9106
+[2025-09-09 13:25:39] [Rank 0] Group 14 Loss: 4.9236
+[2025-09-09 13:25:39] [Rank 0] Group 15 Loss: 4.9113
+[2025-09-09 13:25:39] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:25:39] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:25:39] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:25:39] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:25:39] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:25:39] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:25:39] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:25:39] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:25:39] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:25:39] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:25:39] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:25:39] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-09 13:25:39] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-09 13:25:39] [Rank 0] Group 13 FTA: 0.8800
+[2025-09-09 13:25:39] [Rank 0] Group 14 FTA: 0.4700
+[2025-09-09 13:25:39] [Rank 0] Group 15 FTA: 0.2300
+[2025-09-09 13:25:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:25:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:25:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:25:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:25:40] [Rank 0] step:3501/10000 train_time:188477ms step_avg:53.84ms
+[2025-09-09 13:25:41] [Rank 0] step:3521/10000 train_time:189332ms step_avg:53.77ms
+[2025-09-09 13:25:42] [Rank 0] step:3541/10000 train_time:190159ms step_avg:53.70ms
+[2025-09-09 13:25:43] [Rank 0] step:3561/10000 train_time:190986ms step_avg:53.63ms
+[2025-09-09 13:25:44] [Rank 0] step:3581/10000 train_time:191813ms step_avg:53.56ms
+[2025-09-09 13:25:45] [Rank 0] step:3601/10000 train_time:192641ms step_avg:53.50ms
+[2025-09-09 13:25:46] [Rank 0] step:3621/10000 train_time:193963ms step_avg:53.57ms
+[2025-09-09 13:25:47] [Rank 0] step:3641/10000 train_time:195056ms step_avg:53.57ms
+[2025-09-09 13:25:48] [Rank 0] step:3661/10000 train_time:195884ms step_avg:53.51ms
+[2025-09-09 13:25:49] [Rank 0] step:3681/10000 train_time:196711ms step_avg:53.44ms
+[2025-09-09 13:25:49] [Rank 0] step:3701/10000 train_time:197539ms step_avg:53.37ms
+[2025-09-09 13:25:50] [Rank 0] step:3721/10000 train_time:198367ms step_avg:53.31ms
+[2025-09-09 13:25:51] [Rank 0] step:3741/10000 train_time:199194ms step_avg:53.25ms
+[2025-09-09 13:25:52] [Rank 0] step:3761/10000 train_time:200022ms step_avg:53.18ms
+[2025-09-09 13:25:53] [Rank 0] step:3781/10000 train_time:200850ms step_avg:53.12ms
+[2025-09-09 13:25:54] [Rank 0] step:3801/10000 train_time:201677ms step_avg:53.06ms
+[2025-09-09 13:25:54] [Rank 0] step:3821/10000 train_time:202504ms step_avg:53.00ms
+[2025-09-09 13:25:55] [Rank 0] step:3841/10000 train_time:203333ms step_avg:52.94ms
+[2025-09-09 13:25:56] [Rank 0] step:3861/10000 train_time:204161ms step_avg:52.88ms
+[2025-09-09 13:25:57] [Rank 0] step:3881/10000 train_time:204988ms step_avg:52.82ms
+[2025-09-09 13:25:58] [Rank 0] step:3901/10000 train_time:205813ms step_avg:52.76ms
+[2025-09-09 13:25:59] [Rank 0] step:3921/10000 train_time:206640ms step_avg:52.70ms
+[2025-09-09 13:25:59] [Rank 0] step:3941/10000 train_time:207466ms step_avg:52.64ms
+[2025-09-09 13:26:00] [Rank 0] step:3961/10000 train_time:208293ms step_avg:52.59ms
+[2025-09-09 13:26:01] [Rank 0] step:3981/10000 train_time:209121ms step_avg:52.53ms
+[2025-09-09 13:26:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:26:02] [Rank 0] PRINT: step:4000/10000 train_loss:0.6628 val_loss:0.6446 train_time:209950ms step_avg:52.49ms
+[2025-09-09 13:26:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:26:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:27:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:27:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:27:24] [Rank 0] Total Loss: 4.6886
+[2025-09-09 13:27:24] [Rank 0] Total FTA (Unweighted): 0.9325
+[2025-09-09 13:27:24] [Rank 0] Total FTA (Weighted): 0.9325
+[2025-09-09 13:27:24] [Rank 0] Group 0 Loss: 4.5595
+[2025-09-09 13:27:24] [Rank 0] Group 1 Loss: 4.3555
+[2025-09-09 13:27:24] [Rank 0] Group 2 Loss: 4.2684
+[2025-09-09 13:27:24] [Rank 0] Group 3 Loss: 4.6082
+[2025-09-09 13:27:24] [Rank 0] Group 4 Loss: 4.5669
+[2025-09-09 13:27:24] [Rank 0] Group 5 Loss: 4.6373
+[2025-09-09 13:27:24] [Rank 0] Group 6 Loss: 4.5567
+[2025-09-09 13:27:25] [Rank 0] Group 7 Loss: 4.5879
+[2025-09-09 13:27:25] [Rank 0] Group 8 Loss: 4.7589
+[2025-09-09 13:27:25] [Rank 0] Group 9 Loss: 4.7200
+[2025-09-09 13:27:25] [Rank 0] Group 10 Loss: 4.8572
+[2025-09-09 13:27:25] [Rank 0] Group 11 Loss: 4.8513
+[2025-09-09 13:27:25] [Rank 0] Group 12 Loss: 4.8275
+[2025-09-09 13:27:25] [Rank 0] Group 13 Loss: 4.9628
+[2025-09-09 13:27:25] [Rank 0] Group 14 Loss: 4.9504
+[2025-09-09 13:27:25] [Rank 0] Group 15 Loss: 4.9491
+[2025-09-09 13:27:25] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:27:25] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:27:25] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:27:25] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:27:25] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:27:25] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:27:25] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:27:25] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:27:25] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:27:25] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:27:25] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:27:25] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-09 13:27:25] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 13:27:25] [Rank 0] Group 13 FTA: 0.9300
+[2025-09-09 13:27:25] [Rank 0] Group 14 FTA: 0.6100
+[2025-09-09 13:27:25] [Rank 0] Group 15 FTA: 0.4000
+[2025-09-09 13:27:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:27:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:27:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:27:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:27:27] [Rank 0] step:4001/10000 train_time:209967ms step_avg:52.48ms
+[2025-09-09 13:27:28] [Rank 0] step:4021/10000 train_time:211491ms step_avg:52.60ms
+[2025-09-09 13:27:29] [Rank 0] step:4041/10000 train_time:212319ms step_avg:52.54ms
+[2025-09-09 13:27:30] [Rank 0] step:4061/10000 train_time:213146ms step_avg:52.49ms
+[2025-09-09 13:27:31] [Rank 0] step:4081/10000 train_time:213973ms step_avg:52.43ms
+[2025-09-09 13:27:32] [Rank 0] step:4101/10000 train_time:214800ms step_avg:52.38ms
+[2025-09-09 13:27:33] [Rank 0] step:4121/10000 train_time:215626ms step_avg:52.32ms
+[2025-09-09 13:27:33] [Rank 0] step:4141/10000 train_time:216452ms step_avg:52.27ms
+[2025-09-09 13:27:34] [Rank 0] step:4161/10000 train_time:217279ms step_avg:52.22ms
+[2025-09-09 13:27:35] [Rank 0] step:4181/10000 train_time:218106ms step_avg:52.17ms
+[2025-09-09 13:27:36] [Rank 0] step:4201/10000 train_time:218933ms step_avg:52.11ms
+[2025-09-09 13:27:37] [Rank 0] step:4221/10000 train_time:219760ms step_avg:52.06ms
+[2025-09-09 13:27:38] [Rank 0] step:4241/10000 train_time:220586ms step_avg:52.01ms
+[2025-09-09 13:27:38] [Rank 0] step:4261/10000 train_time:221413ms step_avg:51.96ms
+[2025-09-09 13:27:39] [Rank 0] step:4281/10000 train_time:222240ms step_avg:51.91ms
+[2025-09-09 13:27:40] [Rank 0] step:4301/10000 train_time:223067ms step_avg:51.86ms
+[2025-09-09 13:27:41] [Rank 0] step:4321/10000 train_time:223893ms step_avg:51.82ms
+[2025-09-09 13:27:42] [Rank 0] step:4341/10000 train_time:224721ms step_avg:51.77ms
+[2025-09-09 13:27:43] [Rank 0] step:4361/10000 train_time:225547ms step_avg:51.72ms
+[2025-09-09 13:27:43] [Rank 0] step:4381/10000 train_time:226374ms step_avg:51.67ms
+[2025-09-09 13:27:44] [Rank 0] step:4401/10000 train_time:227200ms step_avg:51.62ms
+[2025-09-09 13:27:45] [Rank 0] step:4421/10000 train_time:228027ms step_avg:51.58ms
+[2025-09-09 13:27:46] [Rank 0] step:4441/10000 train_time:228856ms step_avg:51.53ms
+[2025-09-09 13:27:47] [Rank 0] step:4461/10000 train_time:229681ms step_avg:51.49ms
+[2025-09-09 13:27:48] [Rank 0] step:4481/10000 train_time:230508ms step_avg:51.44ms
+[2025-09-09 13:27:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:27:49] [Rank 0] PRINT: step:4500/10000 train_loss:0.6538 val_loss:0.6365 train_time:231337ms step_avg:51.41ms +[2025-09-09 13:27:49] [Rank 0] PRINT: step:4500/10000 train_loss:0.6538 val_loss:0.6365 train_time:231337ms step_avg:51.41ms +[2025-09-09 13:27:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 13:27:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 13:27:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 13:27:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 13:29:10] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 13:29:10] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 13:29:10] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 13:29:10] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 13:29:10] [Rank 0] Total Loss: 4.7287 +[2025-09-09 13:29:10] [Rank 0] Total Loss: 4.7287 +[2025-09-09 13:29:10] [Rank 0] Total FTA (Unweighted): 0.9425 +[2025-09-09 13:29:10] [Rank 0] Total FTA (Unweighted): 0.9425 +[2025-09-09 13:29:10] [Rank 0] Total FTA (Weighted): 0.9425 +[2025-09-09 13:29:10] [Rank 0] Total FTA (Weighted): 0.9425 +[2025-09-09 13:29:10] [Rank 0] Group 0 Loss: 4.6434 +[2025-09-09 13:29:10] [Rank 0] Group 0 Loss: 4.6434 +[2025-09-09 13:29:10] [Rank 0] Group 1 Loss: 4.3582 +[2025-09-09 13:29:10] [Rank 0] Group 1 Loss: 4.3582 +[2025-09-09 13:29:10] [Rank 0] Group 2 Loss: 4.3592 +[2025-09-09 13:29:10] [Rank 0] Group 2 Loss: 4.3592 +[2025-09-09 13:29:10] [Rank 0] Group 3 Loss: 4.6431 +[2025-09-09 13:29:10] [Rank 0] Group 3 Loss: 4.6431 +[2025-09-09 13:29:10] [Rank 0] Group 4 Loss: 4.6115 +[2025-09-09 13:29:10] [Rank 0] Group 4 Loss: 4.6115 +[2025-09-09 13:29:10] [Rank 0] Group 5 Loss: 4.6645 +[2025-09-09 13:29:10] [Rank 0] Group 5 Loss: 4.6645 +[2025-09-09 13:29:10] [Rank 0] Group 6 Loss: 4.6203 +[2025-09-09 13:29:10] [Rank 0] Group 6 Loss: 4.6203 +[2025-09-09 13:29:10] [Rank 0] Group 7 Loss: 4.6202 +[2025-09-09 13:29:10] [Rank 0] Group 7 Loss: 4.6202 +[2025-09-09 13:29:10] [Rank 0] Group 8 Loss: 4.8071 +[2025-09-09 13:29:10] [Rank 0] Group 8 Loss: 4.8071 +[2025-09-09 13:29:10] [Rank 0] Group 9 Loss: 4.7726 +[2025-09-09 13:29:10] [Rank 0] Group 9 Loss: 4.7726 +[2025-09-09 13:29:10] [Rank 0] Group 10 Loss: 4.9076 +[2025-09-09 13:29:10] [Rank 0] Group 10 Loss: 4.9076 +[2025-09-09 13:29:10] [Rank 0] Group 11 Loss: 4.8995 +[2025-09-09 13:29:10] [Rank 0] Group 11 Loss: 4.8995 +[2025-09-09 13:29:10] [Rank 0] Group 12 Loss: 4.8566 +[2025-09-09 13:29:10] [Rank 0] Group 12 Loss: 4.8566 +[2025-09-09 13:29:10] [Rank 0] Group 13 Loss: 4.9708 +[2025-09-09 13:29:10] [Rank 0] Group 13 Loss: 4.9708 +[2025-09-09 13:29:10] [Rank 0] Group 14 Loss: 4.9682 +[2025-09-09 13:29:10] [Rank 0] Group 14 Loss: 4.9682 +[2025-09-09 13:29:10] [Rank 0] Group 15 Loss: 4.9559 +[2025-09-09 13:29:10] [Rank 0] Group 15 Loss: 4.9559 +[2025-09-09 13:29:10] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 13:29:10] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 13:29:10] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 13:29:10] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 13:29:10] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 13:29:10] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 13:29:10] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 13:29:10] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 13:29:10] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 13:29:10] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 13:29:10] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 13:29:10] [Rank 0] Group 5 FTA: 
+[2025-09-09 13:29:10] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:29:10] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:29:10] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:29:10] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:29:10] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-09 13:29:10] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 13:29:10] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 13:29:10] [Rank 0] Group 13 FTA: 0.9500
+[2025-09-09 13:29:10] [Rank 0] Group 14 FTA: 0.6800
+[2025-09-09 13:29:10] [Rank 0] Group 15 FTA: 0.4700
+[2025-09-09 13:29:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:29:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:29:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:29:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:29:12] [Rank 0] step:4501/10000 train_time:231355ms step_avg:51.40ms
+[2025-09-09 13:29:13] [Rank 0] step:4521/10000 train_time:232185ms step_avg:51.36ms
+[2025-09-09 13:29:13] [Rank 0] step:4541/10000 train_time:233010ms step_avg:51.31ms
+[2025-09-09 13:29:14] [Rank 0] step:4561/10000 train_time:233836ms step_avg:51.27ms
+[2025-09-09 13:29:15] [Rank 0] step:4581/10000 train_time:234663ms step_avg:51.23ms
+[2025-09-09 13:29:16] [Rank 0] step:4601/10000 train_time:235490ms step_avg:51.18ms
+[2025-09-09 13:29:17] [Rank 0] step:4621/10000 train_time:236317ms step_avg:51.14ms
+[2025-09-09 13:29:18]
[Rank 0] step:4641/10000 train_time:237146ms step_avg:51.10ms
+[2025-09-09 13:29:18] [Rank 0] step:4661/10000 train_time:237971ms step_avg:51.06ms
+[2025-09-09 13:29:19] [Rank 0] step:4681/10000 train_time:238798ms step_avg:51.01ms
+[2025-09-09 13:29:20] [Rank 0] step:4701/10000 train_time:239624ms step_avg:50.97ms
+[2025-09-09 13:29:21] [Rank 0] step:4721/10000 train_time:240451ms step_avg:50.93ms
+[2025-09-09 13:29:22] [Rank 0] step:4741/10000 train_time:241278ms step_avg:50.89ms
+[2025-09-09 13:29:23] [Rank 0] step:4761/10000 train_time:242105ms step_avg:50.85ms
+[2025-09-09 13:29:23] [Rank 0] step:4781/10000 train_time:242937ms step_avg:50.81ms
+[2025-09-09 13:29:24] [Rank 0] step:4801/10000 train_time:243761ms step_avg:50.77ms
+[2025-09-09 13:29:25] [Rank 0] step:4821/10000 train_time:244588ms step_avg:50.73ms
+[2025-09-09 13:29:26] [Rank 0] step:4841/10000 train_time:245725ms step_avg:50.76ms
+[2025-09-09 13:29:27] [Rank 0] step:4861/10000 train_time:246552ms step_avg:50.72ms
+[2025-09-09 13:29:28] [Rank 0] step:4881/10000 train_time:247378ms step_avg:50.68ms
+[2025-09-09 13:29:29] [Rank 0] step:4901/10000 train_time:248203ms step_avg:50.64ms
+[2025-09-09 13:29:29] [Rank 0] step:4921/10000 train_time:249029ms step_avg:50.61ms
+[2025-09-09 13:29:30] [Rank 0] step:4941/10000 train_time:249855ms step_avg:50.57ms
+[2025-09-09 13:29:31] [Rank 0] step:4961/10000 train_time:250683ms step_avg:50.53ms
+[2025-09-09 13:29:32] [Rank 0] step:4981/10000 train_time:251508ms step_avg:50.49ms
+[2025-09-09 13:29:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
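For reference, the step_avg field in these lines appears to be just cumulative train_time divided by the step index, which any entry above can confirm. A one-line check (hypothetical helper, not part of the logged script):

```python
# step_avg seems to be cumulative wall-clock train_time / step count.
def step_avg_ms(train_time_ms: int, step: int) -> float:
    return train_time_ms / step

print(round(step_avg_ms(251_508, 4_981), 2))  # 50.49, matching step:4981 above
```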
+[2025-09-09 13:29:33] [Rank 0] PRINT: step:5000/10000 train_loss:0.6452 val_loss:0.6297 train_time:252336ms step_avg:50.47ms
+[2025-09-09 13:29:33] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:29:33] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:30:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:30:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:30:56] [Rank 0] Total Loss: 4.7405
+[2025-09-09 13:30:56] [Rank 0] Total FTA (Unweighted): 0.9506
+[2025-09-09 13:30:56] [Rank 0] Total FTA (Weighted): 0.9506
+[2025-09-09 13:30:56] [Rank 0] Group 0 Loss: 4.6200
+[2025-09-09 13:30:56] [Rank 0] Group 1 Loss: 4.3799
+[2025-09-09 13:30:56] [Rank 0] Group 2 Loss: 4.2984
+[2025-09-09 13:30:56] [Rank 0] Group 3 Loss: 4.6570
+[2025-09-09 13:30:56] [Rank 0] Group 4 Loss: 4.6406
+[2025-09-09 13:30:56] [Rank 0] Group 5 Loss: 4.7024
+[2025-09-09 13:30:56] [Rank 0] Group 6 Loss: 4.6216
+[2025-09-09 13:30:56] [Rank 0] Group 7 Loss: 4.6609
+[2025-09-09 13:30:56] [Rank 0] Group 8 Loss: 4.8258
+[2025-09-09 13:30:56] [Rank 0] Group 9 Loss: 4.7952
+[2025-09-09 13:30:56] [Rank 0] Group 10 Loss: 4.9094
+[2025-09-09 13:30:56] [Rank 0] Group 11 Loss: 4.9302
+[2025-09-09 13:30:56] [Rank 0] Group 12 Loss: 4.8981
+[2025-09-09 13:30:56] [Rank 0] Group 13 Loss: 4.9996
+[2025-09-09 13:30:56] [Rank 0] Group 14 Loss: 4.9691
+[2025-09-09 13:30:56] [Rank 0] Group 15 Loss: 4.9391
+[2025-09-09 13:30:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 13:30:57] [Rank 0] Group 13 FTA: 0.9500
+[2025-09-09 13:30:57] [Rank 0] Group 14 FTA: 0.7100
+[2025-09-09 13:30:57] [Rank 0] Group 15 FTA: 0.5500
+[2025-09-09 13:30:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:30:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:30:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:30:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:30:58] [Rank 0] step:5001/10000 train_time:252353ms step_avg:50.46ms
+[2025-09-09 13:30:59] [Rank 0] step:5021/10000 train_time:253187ms step_avg:50.43ms
+[2025-09-09 13:31:00] [Rank 0] step:5041/10000 train_time:254012ms step_avg:50.39ms
+[2025-09-09 13:31:00] [Rank 0] step:5061/10000 train_time:254839ms step_avg:50.35ms
+[2025-09-09 13:31:02] [Rank 0] step:5081/10000 train_time:256096ms step_avg:50.40ms
+[2025-09-09 13:31:03] [Rank 0] step:5101/10000 train_time:256922ms step_avg:50.37ms
+[2025-09-09 13:31:03] [Rank 0] step:5121/10000 train_time:257748ms step_avg:50.33ms
+[2025-09-09 13:31:04]
[Rank 0] step:5141/10000 train_time:258574ms step_avg:50.30ms
+[2025-09-09 13:31:05] [Rank 0] step:5161/10000 train_time:259408ms step_avg:50.26ms
+[2025-09-09 13:31:06] [Rank 0] step:5181/10000 train_time:260234ms step_avg:50.23ms
+[2025-09-09 13:31:07] [Rank 0] step:5201/10000 train_time:261062ms step_avg:50.19ms
+[2025-09-09 13:31:08] [Rank 0] step:5221/10000 train_time:261888ms step_avg:50.16ms
+[2025-09-09 13:31:08] [Rank 0] step:5241/10000 train_time:262715ms step_avg:50.13ms
+[2025-09-09 13:31:09] [Rank 0] step:5261/10000 train_time:263542ms step_avg:50.09ms
+[2025-09-09 13:31:10] [Rank 0] step:5281/10000 train_time:264369ms step_avg:50.06ms
+[2025-09-09 13:31:11] [Rank 0] step:5301/10000 train_time:265196ms step_avg:50.03ms
+[2025-09-09 13:31:12] [Rank 0] step:5321/10000 train_time:266023ms step_avg:49.99ms
+[2025-09-09 13:31:12] [Rank 0] step:5341/10000 train_time:266851ms step_avg:49.96ms
+[2025-09-09 13:31:13] [Rank 0] step:5361/10000 train_time:267676ms step_avg:49.93ms
+[2025-09-09 13:31:14] [Rank 0] step:5381/10000 train_time:268503ms step_avg:49.90ms
+[2025-09-09 13:31:15] [Rank 0] step:5401/10000 train_time:269329ms step_avg:49.87ms
+[2025-09-09 13:31:16] [Rank 0] step:5421/10000 train_time:270156ms step_avg:49.84ms
+[2025-09-09 13:31:17] [Rank 0] step:5441/10000 train_time:270983ms step_avg:49.80ms
+[2025-09-09 13:31:17] [Rank 0] step:5461/10000 train_time:271810ms step_avg:49.77ms
+[2025-09-09 13:31:18] [Rank 0] step:5481/10000 train_time:272636ms step_avg:49.74ms
+[2025-09-09 13:31:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:31:20] [Rank 0] PRINT: step:5500/10000 train_loss:0.6377 val_loss:0.6240 train_time:273465ms step_avg:49.72ms
+[2025-09-09 13:31:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:31:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:32:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:32:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:32:41] [Rank 0] Total Loss: 4.7795
+[2025-09-09 13:32:41] [Rank 0] Total FTA (Unweighted): 0.9600
+[2025-09-09 13:32:41] [Rank 0] Total FTA (Weighted): 0.9600
+[2025-09-09 13:32:41] [Rank 0] Group 0 Loss: 4.6951
+[2025-09-09 13:32:41] [Rank 0] Group 1 Loss: 4.3908
+[2025-09-09 13:32:41] [Rank 0] Group 2 Loss: 4.3677
+[2025-09-09 13:32:41] [Rank 0] Group 3 Loss: 4.6675
+[2025-09-09 13:32:41] [Rank 0] Group 4 Loss: 4.6544
+[2025-09-09 13:32:41] [Rank 0] Group 5 Loss: 4.7259
+[2025-09-09 13:32:41] [Rank 0] Group 6 Loss: 4.6639
+[2025-09-09 13:32:42] [Rank 0] Group 7 Loss: 4.6773
+[2025-09-09 13:32:42] [Rank 0] Group 8 Loss: 4.8550
+[2025-09-09 13:32:42] [Rank 0] Group 9 Loss: 4.8140
+[2025-09-09 13:32:42] [Rank 0] Group 10 Loss: 4.9852
+[2025-09-09 13:32:42] [Rank 0] Group 11 Loss: 4.9820
+[2025-09-09 13:32:42] [Rank 0] Group 12 Loss: 4.9495
+[2025-09-09 13:32:42] [Rank 0] Group 13 Loss: 5.0458
+[2025-09-09 13:32:42] [Rank 0] Group 14 Loss: 5.0346
+[2025-09-09 13:32:42] [Rank 0] Group 15 Loss: 4.9627
+[2025-09-09 13:32:42] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:32:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:32:42] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:32:42] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:32:42] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:32:42] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:32:42] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:32:42] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:32:42] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:32:42] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:32:42] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:32:42] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 13:32:42] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 13:32:42] [Rank 0] Group 13 FTA: 0.9800
+[2025-09-09 13:32:42] [Rank 0] Group 14 FTA: 0.8400
+[2025-09-09 13:32:42] [Rank 0] Group 15 FTA: 0.5500
+[2025-09-09 13:32:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:32:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:32:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:32:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:32:43] [Rank 0] step:5501/10000 train_time:273482ms step_avg:49.72ms
+[2025-09-09 13:32:44] [Rank 0] step:5521/10000 train_time:274310ms step_avg:49.68ms
+[2025-09-09 13:32:45] [Rank 0] step:5541/10000 train_time:275137ms step_avg:49.65ms
+[2025-09-09 13:32:46] [Rank 0] step:5561/10000 train_time:275963ms step_avg:49.62ms
+[2025-09-09 13:32:46] [Rank 0] step:5581/10000 train_time:276789ms step_avg:49.59ms
+[2025-09-09 13:32:47] [Rank 0] step:5601/10000 train_time:277615ms step_avg:49.57ms
+[2025-09-09 13:32:48] [Rank 0] step:5621/10000 train_time:278442ms step_avg:49.54ms
+[2025-09-09 13:32:50]
[Rank 0] step:5641/10000 train_time:279953ms step_avg:49.63ms
+[2025-09-09 13:32:50] [Rank 0] step:5661/10000 train_time:280780ms step_avg:49.60ms
+[2025-09-09 13:32:51] [Rank 0] step:5681/10000 train_time:281609ms step_avg:49.57ms
+[2025-09-09 13:32:52] [Rank 0] step:5701/10000 train_time:282434ms step_avg:49.54ms
+[2025-09-09 13:32:53] [Rank 0] step:5721/10000 train_time:283261ms step_avg:49.51ms
+[2025-09-09 13:32:54] [Rank 0] step:5741/10000 train_time:284087ms step_avg:49.48ms
+[2025-09-09 13:32:54] [Rank 0] step:5761/10000 train_time:284913ms step_avg:49.46ms
+[2025-09-09 13:32:55] [Rank 0] step:5781/10000 train_time:285740ms step_avg:49.43ms
+[2025-09-09 13:32:56] [Rank 0] step:5801/10000 train_time:286566ms step_avg:49.40ms
+[2025-09-09 13:32:57] [Rank 0] step:5821/10000 train_time:287393ms step_avg:49.37ms
+[2025-09-09 13:32:58] [Rank 0] step:5841/10000 train_time:288221ms step_avg:49.34ms
+[2025-09-09 13:32:59] [Rank 0] step:5861/10000 train_time:289048ms step_avg:49.32ms
+[2025-09-09 13:32:59] [Rank 0] step:5881/10000 train_time:289875ms step_avg:49.29ms
+[2025-09-09 13:33:00] [Rank 0] step:5901/10000 train_time:290701ms step_avg:49.26ms
+[2025-09-09 13:33:01] [Rank 0] step:5921/10000 train_time:291529ms step_avg:49.24ms
+[2025-09-09 13:33:02] [Rank 0] step:5941/10000 train_time:292357ms step_avg:49.21ms
+[2025-09-09 13:33:03] [Rank 0] step:5961/10000 train_time:293184ms step_avg:49.18ms
+[2025-09-09 13:33:04] [Rank 0] step:5981/10000 train_time:294011ms step_avg:49.16ms
+[2025-09-09 13:33:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:33:05] [Rank 0] PRINT: step:6000/10000 train_loss:0.6315 val_loss:0.6194 train_time:294841ms step_avg:49.14ms
+[2025-09-09 13:33:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:33:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:34:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:34:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:34:27] [Rank 0] Total Loss: 4.8195
+[2025-09-09 13:34:27] [Rank 0] Total FTA (Unweighted): 0.9725
+[2025-09-09 13:34:27] [Rank 0] Total FTA (Weighted): 0.9725
+[2025-09-09 13:34:27] [Rank 0] Group 0 Loss: 4.7009
+[2025-09-09 13:34:27] [Rank 0] Group 1 Loss: 4.4001
+[2025-09-09 13:34:27] [Rank 0] Group 2 Loss: 4.4022
+[2025-09-09 13:34:27] [Rank 0] Group 3 Loss: 4.7111
+[2025-09-09 13:34:27] [Rank 0] Group 4 Loss: 4.7096
+[2025-09-09 13:34:27] [Rank 0] Group 5 Loss: 4.7630
+[2025-09-09 13:34:27] [Rank 0] Group 6 Loss: 4.7098
+[2025-09-09 13:34:27] [Rank 0] Group 7 Loss: 4.7267
+[2025-09-09 13:34:27] [Rank 0] Group 8 Loss: 4.9168
+[2025-09-09 13:34:27] [Rank 0] Group 9 Loss: 4.8819
+[2025-09-09 13:34:27] [Rank 0] Group 10 Loss: 4.9972
+[2025-09-09 13:34:27] [Rank 0] Group 11 Loss: 5.0068
+[2025-09-09 13:34:27] [Rank 0] Group 12 Loss: 4.9916
+[2025-09-09 13:34:27] [Rank 0] Group 13 Loss: 5.1010
+[2025-09-09 13:34:27] [Rank 0] Group 14 Loss: 5.0714
+[2025-09-09 13:34:27] [Rank 0] Group 15 Loss: 5.0222
+[2025-09-09 13:34:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 13:34:27] [Rank 0] Group 13 FTA: 0.9900
+[2025-09-09 13:34:27] [Rank 0] Group 14 FTA: 0.9200
+[2025-09-09 13:34:27] [Rank 0] Group 15 FTA: 0.6500
+[2025-09-09 13:34:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:34:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:34:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:34:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:34:28] [Rank 0] step:6001/10000 train_time:294858ms step_avg:49.13ms
+[2025-09-09 13:34:29] [Rank 0] step:6021/10000 train_time:295756ms step_avg:49.12ms
+[2025-09-09 13:34:30] [Rank 0] step:6041/10000 train_time:296583ms step_avg:49.09ms
+[2025-09-09 13:34:31] [Rank 0] step:6061/10000 train_time:297409ms step_avg:49.07ms
+[2025-09-09 13:34:32] [Rank 0] step:6081/10000 train_time:298236ms step_avg:49.04ms
+[2025-09-09 13:34:32] [Rank 0] step:6101/10000 train_time:299063ms step_avg:49.02ms
+[2025-09-09 13:34:33] [Rank 0] step:6121/10000 train_time:299891ms step_avg:48.99ms
+[2025-09-09 13:34:34]
[Rank 0] step:6141/10000 train_time:300721ms step_avg:48.97ms
+[2025-09-09 13:34:35] [Rank 0] step:6161/10000 train_time:301546ms step_avg:48.94ms
+[2025-09-09 13:34:36] [Rank 0] step:6181/10000 train_time:302373ms step_avg:48.92ms
+[2025-09-09 13:34:37] [Rank 0] step:6201/10000 train_time:303201ms step_avg:48.90ms
+[2025-09-09 13:34:37] [Rank 0] step:6221/10000 train_time:304028ms step_avg:48.87ms
+[2025-09-09 13:34:38] [Rank 0] step:6241/10000 train_time:304856ms step_avg:48.85ms
+[2025-09-09 13:34:39] [Rank 0] step:6261/10000 train_time:305684ms step_avg:48.82ms
+[2025-09-09 13:34:40] [Rank 0] step:6281/10000 train_time:306512ms step_avg:48.80ms
+[2025-09-09 13:34:41] [Rank 0] step:6301/10000 train_time:307342ms step_avg:48.78ms
+[2025-09-09 13:34:42] [Rank 0] step:6321/10000 train_time:308167ms step_avg:48.75ms
+[2025-09-09 13:34:42] [Rank 0] step:6341/10000 train_time:308995ms step_avg:48.73ms
+[2025-09-09 13:34:43] [Rank 0] step:6361/10000 train_time:309822ms step_avg:48.71ms
+[2025-09-09 13:34:44] [Rank 0] step:6381/10000 train_time:310650ms step_avg:48.68ms
+[2025-09-09 13:34:45] [Rank 0] step:6401/10000 train_time:311477ms step_avg:48.66ms
+[2025-09-09 13:34:46] [Rank 0] step:6421/10000 train_time:312305ms step_avg:48.64ms
+[2025-09-09 13:34:46] [Rank 0] step:6441/10000 train_time:313132ms step_avg:48.62ms
+[2025-09-09 13:34:47] [Rank 0] step:6461/10000 train_time:313962ms step_avg:48.59ms
+[2025-09-09 13:34:48] [Rank 0] step:6481/10000 train_time:314788ms step_avg:48.57ms
+[2025-09-09 13:34:49] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:34:49] [Rank 0] PRINT: step:6500/10000 train_loss:0.6262 val_loss:0.6152 train_time:315618ms step_avg:48.56ms
+[2025-09-09 13:34:49] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:34:50] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:36:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:36:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:36:12] [Rank 0] Total Loss: 4.8461
+[2025-09-09 13:36:12] [Rank 0] Total FTA (Unweighted): 0.9806
+[2025-09-09 13:36:12] [Rank 0] Total FTA (Weighted): 0.9806
+[2025-09-09 13:36:12] [Rank 0] Group 0 Loss: 4.7838
+[2025-09-09 13:36:12] [Rank 0] Group 1 Loss: 4.4941
+[2025-09-09 13:36:12] [Rank 0] Group 2 Loss: 4.4018
+[2025-09-09 13:36:12] [Rank 0] Group 3 Loss: 4.7611
+[2025-09-09 13:36:12] [Rank 0] Group 4 Loss: 4.7079
+[2025-09-09 13:36:12] [Rank 0] Group 5 Loss: 4.7675
+[2025-09-09 13:36:12] [Rank 0] Group 6 Loss: 4.7261
+[2025-09-09 13:36:12] [Rank 0] Group 7 Loss: 4.7596
+[2025-09-09 13:36:12] [Rank 0] Group 8 Loss: 4.9222
+[2025-09-09 13:36:12] [Rank 0] Group 9 Loss: 4.8773
+[2025-09-09 13:36:12] [Rank 0] Group 10 Loss: 5.0225
+[2025-09-09 13:36:12] [Rank 0] Group 11 Loss: 5.0378
+[2025-09-09 13:36:12] [Rank 0] Group 12 Loss: 5.0358
+[2025-09-09 13:36:12] [Rank 0] Group 13 Loss: 5.1227
+[2025-09-09 13:36:12] [Rank 0] Group 14 Loss: 5.0815
+[2025-09-09 13:36:12] [Rank 0] Group 15 Loss: 5.0358
+[2025-09-09 13:36:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 13:36:12] [Rank 0] Group 14 FTA: 0.9400
+[2025-09-09 13:36:12] [Rank 0] Group 15 FTA: 0.7500
+[2025-09-09 13:36:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:36:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:36:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:36:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:36:13] [Rank 0] step:6501/10000 train_time:315636ms step_avg:48.55ms
+[2025-09-09 13:36:14] [Rank 0] step:6521/10000 train_time:316486ms step_avg:48.53ms
+[2025-09-09 13:36:15] [Rank 0] step:6541/10000 train_time:317312ms step_avg:48.51ms
+[2025-09-09 13:36:16] [Rank 0] step:6561/10000 train_time:318140ms step_avg:48.49ms
+[2025-09-09 13:36:17] [Rank 0] step:6581/10000 train_time:318967ms step_avg:48.47ms
+[2025-09-09 13:36:18] [Rank 0] step:6601/10000 train_time:320150ms step_avg:48.50ms
+[2025-09-09 13:36:19] [Rank 0] step:6621/10000 train_time:321098ms step_avg:48.50ms
+[2025-09-09 13:36:19]
[Rank 0] step:6641/10000 train_time:321926ms step_avg:48.48ms
+[2025-09-09 13:36:20] [Rank 0] step:6661/10000 train_time:322756ms step_avg:48.45ms
+[2025-09-09 13:36:21] [Rank 0] step:6681/10000 train_time:323582ms step_avg:48.43ms
+[2025-09-09 13:36:22] [Rank 0] step:6701/10000 train_time:324413ms step_avg:48.41ms
+[2025-09-09 13:36:23] [Rank 0] step:6721/10000 train_time:325241ms step_avg:48.39ms
+[2025-09-09 13:36:24] [Rank 0] step:6741/10000 train_time:326070ms step_avg:48.37ms
+[2025-09-09 13:36:24] [Rank 0] step:6761/10000 train_time:326899ms step_avg:48.35ms
+[2025-09-09 13:36:25] [Rank 0] step:6781/10000 train_time:327727ms step_avg:48.33ms
+[2025-09-09 13:36:26] [Rank 0] step:6801/10000 train_time:328557ms step_avg:48.31ms
+[2025-09-09 13:36:27] [Rank 0] step:6821/10000 train_time:329384ms step_avg:48.29ms
+[2025-09-09 13:36:28] [Rank 0] step:6841/10000 train_time:330915ms step_avg:48.37ms
+[2025-09-09 13:36:29] [Rank 0] step:6861/10000 train_time:331744ms step_avg:48.35ms
+[2025-09-09 13:36:30] [Rank 0] step:6881/10000 train_time:332573ms step_avg:48.33ms
+[2025-09-09 13:36:31] [Rank 0] step:6901/10000 train_time:333401ms step_avg:48.31ms
+[2025-09-09 13:36:32] [Rank 0] step:6921/10000 train_time:334229ms step_avg:48.29ms
+[2025-09-09 13:36:33] [Rank 0] step:6941/10000 train_time:335057ms step_avg:48.27ms
+[2025-09-09 13:36:33] [Rank 0] step:6961/10000 train_time:335888ms step_avg:48.25ms
+[2025-09-09 13:36:34] [Rank 0] step:6981/10000 train_time:336713ms step_avg:48.23ms
+[2025-09-09 13:36:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:36:36] [Rank 0] PRINT: step:7000/10000 train_loss:0.6209 val_loss:0.6123 train_time:337542ms step_avg:48.22ms
+[2025-09-09 13:36:36] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:36:36] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:37:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:37:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:37:57] [Rank 0] Total Loss: 4.9353
+[2025-09-09 13:37:57] [Rank 0] Total FTA (Unweighted): 0.9888
+[2025-09-09 13:37:57] [Rank 0] Total FTA (Weighted): 0.9888
+[2025-09-09 13:37:57] [Rank 0] Group 0 Loss: 4.8687
+[2025-09-09 13:37:57] [Rank 0] Group 1 Loss: 4.5775
+[2025-09-09 13:37:57] [Rank 0] Group 2 Loss: 4.4722
+[2025-09-09 13:37:57] [Rank 0] Group 3 Loss: 4.8896
+[2025-09-09 13:37:57] [Rank 0] Group 4 Loss: 4.8004
+[2025-09-09 13:37:57] [Rank 0] Group 5 Loss: 4.8550
+[2025-09-09 13:37:57] [Rank 0] Group 6 Loss: 4.8315
+[2025-09-09 13:37:57] [Rank 0] Group 7 Loss: 4.8541
+[2025-09-09 13:37:57] [Rank 0] Group 8 Loss: 5.0199
+[2025-09-09 13:37:57] [Rank 0] Group 9 Loss: 4.9870
+[2025-09-09 13:37:57] [Rank 0] Group 10 Loss: 5.0660
+[2025-09-09 13:37:57] [Rank 0] Group 11 Loss: 5.1264
+[2025-09-09 13:37:57] [Rank 0] Group 12 Loss: 5.1202
+[2025-09-09 13:37:57] [Rank 0] Group 13 Loss: 5.2212
+[2025-09-09 13:37:57] [Rank 0] Group 14 Loss: 5.1642
+[2025-09-09 13:37:57] [Rank 0] Group 15 Loss: 5.1101
+[2025-09-09 13:37:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 13:37:57] [Rank 0] Group 14 FTA: 0.9800
+[2025-09-09 13:37:57] [Rank 0] Group 15 FTA: 0.8400
+[2025-09-09 13:37:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:37:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:37:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:37:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:37:59] [Rank 0] step:7001/10000 train_time:337560ms step_avg:48.22ms
+[2025-09-09 13:38:00] [Rank 0] step:7021/10000 train_time:338399ms step_avg:48.20ms
+[2025-09-09 13:38:00] [Rank 0] step:7041/10000 train_time:339226ms step_avg:48.18ms
+[2025-09-09 13:38:01] [Rank 0] step:7061/10000 train_time:340053ms step_avg:48.16ms
+[2025-09-09 13:38:02] [Rank 0] step:7081/10000 train_time:340880ms step_avg:48.14ms
+[2025-09-09 13:38:03] [Rank 0] step:7101/10000 train_time:341707ms step_avg:48.12ms
+[2025-09-09 13:38:04] [Rank 0] step:7121/10000 train_time:342535ms step_avg:48.10ms
+[2025-09-09 13:38:05]
[Rank 0] step:7141/10000 train_time:343362ms step_avg:48.08ms
+[2025-09-09 13:38:05] [Rank 0] step:7161/10000 train_time:344190ms step_avg:48.06ms
+[2025-09-09 13:38:06] [Rank 0] step:7181/10000 train_time:345018ms step_avg:48.05ms
+[2025-09-09 13:38:07] [Rank 0] step:7201/10000 train_time:345845ms step_avg:48.03ms
+[2025-09-09 13:38:08] [Rank 0] step:7221/10000 train_time:346671ms step_avg:48.01ms
+[2025-09-09 13:38:09] [Rank 0] step:7241/10000 train_time:347498ms step_avg:47.99ms
+[2025-09-09 13:38:10] [Rank 0] step:7261/10000 train_time:348324ms step_avg:47.97ms
+[2025-09-09 13:38:10] [Rank 0] step:7281/10000 train_time:349152ms step_avg:47.95ms
+[2025-09-09 13:38:11] [Rank 0] step:7301/10000 train_time:349978ms step_avg:47.94ms
+[2025-09-09 13:38:12] [Rank 0] step:7321/10000 train_time:350805ms step_avg:47.92ms
+[2025-09-09 13:38:13] [Rank 0] step:7341/10000 train_time:351635ms step_avg:47.90ms
+[2025-09-09 13:38:14] [Rank 0] step:7361/10000 train_time:352460ms step_avg:47.88ms
+[2025-09-09 13:38:15] [Rank 0] step:7381/10000 train_time:353286ms step_avg:47.86ms
+[2025-09-09 13:38:15] [Rank 0] step:7401/10000 train_time:354112ms step_avg:47.85ms
+[2025-09-09 13:38:16] [Rank 0] step:7421/10000 train_time:354940ms step_avg:47.83ms
+[2025-09-09 13:38:17] [Rank 0] step:7441/10000 train_time:355768ms step_avg:47.81ms
+[2025-09-09 13:38:18] [Rank 0] step:7461/10000 train_time:356595ms step_avg:47.79ms
+[2025-09-09 13:38:19] [Rank 0] step:7481/10000 train_time:357422ms step_avg:47.78ms
+[2025-09-09 13:38:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:38:20] [Rank 0] PRINT: step:7500/10000 train_loss:0.6166 val_loss:0.6099 train_time:358253ms step_avg:47.77ms
+[2025-09-09 13:38:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:38:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:39:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:39:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:39:41] [Rank 0] Total Loss: 4.8918
+[2025-09-09 13:39:41] [Rank 0] Total FTA (Unweighted): 0.9925
+[2025-09-09 13:39:41] [Rank 0] Total FTA (Weighted): 0.9925
+[2025-09-09 13:39:41] [Rank 0] Group 0 Loss: 4.8079
+[2025-09-09 13:39:41] [Rank 0] Group 1 Loss: 4.5007
+[2025-09-09 13:39:41] [Rank 0] Group 2 Loss: 4.3989
+[2025-09-09 13:39:41] [Rank 0] Group 3 Loss: 4.8109
+[2025-09-09 13:39:41] [Rank 0] Group 4 Loss: 4.7628
+[2025-09-09 13:39:41] [Rank 0] Group 5 Loss: 4.8212
+[2025-09-09 13:39:41] [Rank 0] Group 6 Loss: 4.7768
+[2025-09-09 13:39:41] [Rank 0] Group 7 Loss: 4.8185
+[2025-09-09 13:39:41] [Rank 0] Group 8 Loss: 4.9819
+[2025-09-09 13:39:41] [Rank 0] Group 9 Loss: 4.9306
+[2025-09-09 13:39:41] [Rank 0] Group 10 Loss: 5.0779
+[2025-09-09 13:39:41] [Rank 0] Group 11 Loss: 5.1068
+[2025-09-09 13:39:41] [Rank 0] Group 12 Loss: 5.0713
+[2025-09-09 13:39:41] [Rank 0] Group 13 Loss: 5.1725
+[2025-09-09 13:39:41] [Rank 0] Group 14 Loss: 5.1443
+[2025-09-09 13:39:41] [Rank 0] Group 15 Loss: 5.0864
+[2025-09-09 13:39:41] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 13:39:41] [Rank 0] Group 15 FTA: 0.8800
+[2025-09-09 13:39:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:39:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:39:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:39:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:39:42] [Rank 0] step:7501/10000 train_time:358270ms step_avg:47.76ms
+[2025-09-09 13:39:43] [Rank 0] step:7521/10000 train_time:359102ms step_avg:47.75ms
+[2025-09-09 13:39:44] [Rank 0] step:7541/10000 train_time:359930ms step_avg:47.73ms
+[2025-09-09 13:39:45] [Rank 0] step:7561/10000 train_time:360759ms step_avg:47.71ms
+[2025-09-09 13:39:46] [Rank 0] step:7581/10000 train_time:361589ms step_avg:47.70ms
+[2025-09-09 13:39:47] [Rank 0] step:7601/10000 train_time:362417ms step_avg:47.68ms
+[2025-09-09 13:39:47] [Rank 0] step:7621/10000 train_time:363245ms step_avg:47.66ms
+[2025-09-09 13:39:49] [Rank 0] step:7641/10000 train_time:364750ms step_avg:47.74ms
+[2025-09-09 13:39:50] [Rank 0] step:7661/10000 train_time:365579ms step_avg:47.72ms
+[2025-09-09 13:39:51] [Rank 0] step:7681/10000 train_time:366407ms step_avg:47.70ms
+[2025-09-09 13:39:51] [Rank 0] step:7701/10000 train_time:367235ms step_avg:47.69ms
+[2025-09-09 13:39:52] [Rank 0] step:7721/10000 train_time:368064ms step_avg:47.67ms
+[2025-09-09 13:39:53] [Rank 0] step:7741/10000 train_time:368891ms step_avg:47.65ms
+[2025-09-09 13:39:54] [Rank 0] step:7761/10000 train_time:369721ms step_avg:47.64ms
+[2025-09-09 13:39:55] [Rank 0] step:7781/10000 train_time:370556ms step_avg:47.62ms
+[2025-09-09 13:39:55] [Rank 0] step:7801/10000 train_time:371385ms step_avg:47.61ms
+[2025-09-09 13:39:56] [Rank 0] step:7821/10000 train_time:372213ms step_avg:47.59ms
+[2025-09-09 13:39:57] [Rank 0] step:7841/10000 train_time:373040ms step_avg:47.58ms
+[2025-09-09 13:39:58] [Rank 0] step:7861/10000 train_time:373869ms step_avg:47.56ms
+[2025-09-09 13:39:59] [Rank 0] step:7881/10000 train_time:374697ms step_avg:47.54ms
+[2025-09-09 13:40:00] [Rank 0] step:7901/10000 train_time:375524ms step_avg:47.53ms
+[2025-09-09 13:40:00] [Rank 0] step:7921/10000 train_time:376354ms step_avg:47.51ms
+[2025-09-09 13:40:01] [Rank 0] step:7941/10000 train_time:377263ms step_avg:47.51ms
+[2025-09-09 13:40:02] [Rank 0] step:7961/10000 train_time:378091ms step_avg:47.49ms
+[2025-09-09 13:40:03] [Rank 0] step:7981/10000 train_time:378919ms step_avg:47.48ms
+[2025-09-09 13:40:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
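The step_avg field in these lines is simply cumulative train_time divided by the step count, which is why it drifts downward as startup cost amortizes. A quick cross-check against two entries above (a sketch on values copied from the log, not the training script's code):

    # step_avg = train_time / step, checked against two logged entries.
    for step, train_time_ms in [(7500, 358253), (7981, 378919)]:
        print(f"step {step}: {train_time_ms / step:.2f}ms")
    # -> step 7500: 47.77ms and step 7981: 47.48ms, matching the log.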
+[2025-09-09 13:40:04] [Rank 0] PRINT: step:8000/10000 train_loss:0.6134 val_loss:0.6084 train_time:379749ms step_avg:47.47ms
+[2025-09-09 13:40:04] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:40:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:41:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:41:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:41:26] [Rank 0] Total Loss: 4.8530
+[2025-09-09 13:41:26] [Rank 0] Total FTA (Unweighted): 0.9938
+[2025-09-09 13:41:26] [Rank 0] Total FTA (Weighted): 0.9938
+[2025-09-09 13:41:26] [Rank 0] Group 0 Loss: 4.7167
+[2025-09-09 13:41:26] [Rank 0] Group 1 Loss: 4.4849
+[2025-09-09 13:41:26] [Rank 0] Group 2 Loss: 4.3597
+[2025-09-09 13:41:26] [Rank 0] Group 3 Loss: 4.7588
+[2025-09-09 13:41:26] [Rank 0] Group 4 Loss: 4.7232
+[2025-09-09 13:41:26] [Rank 0] Group 5 Loss: 4.7925
+[2025-09-09 13:41:26] [Rank 0] Group 6 Loss: 4.7335
+[2025-09-09 13:41:26] [Rank 0] Group 7 Loss: 4.7811
+[2025-09-09 13:41:26] [Rank 0] Group 8 Loss: 4.9522
+[2025-09-09 13:41:26] [Rank 0] Group 9 Loss: 4.9167
+[2025-09-09 13:41:26] [Rank 0] Group 10 Loss: 5.0492
+[2025-09-09 13:41:26] [Rank 0] Group 11 Loss: 5.0682
+[2025-09-09 13:41:26] [Rank 0] Group 12 Loss: 5.0276
+[2025-09-09 13:41:26] [Rank 0] Group 13 Loss: 5.1367
+[2025-09-09 13:41:26] [Rank 0] Group 14 Loss: 5.1040
+[2025-09-09 13:41:26] [Rank 0] Group 15 Loss: 5.0437
+[2025-09-09 13:41:26] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 13:41:26] [Rank 0] Group 13 FTA: 0.9900
+[2025-09-09 13:41:26] [Rank 0] Group 14 FTA: 0.9900
+[2025-09-09 13:41:26] [Rank 0] Group 15 FTA: 0.9200
+[2025-09-09 13:41:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:41:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:41:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:41:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:41:28] [Rank 0] step:8001/10000 train_time:379766ms step_avg:47.46ms
+[2025-09-09 13:41:29] [Rank 0] step:8021/10000 train_time:380658ms step_avg:47.46ms
+[2025-09-09 13:41:30] [Rank 0] step:8041/10000 train_time:381487ms step_avg:47.44ms
+[2025-09-09 13:41:30] [Rank 0] step:8061/10000 train_time:382313ms step_avg:47.43ms
+[2025-09-09 13:41:31] [Rank 0] step:8081/10000 train_time:383140ms step_avg:47.41ms
+[2025-09-09 13:41:32] [Rank 0] step:8101/10000 train_time:383968ms step_avg:47.40ms
+[2025-09-09 13:41:33] [Rank 0] step:8121/10000 train_time:384795ms step_avg:47.38ms
+[2025-09-09 13:41:34] [Rank 0] step:8141/10000 train_time:386134ms step_avg:47.43ms
+[2025-09-09 13:41:35] [Rank 0] step:8161/10000 train_time:386961ms step_avg:47.42ms
+[2025-09-09 13:41:36] [Rank 0] step:8181/10000 train_time:387788ms step_avg:47.40ms
+[2025-09-09 13:41:37] [Rank 0] step:8201/10000 train_time:388616ms step_avg:47.39ms
+[2025-09-09 13:41:38] [Rank 0] step:8221/10000 train_time:389444ms step_avg:47.37ms
+[2025-09-09 13:41:38] [Rank 0] step:8241/10000 train_time:390272ms step_avg:47.36ms
+[2025-09-09 13:41:39] [Rank 0] step:8261/10000 train_time:391099ms step_avg:47.34ms
+[2025-09-09 13:41:40] [Rank 0] step:8281/10000 train_time:391929ms step_avg:47.33ms
+[2025-09-09 13:41:41] [Rank 0] step:8301/10000 train_time:392754ms step_avg:47.31ms
+[2025-09-09 13:41:42] [Rank 0] step:8321/10000 train_time:393582ms step_avg:47.30ms
+[2025-09-09 13:41:43] [Rank 0] step:8341/10000 train_time:394411ms step_avg:47.29ms
+[2025-09-09 13:41:43] [Rank 0] step:8361/10000 train_time:395239ms step_avg:47.27ms
+[2025-09-09 13:41:44] [Rank 0] step:8381/10000 train_time:396067ms step_avg:47.26ms
+[2025-09-09 13:41:45] [Rank 0] step:8401/10000 train_time:396895ms step_avg:47.24ms
+[2025-09-09 13:41:46] [Rank 0] step:8421/10000 train_time:397723ms step_avg:47.23ms
+[2025-09-09 13:41:47] [Rank 0] step:8441/10000 train_time:398552ms step_avg:47.22ms
+[2025-09-09 13:41:48] [Rank 0] step:8461/10000 train_time:399380ms step_avg:47.20ms
+[2025-09-09 13:41:48] [Rank 0] step:8481/10000 train_time:400208ms step_avg:47.19ms
+[2025-09-09 13:41:49] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
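With 16 groups of 100 fixed-eval samples each (matching "per_group_k": 100 in the config and the 1600-sample fixed-eval set), the weighted and unweighted totals coincide, which is why both read 0.9938 at step 8000. The unweighted total appears to be the plain mean of the 16 group FTAs; reproducing it from the values above:

    # Recompute the step-8000 Total FTA (Unweighted) from the per-group values.
    group_fta = [1.0] * 13 + [0.99, 0.99, 0.92]  # groups 0-12, then 13, 14, 15
    print(f"{sum(group_fta) / len(group_fta):.4f}")  # 0.9938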
+[2025-09-09 13:41:50] [Rank 0] PRINT: step:8500/10000 train_loss:0.6113 val_loss:0.6070 train_time:401039ms step_avg:47.18ms
+[2025-09-09 13:41:50] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:41:50] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:43:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:43:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:43:11] [Rank 0] Total Loss: 4.8791
+[2025-09-09 13:43:11] [Rank 0] Total FTA (Unweighted): 0.9950
+[2025-09-09 13:43:11] [Rank 0] Total FTA (Weighted): 0.9950
+[2025-09-09 13:43:11] [Rank 0] Group 0 Loss: 4.7948
+[2025-09-09 13:43:11] [Rank 0] Group 1 Loss: 4.4906
+[2025-09-09 13:43:11] [Rank 0] Group 2 Loss: 4.3946
+[2025-09-09 13:43:11] [Rank 0] Group 3 Loss: 4.7706
+[2025-09-09 13:43:11] [Rank 0] Group 4 Loss: 4.7514
+[2025-09-09 13:43:11] [Rank 0] Group 5 Loss: 4.7994
+[2025-09-09 13:43:11] [Rank 0] Group 6 Loss: 4.7446
+[2025-09-09 13:43:11] [Rank 0] Group 7 Loss: 4.8119
+[2025-09-09 13:43:11] [Rank 0] Group 8 Loss: 4.9739
+[2025-09-09 13:43:11] [Rank 0] Group 9 Loss: 4.9295
+[2025-09-09 13:43:11] [Rank 0] Group 10 Loss: 5.0631
+[2025-09-09 13:43:11] [Rank 0] Group 11 Loss: 5.0947
+[2025-09-09 13:43:11] [Rank 0] Group 12 Loss: 5.0655
+[2025-09-09 13:43:11] [Rank 0] Group 13 Loss: 5.1574
+[2025-09-09 13:43:11] [Rank 0] Group 14 Loss: 5.1381
+[2025-09-09 13:43:11] [Rank 0] Group 15 Loss: 5.0859
+[2025-09-09 13:43:11] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 13:43:11] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 13:43:11] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 13:43:12] [Rank 0] Group 15 FTA: 0.9300
+[2025-09-09 13:43:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:43:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:43:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:43:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:43:13] [Rank 0] step:8501/10000 train_time:401057ms step_avg:47.18ms
+[2025-09-09 13:43:14] [Rank 0] step:8521/10000 train_time:401902ms step_avg:47.17ms
+[2025-09-09 13:43:15] [Rank 0] step:8541/10000 train_time:402729ms step_avg:47.15ms
+[2025-09-09 13:43:15] [Rank 0] step:8561/10000 train_time:403557ms step_avg:47.14ms
+[2025-09-09 13:43:16] [Rank 0] step:8581/10000 train_time:404387ms step_avg:47.13ms
+[2025-09-09 13:43:17] [Rank 0] step:8601/10000 train_time:405214ms step_avg:47.11ms
+[2025-09-09 13:43:18] [Rank 0] step:8621/10000 train_time:406042ms step_avg:47.10ms
+[2025-09-09 13:43:19] [Rank 0] step:8641/10000 train_time:406870ms step_avg:47.09ms
+[2025-09-09 13:43:20] [Rank 0] step:8661/10000 train_time:407698ms step_avg:47.07ms
+[2025-09-09 13:43:20] [Rank 0] step:8681/10000 train_time:408526ms step_avg:47.06ms
+[2025-09-09 13:43:21] [Rank 0] step:8701/10000 train_time:409355ms step_avg:47.05ms
+[2025-09-09 13:43:22] [Rank 0] step:8721/10000 train_time:410184ms step_avg:47.03ms
+[2025-09-09 13:43:23] [Rank 0] step:8741/10000 train_time:411013ms step_avg:47.02ms
+[2025-09-09 13:43:24] [Rank 0] step:8761/10000 train_time:411841ms step_avg:47.01ms
+[2025-09-09 13:43:25] [Rank 0] step:8781/10000 train_time:412669ms step_avg:47.00ms
+[2025-09-09 13:43:25] [Rank 0] step:8801/10000 train_time:413497ms step_avg:46.98ms
+[2025-09-09 13:43:26] [Rank 0] step:8821/10000 train_time:414325ms step_avg:46.97ms
+[2025-09-09 13:43:28] [Rank 0] step:8841/10000 train_time:415854ms step_avg:47.04ms
+[2025-09-09 13:43:29] [Rank 0] step:8861/10000 train_time:416681ms step_avg:47.02ms
+[2025-09-09 13:43:29] [Rank 0] step:8881/10000 train_time:417511ms step_avg:47.01ms
+[2025-09-09 13:43:30] [Rank 0] step:8901/10000 train_time:418336ms step_avg:47.00ms
+[2025-09-09 13:43:31] [Rank 0] step:8921/10000 train_time:419163ms step_avg:46.99ms
+[2025-09-09 13:43:32] [Rank 0] step:8941/10000 train_time:419990ms step_avg:46.97ms
+[2025-09-09 13:43:33] [Rank 0] step:8961/10000 train_time:420816ms step_avg:46.96ms
+[2025-09-09 13:43:34] [Rank 0] step:8981/10000 train_time:421642ms step_avg:46.95ms
+[2025-09-09 13:43:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
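Because the per-class curves are only persisted as PNGs, it can be handy to rebuild a series straight from a log like this one. A small parsing sketch (a hypothetical helper, not part of the logged script) that recovers the step/timing entries:

    import re

    # Matches e.g. "step:8641/10000 train_time:406870ms step_avg:47.09ms"
    STEP_RE = re.compile(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms")

    def parse_steps(log_text: str):
        """Yield (step, train_time_ms, step_avg_ms), skipping repeated lines."""
        seen = set()
        for m in STEP_RE.finditer(log_text):
            step = int(m.group(1))
            if step not in seen:  # raw logs may carry each line twice
                seen.add(step)
                yield step, int(m.group(2)), float(m.group(3))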
+[2025-09-09 13:43:35] [Rank 0] PRINT: step:9000/10000 train_loss:0.6094 val_loss:0.6060 train_time:422471ms step_avg:46.94ms
+[2025-09-09 13:43:35] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:43:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:44:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:44:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:44:57] [Rank 0] Total Loss: 4.8970
+[2025-09-09 13:44:57] [Rank 0] Total FTA (Unweighted): 0.9981
+[2025-09-09 13:44:57] [Rank 0] Total FTA (Weighted): 0.9981
+[2025-09-09 13:44:57] [Rank 0] Group 0 Loss: 4.7802
+[2025-09-09 13:44:57] [Rank 0] Group 1 Loss: 4.5352
+[2025-09-09 13:44:57] [Rank 0] Group 2 Loss: 4.4241
+[2025-09-09 13:44:57] [Rank 0] Group 3 Loss: 4.7857
+[2025-09-09 13:44:57] [Rank 0] Group 4 Loss: 4.7515
+[2025-09-09 13:44:57] [Rank 0] Group 5 Loss: 4.8230
+[2025-09-09 13:44:57] [Rank 0] Group 6 Loss: 4.7843
+[2025-09-09 13:44:57] [Rank 0] Group 7 Loss: 4.8215
+[2025-09-09 13:44:57] [Rank 0] Group 8 Loss: 5.0035
+[2025-09-09 13:44:57] [Rank 0] Group 9 Loss: 4.9576
+[2025-09-09 13:44:57] [Rank 0] Group 10 Loss: 5.0829
+[2025-09-09 13:44:57] [Rank 0] Group 11 Loss: 5.1226
+[2025-09-09 13:44:57] [Rank 0] Group 12 Loss: 5.0741
+[2025-09-09 13:44:57] [Rank 0] Group 13 Loss: 5.1793
+[2025-09-09 13:44:57] [Rank 0] Group 14 Loss: 5.1420
+[2025-09-09 13:44:57] [Rank 0] Group 15 Loss: 5.0841
+[2025-09-09 13:44:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 13:44:57] [Rank 0] Group 15 FTA: 0.9700
+[2025-09-09 13:44:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:44:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:44:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:44:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:44:58] [Rank 0] step:9001/10000 train_time:422489ms step_avg:46.94ms
+[2025-09-09 13:44:59] [Rank 0] step:9021/10000 train_time:423324ms step_avg:46.93ms
+[2025-09-09 13:45:00] [Rank 0] step:9041/10000 train_time:424151ms step_avg:46.91ms
+[2025-09-09 13:45:01] [Rank 0] step:9061/10000 train_time:424980ms step_avg:46.90ms
+[2025-09-09 13:45:02] [Rank 0] step:9081/10000 train_time:425900ms step_avg:46.90ms
+[2025-09-09 13:45:02] [Rank 0] step:9101/10000 train_time:426727ms step_avg:46.89ms
+[2025-09-09 13:45:03] [Rank 0] step:9121/10000 train_time:427554ms step_avg:46.88ms
+[2025-09-09 13:45:04] [Rank 0] step:9141/10000 train_time:428382ms step_avg:46.86ms
+[2025-09-09 13:45:05] [Rank 0] step:9161/10000 train_time:429209ms step_avg:46.85ms
+[2025-09-09 13:45:06] [Rank 0] step:9181/10000 train_time:430037ms step_avg:46.84ms
+[2025-09-09 13:45:07] [Rank 0] step:9201/10000 train_time:430865ms step_avg:46.83ms
+[2025-09-09 13:45:07] [Rank 0] step:9221/10000 train_time:431691ms step_avg:46.82ms
+[2025-09-09 13:45:08] [Rank 0] step:9241/10000 train_time:432518ms step_avg:46.80ms
+[2025-09-09 13:45:09] [Rank 0] step:9261/10000 train_time:433346ms step_avg:46.79ms
+[2025-09-09 13:45:10] [Rank 0] step:9281/10000 train_time:434173ms step_avg:46.78ms
+[2025-09-09 13:45:11] [Rank 0] step:9301/10000 train_time:435001ms step_avg:46.77ms
+[2025-09-09 13:45:11] [Rank 0] step:9321/10000 train_time:435828ms step_avg:46.76ms
+[2025-09-09 13:45:12] [Rank 0] step:9341/10000 train_time:436655ms step_avg:46.75ms
+[2025-09-09 13:45:13] [Rank 0] step:9361/10000 train_time:437485ms step_avg:46.73ms
+[2025-09-09 13:45:14] [Rank 0] step:9381/10000 train_time:438309ms step_avg:46.72ms
+[2025-09-09 13:45:15] [Rank 0] step:9401/10000 train_time:439136ms step_avg:46.71ms
+[2025-09-09 13:45:16] [Rank 0] step:9421/10000 train_time:439964ms step_avg:46.70ms
+[2025-09-09 13:45:16] [Rank 0] step:9441/10000 train_time:440791ms step_avg:46.69ms
+[2025-09-09 13:45:17] [Rank 0] step:9461/10000 train_time:441619ms step_avg:46.68ms
+[2025-09-09 13:45:18] [Rank 0] step:9481/10000 train_time:442448ms step_avg:46.67ms
+[2025-09-09 13:45:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:45:19] [Rank 0] PRINT: step:9500/10000 train_loss:0.6081 val_loss:0.6051 train_time:443279ms step_avg:46.66ms
+[2025-09-09 13:45:19] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:45:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:46:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:46:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:46:40] [Rank 0] Total Loss: 4.9292
+[2025-09-09 13:46:40] [Rank 0] Total FTA (Unweighted): 0.9988
+[2025-09-09 13:46:40] [Rank 0] Total FTA (Weighted): 0.9988
+[2025-09-09 13:46:40] [Rank 0] Group 0 Loss: 4.7932
+[2025-09-09 13:46:40] [Rank 0] Group 1 Loss: 4.5672
+[2025-09-09 13:46:40] [Rank 0] Group 2 Loss: 4.4719
+[2025-09-09 13:46:40] [Rank 0] Group 3 Loss: 4.8369
+[2025-09-09 13:46:40] [Rank 0] Group 4 Loss: 4.7896
+[2025-09-09 13:46:40] [Rank 0] Group 5 Loss: 4.8491
+[2025-09-09 13:46:40] [Rank 0] Group 6 Loss: 4.8248
+[2025-09-09 13:46:40] [Rank 0] Group 7 Loss: 4.8428
+[2025-09-09 13:46:40] [Rank 0] Group 8 Loss: 5.0241
+[2025-09-09 13:46:40] [Rank 0] Group 9 Loss: 4.9743
+[2025-09-09 13:46:40] [Rank 0] Group 10 Loss: 5.1146
+[2025-09-09 13:46:40] [Rank 0] Group 11 Loss: 5.1553
+[2025-09-09 13:46:40] [Rank 0] Group 12 Loss: 5.1041
+[2025-09-09 13:46:40] [Rank 0] Group 13 Loss: 5.2117
+[2025-09-09 13:46:40] [Rank 0] Group 14 Loss: 5.1801
+[2025-09-09 13:46:40] [Rank 0] Group 15 Loss: 5.1277
+[2025-09-09 13:46:40] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 13:46:40] [Rank 0] Group 15 FTA: 0.9800
+[2025-09-09 13:46:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:46:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:46:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:46:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:46:42] [Rank 0] step:9501/10000 train_time:443296ms step_avg:46.66ms
+[2025-09-09 13:46:43] [Rank 0] step:9521/10000 train_time:444124ms step_avg:46.65ms
+[2025-09-09 13:46:44] [Rank 0] step:9541/10000 train_time:444951ms step_avg:46.64ms
+[2025-09-09 13:46:44] [Rank 0] step:9561/10000 train_time:445778ms step_avg:46.62ms
+[2025-09-09 13:46:45] [Rank 0] step:9581/10000 train_time:446604ms step_avg:46.61ms
+[2025-09-09 13:46:46] [Rank 0] step:9601/10000 train_time:447432ms step_avg:46.60ms
+[2025-09-09 13:46:47] [Rank 0] step:9621/10000 train_time:448270ms step_avg:46.59ms
+[2025-09-09 13:46:48] [Rank 0] step:9641/10000 train_time:449095ms step_avg:46.58ms
+[2025-09-09 13:46:49] [Rank 0] step:9661/10000 train_time:450203ms step_avg:46.60ms
+[2025-09-09 13:46:50] [Rank 0] step:9681/10000 train_time:451535ms step_avg:46.64ms
+[2025-09-09 13:46:51] [Rank 0] step:9701/10000 train_time:452362ms step_avg:46.63ms
+[2025-09-09 13:46:52] [Rank 0] step:9721/10000 train_time:453189ms step_avg:46.62ms
+[2025-09-09 13:46:53] [Rank 0] step:9741/10000 train_time:454016ms step_avg:46.61ms
+[2025-09-09 13:46:53] [Rank 0] step:9761/10000 train_time:454843ms step_avg:46.60ms
+[2025-09-09 13:46:54] [Rank 0] step:9781/10000 train_time:455670ms step_avg:46.59ms
+[2025-09-09 13:46:55] [Rank 0] step:9801/10000 train_time:456497ms step_avg:46.58ms
+[2025-09-09 13:46:56] [Rank 0] step:9821/10000 train_time:457325ms step_avg:46.57ms
+[2025-09-09 13:46:57] [Rank 0] step:9841/10000 train_time:458152ms step_avg:46.56ms
+[2025-09-09 13:46:58] [Rank 0] step:9861/10000 train_time:458978ms step_avg:46.54ms
+[2025-09-09 13:46:58] [Rank 0] step:9881/10000 train_time:459805ms step_avg:46.53ms
+[2025-09-09 13:46:59] [Rank 0] step:9901/10000 train_time:460631ms step_avg:46.52ms
+[2025-09-09 13:47:00] [Rank 0] step:9921/10000 train_time:461460ms step_avg:46.51ms
+[2025-09-09 13:47:01] [Rank 0] step:9941/10000 train_time:462287ms step_avg:46.50ms
+[2025-09-09 13:47:02] [Rank 0] step:9961/10000 train_time:463114ms step_avg:46.49ms
+[2025-09-09 13:47:03] [Rank 0] step:9981/10000 train_time:463940ms step_avg:46.48ms
+[2025-09-09 13:47:03] [Rank 0] step:10000/10000 train_time:464725ms step_avg:46.47ms
+[2025-09-09 13:47:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
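Across the five checkpoints above, validation loss is still inching down late in the run: 0.6099, 0.6084, 0.6070, 0.6060, 0.6051 at steps 7500 through 9500. The per-checkpoint deltas (a throwaway computation on values copied from the log):

    # val_loss improvements per 500-step checkpoint, steps 7500-9500.
    val_loss = [0.6099, 0.6084, 0.6070, 0.6060, 0.6051]
    print([round(b - a, 4) for a, b in zip(val_loss, val_loss[1:])])
    # -> [-0.0015, -0.0014, -0.001, -0.0009]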
+[2025-09-09 13:47:04] [Rank 0] PRINT: step:10000/10000 train_loss:0.6071 val_loss:0.6045 train_time:464776ms step_avg:46.48ms
+[2025-09-09 13:47:04] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:47:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:48:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:48:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:48:26] [Rank 0] Total Loss: 4.9196
+[2025-09-09 13:48:26] [Rank 0] Total FTA (Unweighted): 1.0000
+[2025-09-09 13:48:26] [Rank 0] Total FTA (Weighted): 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 0 Loss: 4.7534
+[2025-09-09 13:48:26] [Rank 0] Group 1 Loss: 4.5849
+[2025-09-09 13:48:26] [Rank 0] Group 2 Loss: 4.4312
+[2025-09-09 13:48:26] [Rank 0] Group 3 Loss: 4.8126
+[2025-09-09 13:48:26] [Rank 0] Group 4 Loss: 4.7892
+[2025-09-09 13:48:26] [Rank 0] Group 5 Loss: 4.8457
+[2025-09-09 13:48:26] [Rank 0] Group 6 Loss: 4.8220
+[2025-09-09 13:48:26] [Rank 0] Group 7 Loss: 4.8365
+[2025-09-09 13:48:26] [Rank 0] Group 8 Loss: 5.0250
+[2025-09-09 13:48:26] [Rank 0] Group 9 Loss: 4.9776
+[2025-09-09 13:48:26] [Rank 0] Group 10 Loss: 5.1144
+[2025-09-09 13:48:26] [Rank 0] Group 11 Loss: 5.1490
+[2025-09-09 13:48:26] [Rank 0] Group 12 Loss: 5.0866
+[2025-09-09 13:48:26] [Rank 0] Group 13 Loss: 5.1953
+[2025-09-09 13:48:26] [Rank 0] Group 14 Loss: 5.1642
+[2025-09-09 13:48:26] [Rank 0] Group 15 Loss: 5.1256
+[2025-09-09 13:48:26] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 13:48:26] [Rank 0] Group 15 FTA: 1.0000
+[2025-09-09 13:48:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_loss_curves.png
+[2025-09-09 13:48:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/per_class_acc_curves.png
+[2025-09-09 13:48:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_loss_curve.png
+[2025-09-09 13:48:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_42/total_acc_curve.png
+[2025-09-09 13:48:28] [Rank 0] step:10001/10000 train_time:464794ms step_avg:46.47ms
+[2025-09-09 13:48:28] [Rank 0] PRINT: --- Training Finished: Tue Sep 9 13:48:28 2025 ---
+[2025-09-09 13:48:28] [Rank 0] PRINT: Peak memory allocated: 3880 MiB reserved: 6268 MiB
diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/config.json b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..11358013f77bcc03059bb21673579be4c78dabce
--- /dev/null
+++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/config.json
@@ -0,0 +1,29 @@
+{
+  "cli_args": {
+    "unet": false,
+    "seed": 43,
+    "optimizer_mode": 7,
+    "model_parameterization": "gated",
+    "per_group_k": 100,
+    "muon_lr": 0.001,
+    "adam_lr": 0.002,
+    "base_dir": "logs_qa_muon_gated/diff_mode",
+    "sgd_lr": 0.01,
+    "m_val": 15,
+    "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl"
+  },
+  "hyperparameters": {
+    "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin",
+    "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin",
+    "val_tokens": 491520,
+    "train_seq_len": 3072,
+    "val_seq_len": 16384,
+    "num_iterations": 10000,
+    "cooldown_frac": 0.8,
+    "vocab_size": 50257,
+    "val_loss_every": 500,
+    "save_checkpoint": false
+  },
+  "run_uuid_for_log": "b4df7762-1924-45fe-af5d-3dfc741bc957",
+  "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/fixed_eval_indices.json b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/fixed_eval_indices.json
new file mode 100644
index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6
--- /dev/null
+++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/fixed_eval_indices.json
@@ -0,0 +1 @@
+{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], 
"14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 
3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 
1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 
1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]}
\ No newline at end of file
diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..617fd8e33981b184627324c370fd4e360cbadf34
--- /dev/null
+++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14fcebff260d532f829ebb286fb8284a8ee7282ba1c902a0c255e98ac3c6aa34
+size 328094
diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..7e3cbf1e02251d5c853b522aa82b1f1fed721e4e
--- /dev/null
+++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:909b6b8521495ae860e4e0f12893322c66808d06c799ac7cac4ec1f4e8acef43
+size 424421
diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..328463e304b95cb65c1f51d48ee2113be4e8ab6e
--- /dev/null
+++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c34fd046176694ac943f9dd5ddd295cacd56b4b16bfefd77b9ed8e71d608c9d
+size 94333
diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..9addeb1d3ea92af81a8870b9895118e501450ee9
--- /dev/null
+++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bf43d726fbb15f3ad7543532b28a0db7b8f2f7e851997a8a9a689ecccef2008
+size 111619
diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/training_log_b4df7762-1924-45fe-af5d-3dfc741bc957.txt b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/training_log_b4df7762-1924-45fe-af5d-3dfc741bc957.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9c0025c165d7115c3f3df039224c61e158a81dc1
--- /dev/null
+++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/training_log_b4df7762-1924-45fe-af5d-3dfc741bc957.txt
@@ -0,0 +1,5618 @@
+[2025-09-09 13:48:54] [Rank 0] PRINT: --- Script Start: Tue Sep 9 13:48:54 2025 ---
+[2025-09-09 13:48:54] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=7, model_parameterization='gated', per_group_k=100, muon_lr=0.001, adam_lr=0.002, base_dir='logs_qa_muon_gated/diff_mode', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-09 13:48:54] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-09 13:48:54] [Rank 0] PRINT: Using fixed seed: 43
+[2025-09-09 13:48:54] [Rank 0] PRINT: Run directory: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43
+[2025-09-09 13:48:54] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
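+# The .bin shards read above follow a simple layout, inferred from the asserts
+# in _load_data_shard: a 256-entry int32 header whose first three entries are
+# the magic number 20240520, version 1, and the token count, followed by the
+# tokens themselves as uint16. A minimal writer consistent with that reader
+# might look like the sketch below (illustrative only; write_data_shard is not
+# part of the original script, and file names must still match the *.bin glob):
+#
+#   def write_data_shard(path, tokens):
+#       header = np.zeros(256, dtype=np.int32)
+#       header[0], header[1], header[2] = 20240520, 1, len(tokens)
+#       with open(path, "wb") as f:
+#           f.write(header.tobytes())
+#           f.write(np.asarray(tokens, dtype=np.uint16).tobytes())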
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn, MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn, MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (no Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                         "9: pure SGD+momentum on all parameters; "
+                         "10: Muon(O Attn, MLP)/Adam(QK, V Attn); "
+                         "13: Muon(O Attn, W_2 MLP)/Adam(QK, V Attn, W_1 MLP); "
+                         "14: Muon(O Attn)/Adam(QK, V Attn, MLP); "
+                         "15: Muon(V Attn)/Adam(QK, O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo", "gated"])
+parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group")
+parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.")
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs")
+parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).")
+parser.add_argument("--m_val", type=int, default=15,
+                    help="Power-law exponent m used by the dataset generator.")
+parser.add_argument("--qa_jsonl_path", type=str,
+                    default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl",
+                    help="Path to the QA jsonl used for evaluation (fixed eval set).")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+M_FOR_POWERLAW: int = exp_args.m_val
+QA_JSONL_PATH: str = exp_args.qa_jsonl_path
+PER_GROUP_K: int = exp_args.per_group_k
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "gated":
+    print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention")
+    from models.nano_GPT_gated import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
#val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + 
    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file exactly once (a second, duplicated
+        # append here is what doubled every line in the earlier logs).
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
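+# Worked example (editorial sketch) of what this produces for m = 3:
+#   group 0: 1 class  with 2^3 = 8 samples
+#   group 1: 1 class  with 2^2 = 4 samples
+#   group 2: 2 classes with 2^1 = 2 samples each
+#   group 3: 4 classes with 2^0 = 1 sample each
+# i.e. selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+# and class_groups = [0, 1, 2, 2, 3, 3, 3, 3]. For the m = 15 runs logged here
+# this yields 2^15 = 32768 classes in 16 groups; group 0 holds 2^15 samples in
+# a single head class, and every other group holds 2^14 samples in total.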
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = 
F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def 
evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. + (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_2 MLP, W_O Attn
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O Attn only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V Attn only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
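+        # Sanity-check sketch (editorial example; assumes both optimizers expose
+        # PyTorch-style param_groups and that the groups built above are meant
+        # to be disjoint): every parameter should land in exactly one optimizer.
+        _assigned_ids = [id(p) for opt in optimizers for g in opt.param_groups for p in g["params"]]
+        if len(_assigned_ids) != len(set(_assigned_ids)):
+            print0("PRINT: WARNING: a parameter is assigned to more than one optimizer group.", console=True)
+        if set(_assigned_ids) != {id(p) for p in model.parameters()}:
+            print0("PRINT: WARNING: some parameters are not covered by any optimizer.", console=True)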
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+    muon_lr = exp_args.muon_lr
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
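+    # Note (editorial): in the gated parameterization the MLP carries three
+    # weight matrices; c_fc and c_up presumably form the first layer of a gated
+    # unit, roughly c_proj(act(c_fc(x)) * c_up(x)) (the model file itself is not
+    # shown in this log), which is why c_fc and c_up are grouped together as
+    # W_1 while c_proj alone plays the role of W_2.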
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
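+    # Note: modes 11 and 12 are intentionally absent; any unlisted mode falls
+    # through to the ValueError below.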
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-09 13:48:54] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
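+# Note on print0: only the master process writes; every message is appended to
+# the logfile with a timestamp/rank prefix, and messages passed console=True or
+# prefixed with "PRINT:" are also echoed to stdout (the "PRINT:" prefix, when
+# present, is stripped from the console copy).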
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
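+    # Modes 10-16 split the attention value (attn_v_params) and output
+    # (attn_o_params) matrices individually instead of using the fused VO group.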
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
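One invariant every mode above is meant to preserve: each matrix parameter lands in exactly one of the two target lists. A minimal editor's sketch of that check follows (not part of the logged script; `check_mode_partition` and its use of `id()`-based sets are illustrative, mirroring the flattening convention above):

# Editor's sketch: assert a mode's Muon/Adam matrix lists are disjoint and
# together cover every matrix parameter. Hypothetical helper, not in the run.
def check_mode_partition(muon_list, adam_list, all_matrices):
    muon_ids = {id(p) for p in muon_list}
    adam_ids = {id(p) for p in adam_list}
    assert not (muon_ids & adam_ids), "a matrix is assigned to both optimizers"
    covered = muon_ids | adam_ids
    missing = [tuple(p.shape) for p in all_matrices if id(p) not in covered]
    assert not missing, f"matrices assigned to neither optimizer: {missing}"

# e.g. for mode 7: check_mode_partition(attn_vo_group + all_mlp_matrices,
#                                        attn_qk_group,
#                                        all_attn_matrices + all_mlp_matrices)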
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+    muon_lr = exp_args.muon_lr
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
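Concretely, the schedule is flat at the base LR and then anneals linearly to a 0.1 floor over the cooldown fraction. An editor's sketch of the multiplier, standalone and assuming this run's settings (num_iterations=10000; cooldown_frac=0.8 is taken from the CLI config):

# Editor's sketch: get_lr() above, as a pure function with this run's values.
def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)
    return w * 1.0 + (1 - w) * 0.1

# lr_multiplier(1000)  == 1.0   (stable phase: x = 0.1 < 0.2)
# lr_multiplier(6000)  == 0.55  (x = 0.6, w = 0.5 -> halfway to the 0.1 floor)
# lr_multiplier(10000) == 0.1   (end of cooldown)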
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
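To make the output shape of build_fixed_eval_indices concrete, here is an editor's sketch on toy in-memory data (hypothetical rows and mapping, not from the run; it assumes build_fixed_eval_indices is in scope):

# Editor's sketch: a toy JSONL exercising build_fixed_eval_indices.
import json, tempfile

def _demo_fixed_eval():
    rows = [{"class_id": i % 3, "text": f"Q{i}? Answer: A{i}"} for i in range(10)]
    with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False) as f:
        f.write("\n".join(json.dumps(r) for r in rows))
    # classes 0 and 1 map to group 0, class 2 to group 1
    fixed = build_fixed_eval_indices(f.name, {0: 0, 1: 0, 2: 1}, per_group_k=2)
    # -> {"0": [two line indices], "1": [two line indices]},
    #    sampled deterministically with seed 2025
    return fixed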
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
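The saved fixed_eval_indices.json maps each group id (as a string) to the sampled JSONL line numbers, so every evaluation pass reuses exactly the same examples. An editor's sketch of reading it back (illustrative only):

# Editor's sketch: inspect the persisted eval set.
with open(FIXED_VAL_INDEX_PATH) as f:
    idx = json.load(f)
print({gid: len(lines) for gid, lines in sorted(idx.items(), key=lambda kv: int(kv[0]))})
# With 16 groups and per_group_k=100 this gives 16 x 100 samples, matching the
# "Fixed-eval set loaded with 1600 samples." lines in the log below.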
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
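For completeness, restoring one of these checkpoints reverses the dict written above. An editor's sketch (save_checkpoint is false in this run, so this is hypothetical; keys match log_checkpoint, the path is illustrative):

# Editor's sketch: resume from a ckpt_epoch_{step}.pt written by the block above.
ckpt = torch.load("checkpoints/ckpt_epoch_500.pt", map_location="cuda")
model_compiled.load_state_dict(ckpt["model"])
for opt, state in zip(optimizers, ckpt["optimizers"]):
    opt.load_state_dict(state)
start_step = ckpt["step"] + 1  # continue after the saved step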
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
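The Muon momentum warmup in the training loop above ramps linearly from 0.85 to 0.95 over the first 300 steps and then holds. An editor's sketch tabulating it (plain arithmetic, not part of the logged script):

# Editor's sketch: values taken by the momentum schedule above.
for s in (0, 150, 300, 1000):
    frac = min(s / 300, 1)
    print(s, (1 - frac) * 0.85 + frac * 0.95)
# 0 -> 0.85, 150 -> 0.90, 300 -> 0.95, 1000 -> 0.95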
+[2025-09-09 13:48:54] [Rank 0] PRINT: Constructing model...
+[2025-09-09 13:48:56] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-09 13:48:56] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-09 13:48:56] [Rank 0] PRINT: Testing model forward function:
+[2025-09-09 13:49:00] [Rank 0] PRINT: Model test - Result type:
+[2025-09-09 13:49:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-09 13:49:00] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-09 13:49:00] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-09 13:49:00] [Rank 0] PRINT: Model returns:
+[2025-09-09 13:49:00] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-09 13:49:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 7
+[2025-09-09 13:49:00] [Rank 0] PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: 0.002).
+[2025-09-09 13:49:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-09-09 13:49:00] [Rank 0] PRINT: Muon optimizer is active with 48 parameters.
+[2025-09-09 13:49:00] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-09 13:49:05] [Rank 0] PRINT: Model compilation complete.
+[2025-09-09 13:49:05] [Rank 0] PRINT: Starting warmup...
+[2025-09-09 13:50:09] [Rank 0] PRINT: Warmup complete.
+[2025-09-09 13:50:09] [Rank 0] PRINT: Starting training...
+[2025-09-09 13:50:16] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/fixed_eval_indices.json
+[2025-09-09 13:50:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:50:20] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-09 13:50:55] [Rank 0] step:21/10000 train_time:34129ms step_avg:1625.17ms
+[2025-09-09 13:50:55] [Rank 0] step:41/10000 train_time:34938ms step_avg:852.14ms
+[2025-09-09 13:50:56] [Rank 0] step:61/10000 train_time:35747ms step_avg:586.02ms
+[2025-09-09 13:50:57] [Rank 0] step:81/10000 train_time:36553ms step_avg:451.27ms
+[2025-09-09 13:50:58] [Rank 0] step:101/10000 train_time:37361ms step_avg:369.91ms
+[2025-09-09 13:50:59] [Rank 0] step:121/10000 train_time:38169ms step_avg:315.45ms
+[2025-09-09 13:50:59] [Rank 0] step:141/10000 train_time:38978ms step_avg:276.44ms
+[2025-09-09 13:51:00] [Rank 0] step:161/10000 train_time:39785ms step_avg:247.11ms
+[2025-09-09 13:51:01] [Rank 0] step:181/10000 train_time:40594ms step_avg:224.28ms
+[2025-09-09 13:51:02] [Rank 0] step:201/10000 train_time:41569ms step_avg:206.81ms
+[2025-09-09 13:51:03] [Rank 0] step:221/10000 train_time:42708ms step_avg:193.25ms
+[2025-09-09 13:51:04] [Rank 0] step:241/10000 train_time:43517ms step_avg:180.57ms
+[2025-09-09 13:51:05] [Rank 0] step:261/10000 train_time:44325ms step_avg:169.83ms
+[2025-09-09 13:51:06] [Rank 0] step:281/10000 train_time:45133ms step_avg:160.62ms
+[2025-09-09 13:51:06] [Rank 0] step:301/10000 train_time:45942ms step_avg:152.63ms
+[2025-09-09 13:51:07] [Rank 0] step:321/10000 train_time:46749ms step_avg:145.64ms
+[2025-09-09 13:51:08] [Rank 0] step:341/10000 train_time:47556ms step_avg:139.46ms
+[2025-09-09 13:51:09] [Rank 0] step:361/10000 train_time:48364ms step_avg:133.97ms
+[2025-09-09 13:51:10] [Rank 0] step:381/10000 train_time:49173ms step_avg:129.06ms
+[2025-09-09 13:51:10] [Rank 0] step:401/10000 train_time:49984ms step_avg:124.65ms
+[2025-09-09 13:51:11] [Rank 0] step:421/10000 train_time:50792ms step_avg:120.65ms
+[2025-09-09 13:51:12] [Rank 0] step:441/10000 train_time:51601ms step_avg:117.01ms
+[2025-09-09 13:51:13] [Rank 0] step:461/10000 train_time:52411ms step_avg:113.69ms
+[2025-09-09 13:51:14] [Rank 0] step:481/10000 train_time:53220ms step_avg:110.65ms
+[2025-09-09 13:51:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:51:15] [Rank 0] PRINT: step:500/10000 train_loss:2.9185 val_loss:1.0501 train_time:54032ms step_avg:108.06ms
+[2025-09-09 13:51:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:51:15] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:52:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:52:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:52:36] [Rank 0]   Total Loss: 3.6592
+[2025-09-09 13:52:36] [Rank 0]   Total FTA (Unweighted): 0.5331
+[2025-09-09 13:52:36] [Rank 0]   Total FTA (Weighted): 0.5331
+[2025-09-09 13:52:36] [Rank 0]   Group 0 Loss: 3.2561
+[2025-09-09 13:52:36] [Rank 0]   Group 1 Loss: 3.0892
+[2025-09-09 13:52:36] [Rank 0]   Group 2 Loss: 2.9634
+[2025-09-09 13:52:36] [Rank 0]   Group 3 Loss: 3.2718
+[2025-09-09 13:52:36] [Rank 0]   Group 4 Loss: 3.2945
+[2025-09-09 13:52:36] [Rank 0]   Group 5 Loss: 3.3340
+[2025-09-09 13:52:36] [Rank 0]   Group 6 Loss: 3.3472
+[2025-09-09 13:52:36] [Rank 0]   Group 7 Loss: 3.4762
+[2025-09-09 13:52:36] [Rank 0]   Group 8 Loss: 3.7174
+[2025-09-09 13:52:36] [Rank 0]   Group 9 Loss: 3.8040
+[2025-09-09 13:52:36] [Rank 0]   Group 10 Loss: 3.9958
+[2025-09-09 13:52:36] [Rank 0]   Group 11 Loss: 4.0557
+[2025-09-09 13:52:36] [Rank 0]   Group 12 Loss: 4.1431
+[2025-09-09 13:52:36] [Rank 0]   Group 13 Loss: 4.2405
+[2025-09-09 13:52:36] [Rank 0]   Group 14 Loss: 4.2589
+[2025-09-09 13:52:36] [Rank 0]   Group 15 Loss: 4.2986
+[2025-09-09 13:52:36] [Rank 0]   Group 0 FTA: 1.0000
+[2025-09-09 13:52:36] [Rank 0]   Group 1 FTA: 1.0000
+[2025-09-09 13:52:36] [Rank 0]   Group 2 FTA: 1.0000
+[2025-09-09 13:52:36] [Rank 0]   Group 3 FTA: 1.0000
+[2025-09-09 13:52:36] [Rank 0]   Group 4 FTA: 1.0000
+[2025-09-09 13:52:36] [Rank 0]   Group 5 FTA: 0.8700
+[2025-09-09 13:52:36] [Rank 0]   Group 6 FTA: 0.5700
+[2025-09-09 13:52:36] [Rank 0]   Group 7 FTA: 0.5300
+[2025-09-09 13:52:36] [Rank 0]   Group 8 FTA: 0.5100
+[2025-09-09 13:52:36] [Rank 0]   Group 9 FTA: 0.2800
+[2025-09-09 13:52:36] [Rank 0]   Group 10 FTA: 0.2500
+[2025-09-09 13:52:36] [Rank 0]   Group 11 FTA: 0.1300
+[2025-09-09 13:52:36] [Rank 0]   Group 12 FTA: 0.0700
+[2025-09-09 13:52:36] [Rank 0]   Group 13 FTA: 0.1600
+[2025-09-09 13:52:36] [Rank 0]   Group 14 FTA: 0.0900
+[2025-09-09 13:52:36] [Rank 0]   Group 15 FTA: 0.0700
+[2025-09-09 13:52:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 13:52:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 13:52:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 13:52:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 13:52:38] [Rank 0] step:501/10000 train_time:54050ms step_avg:107.88ms
+[2025-09-09 13:52:39] [Rank 0] step:521/10000 train_time:54858ms step_avg:105.29ms
+[2025-09-09 13:52:40] [Rank 0] step:541/10000 train_time:55667ms step_avg:102.90ms
+[2025-09-09 13:52:41] [Rank 0] step:561/10000 train_time:56476ms step_avg:100.67ms
+[2025-09-09 13:52:41] [Rank 0] step:581/10000 train_time:57284ms step_avg:98.60ms
+[2025-09-09 13:52:42] [Rank 0] step:601/10000 train_time:58093ms step_avg:96.66ms
+[2025-09-09 13:52:43] [Rank 0] step:621/10000 train_time:58901ms step_avg:94.85ms
+[2025-09-09 13:52:44] [Rank 0] step:641/10000 train_time:59708ms step_avg:93.15ms
+[2025-09-09 13:52:45] [Rank 0] step:661/10000 train_time:60516ms step_avg:91.55ms
+[2025-09-09 13:52:45] [Rank 0] step:681/10000 train_time:61324ms step_avg:90.05ms
+[2025-09-09 13:52:46] [Rank 0] step:701/10000 train_time:62134ms step_avg:88.64ms
+[2025-09-09 13:52:47] [Rank 0] step:721/10000 train_time:62940ms step_avg:87.29ms
+[2025-09-09 13:52:48] [Rank 0] step:741/10000 train_time:63747ms step_avg:86.03ms
+[2025-09-09 13:52:49] [Rank 0] step:761/10000 train_time:64561ms step_avg:84.84ms
+[2025-09-09 13:52:50] [Rank 0] step:781/10000 train_time:65373ms step_avg:83.70ms
+[2025-09-09 13:52:50] [Rank 0] step:801/10000 train_time:66187ms step_avg:82.63ms
+[2025-09-09 13:52:52] [Rank 0] step:821/10000 train_time:67704ms step_avg:82.46ms
+[2025-09-09 13:52:53] [Rank 0] step:841/10000 train_time:68518ms step_avg:81.47ms
+[2025-09-09 13:52:53] [Rank 0] step:861/10000 train_time:69333ms step_avg:80.53ms
+[2025-09-09 13:52:54] [Rank 0] step:881/10000 train_time:70147ms step_avg:79.62ms
+[2025-09-09 13:52:55] [Rank 0] step:901/10000 train_time:70961ms step_avg:78.76ms
+[2025-09-09 13:52:56] [Rank 0] step:921/10000 train_time:71775ms step_avg:77.93ms
+[2025-09-09 13:52:57] [Rank 0] step:941/10000 train_time:72590ms step_avg:77.14ms
+[2025-09-09 13:52:58] [Rank 0] step:961/10000 train_time:73404ms step_avg:76.38ms
+[2025-09-09 13:52:58] [Rank 0] step:981/10000 train_time:74218ms step_avg:75.66ms
+[2025-09-09 13:52:59] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:53:00] [Rank 0] PRINT: step:1000/10000 train_loss:0.9209 val_loss:0.8226 train_time:75035ms step_avg:75.04ms
+[2025-09-09 13:53:00] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:53:00] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:54:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:54:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:54:21] [Rank 0]   Total Loss: 4.0952
+[2025-09-09 13:54:21] [Rank 0]   Total FTA (Unweighted): 0.7231
+[2025-09-09 13:54:21] [Rank 0]   Total FTA (Weighted): 0.7231
+[2025-09-09 13:54:21] [Rank 0]   Group 0 Loss: 3.8910
+[2025-09-09 13:54:21] [Rank 0]   Group 1 Loss: 3.6938
+[2025-09-09 13:54:21] [Rank 0]   Group 2 Loss: 3.5024
+[2025-09-09 13:54:21] [Rank 0]   Group 3 Loss: 3.9823
+[2025-09-09 13:54:21] [Rank 0]   Group 4 Loss: 3.8948
+[2025-09-09 13:54:21] [Rank 0]   Group 5 Loss: 3.8821
+[2025-09-09 13:54:21] [Rank 0]   Group 6 Loss: 3.8170
+[2025-09-09 13:54:21] [Rank 0]   Group 7 Loss: 3.8897
+[2025-09-09 13:54:21] [Rank 0]   Group 8 Loss: 4.0584
+[2025-09-09 13:54:21] [Rank 0]   Group 9 Loss: 4.0564
+[2025-09-09 13:54:21] [Rank 0]   Group 10 Loss: 4.2332
+[2025-09-09 13:54:21] [Rank 0]   Group 11 Loss: 4.3615
+[2025-09-09 13:54:21] [Rank 0]   Group 12 Loss: 4.4290
+[2025-09-09 13:54:21] [Rank 0]   Group 13 Loss: 4.5484
+[2025-09-09 13:54:21] [Rank 0]   Group 14 Loss: 4.5741
+[2025-09-09 13:54:21] [Rank 0]   Group 15 Loss: 4.7083
+[2025-09-09 13:54:21] [Rank 0]   Group 0 FTA: 1.0000
+[2025-09-09 13:54:21] [Rank 0]   Group 1 FTA: 1.0000
+[2025-09-09 13:54:21] [Rank 0]   Group 2 FTA: 1.0000
+[2025-09-09 13:54:21] [Rank 0]   Group 3 FTA: 1.0000
+[2025-09-09 13:54:21] [Rank 0]   Group 4 FTA: 1.0000
+[2025-09-09 13:54:21] [Rank 0]   Group 5 FTA: 1.0000
+[2025-09-09 13:54:21] [Rank 0]   Group 6 FTA: 1.0000
+[2025-09-09 13:54:21] [Rank 0]   Group 7 FTA: 0.9800
+[2025-09-09 13:54:21] [Rank 0]   Group 8 FTA: 0.8600
+[2025-09-09 13:54:21] [Rank 0]   Group 9 FTA: 0.7000
+[2025-09-09 13:54:21] [Rank 0]   Group 10 FTA: 0.7300
+[2025-09-09 13:54:21] [Rank 0]   Group 11 FTA: 0.5200
+[2025-09-09 13:54:21] [Rank 0]   Group 12 FTA: 0.3600
+[2025-09-09 13:54:21] [Rank 0]   Group 13 FTA: 0.1500
+[2025-09-09 13:54:21] [Rank 0]   Group 14 FTA: 0.1700
+[2025-09-09 13:54:21] [Rank 0]   Group 15 FTA: 0.1000
+[2025-09-09 13:54:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 13:54:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 13:54:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 13:54:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 13:54:22] [Rank 0] step:1001/10000 train_time:75052ms step_avg:74.98ms
+[2025-09-09 13:54:23] [Rank 0] step:1021/10000 train_time:75864ms step_avg:74.30ms
+[2025-09-09 13:54:24] [Rank 0] step:1041/10000 train_time:76678ms step_avg:73.66ms
+[2025-09-09 13:54:25] [Rank 0] step:1061/10000 train_time:77492ms step_avg:73.04ms
+[2025-09-09 13:54:26] [Rank 0] step:1081/10000 train_time:78306ms step_avg:72.44ms
+[2025-09-09 13:54:26] [Rank 0] step:1101/10000 train_time:79120ms step_avg:71.86ms
+[2025-09-09 13:54:27] [Rank 0] step:1121/10000 train_time:79933ms step_avg:71.31ms
+[2025-09-09 13:54:28] [Rank 0] step:1141/10000 train_time:80747ms step_avg:70.77ms
+[2025-09-09 13:54:29] [Rank 0] step:1161/10000 train_time:81559ms step_avg:70.25ms
+[2025-09-09 13:54:30] [Rank 0] step:1181/10000 train_time:82372ms step_avg:69.75ms
+[2025-09-09 13:54:30] [Rank 0] step:1201/10000 train_time:83185ms step_avg:69.26ms
+[2025-09-09 13:54:31] [Rank 0] step:1221/10000 train_time:83998ms step_avg:68.79ms
+[2025-09-09 13:54:32] [Rank 0] step:1241/10000 train_time:84811ms step_avg:68.34ms
+[2025-09-09 13:54:33] [Rank 0] step:1261/10000 train_time:85624ms step_avg:67.90ms
+[2025-09-09 13:54:34] [Rank 0] step:1281/10000 train_time:86436ms step_avg:67.48ms
+[2025-09-09 13:54:35] [Rank 0] step:1301/10000 train_time:87249ms step_avg:67.06ms
+[2025-09-09 13:54:35] [Rank 0] step:1321/10000 train_time:88061ms step_avg:66.66ms
+[2025-09-09 13:54:36] [Rank 0] step:1341/10000 train_time:88875ms step_avg:66.28ms
+[2025-09-09 13:54:37] [Rank 0] step:1361/10000 train_time:89689ms step_avg:65.90ms
+[2025-09-09 13:54:38] [Rank 0] step:1381/10000 train_time:90502ms step_avg:65.53ms
+[2025-09-09 13:54:39] [Rank 0] step:1401/10000 train_time:91316ms step_avg:65.18ms
+[2025-09-09 13:54:39] [Rank 0] step:1421/10000 train_time:92130ms step_avg:64.83ms
+[2025-09-09 13:54:40] [Rank 0] step:1441/10000 train_time:92944ms step_avg:64.50ms
+[2025-09-09 13:54:41] [Rank 0] step:1461/10000 train_time:93758ms step_avg:64.17ms
+[2025-09-09 13:54:42] [Rank 0] step:1481/10000 train_time:94571ms step_avg:63.86ms
+[2025-09-09 13:54:43] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:54:43] [Rank 0] PRINT: step:1500/10000 train_loss:0.7914 val_loss:0.7467 train_time:95388ms step_avg:63.59ms
+[2025-09-09 13:54:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:54:43] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:56:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:56:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:56:05] [Rank 0]   Total Loss: 4.3032
+[2025-09-09 13:56:05] [Rank 0]   Total FTA (Unweighted): 0.8037
+[2025-09-09 13:56:05] [Rank 0]   Total FTA (Weighted): 0.8037
+[2025-09-09 13:56:05] [Rank 0]   Group 0 Loss: 4.1917
+[2025-09-09 13:56:05] [Rank 0]   Group 1 Loss: 3.9781
+[2025-09-09 13:56:05] [Rank 0]   Group 2 Loss: 3.7862
+[2025-09-09 13:56:05] [Rank 0]   Group 3 Loss: 4.2006
+[2025-09-09 13:56:05] [Rank 0]   Group 4 Loss: 4.0995
+[2025-09-09 13:56:05] [Rank 0]   Group 5 Loss: 4.1666
+[2025-09-09 13:56:05] [Rank 0]   Group 6 Loss: 4.0758
+[2025-09-09 13:56:05] [Rank 0]   Group 7 Loss: 4.1439
+[2025-09-09 13:56:05] [Rank 0]   Group 8 Loss: 4.2628
+[2025-09-09 13:56:05] [Rank 0]   Group 9 Loss: 4.2523
+[2025-09-09 13:56:05] [Rank 0]   Group 10 Loss: 4.4233
+[2025-09-09 13:56:05] [Rank 0]   Group 11 Loss: 4.4911
+[2025-09-09 13:56:05] [Rank 0]   Group 12 Loss: 4.5141
+[2025-09-09 13:56:05] [Rank 0]   Group 13 Loss: 4.6869
+[2025-09-09 13:56:05] [Rank 0]   Group 14 Loss: 4.7329
+[2025-09-09 13:56:05] [Rank 0]   Group 15 Loss: 4.8460
+[2025-09-09 13:56:05] [Rank 0]   Group 0 FTA: 1.0000
+[2025-09-09 13:56:05] [Rank 0]   Group 1 FTA: 1.0000
+[2025-09-09 13:56:05] [Rank 0]   Group 2 FTA: 1.0000
+[2025-09-09 13:56:05] [Rank 0]   Group 3 FTA: 1.0000
+[2025-09-09 13:56:05] [Rank 0]   Group 4 FTA: 1.0000
+[2025-09-09 13:56:05] [Rank 0]   Group 5 FTA: 1.0000
+[2025-09-09 13:56:05] [Rank 0]   Group 6 FTA: 1.0000
+[2025-09-09 13:56:05] [Rank 0]   Group 7 FTA: 1.0000
+[2025-09-09 13:56:05] [Rank 0]   Group 8 FTA: 0.9900
+[2025-09-09 13:56:05] [Rank 0]   Group 9 FTA: 0.9000
+[2025-09-09 13:56:05] [Rank 0]   Group 10 FTA: 0.9100
+[2025-09-09 13:56:05] [Rank 0]   Group 11 FTA: 0.8000
+[2025-09-09 13:56:05] [Rank 0]   Group 12 FTA: 0.6200
+[2025-09-09 13:56:05] [Rank 0]   Group 13 FTA: 0.3500
+[2025-09-09 13:56:05] [Rank 0]   Group 14 FTA: 0.1900
+[2025-09-09 13:56:05] [Rank 0]   Group 15 FTA: 0.1000
+[2025-09-09 13:56:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 13:56:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 13:56:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 13:56:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 13:56:06] [Rank 0] step:1501/10000 train_time:95405ms step_avg:63.56ms
+[2025-09-09 13:56:07] [Rank 0] step:1521/10000 train_time:96222ms step_avg:63.26ms
+[2025-09-09 13:56:08] [Rank 0] step:1541/10000 train_time:97036ms step_avg:62.97ms
+[2025-09-09 13:56:08] [Rank 0] step:1561/10000 train_time:97850ms step_avg:62.68ms
+[2025-09-09 13:56:09] [Rank 0] step:1581/10000 train_time:98664ms step_avg:62.41ms
+[2025-09-09 13:56:10] [Rank 0] step:1601/10000 train_time:99479ms step_avg:62.14ms
+[2025-09-09 13:56:11] [Rank 0] step:1621/10000 train_time:100293ms step_avg:61.87ms
+[2025-09-09 13:56:12] [Rank 0] step:1641/10000 train_time:101378ms step_avg:61.78ms
+[2025-09-09 13:56:13] [Rank 0] step:1661/10000 train_time:102192ms step_avg:61.52ms
+[2025-09-09 13:56:14] [Rank 0] step:1681/10000 train_time:103006ms step_avg:61.28ms
+[2025-09-09 13:56:14] [Rank 0] step:1701/10000 train_time:103820ms step_avg:61.03ms
+[2025-09-09 13:56:15] [Rank 0] step:1721/10000 train_time:104634ms step_avg:60.80ms
+[2025-09-09 13:56:16] [Rank 0] step:1741/10000 train_time:105447ms step_avg:60.57ms
+[2025-09-09 13:56:17] [Rank 0] step:1761/10000 train_time:106260ms step_avg:60.34ms
+[2025-09-09 13:56:18] [Rank 0] step:1781/10000 train_time:107073ms step_avg:60.12ms
+[2025-09-09 13:56:19] [Rank 0] step:1801/10000 train_time:108142ms step_avg:60.05ms
+[2025-09-09 13:56:20] [Rank 0] step:1821/10000 train_time:109166ms step_avg:59.95ms
+[2025-09-09 13:56:21] [Rank 0] step:1841/10000 train_time:109979ms step_avg:59.74ms
+[2025-09-09 13:56:21] [Rank 0] step:1861/10000 train_time:110792ms step_avg:59.53ms
+[2025-09-09 13:56:22] [Rank 0] step:1881/10000 train_time:111605ms step_avg:59.33ms
+[2025-09-09 13:56:23] [Rank 0] step:1901/10000 train_time:112418ms step_avg:59.14ms
+[2025-09-09 13:56:24] [Rank 0] step:1921/10000 train_time:113231ms step_avg:58.94ms
+[2025-09-09 13:56:25] [Rank 0] step:1941/10000 train_time:114045ms step_avg:58.76ms
+[2025-09-09 13:56:25] [Rank 0] step:1961/10000 train_time:114859ms step_avg:58.57ms
+[2025-09-09 13:56:26] [Rank 0] step:1981/10000 train_time:115673ms step_avg:58.39ms
+[2025-09-09 13:56:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:56:28] [Rank 0] PRINT: step:2000/10000 train_loss:0.7391 val_loss:0.7076 train_time:116489ms step_avg:58.24ms
+[2025-09-09 13:56:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:56:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:57:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:57:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:57:49] [Rank 0] Total Loss: 4.5772
+[2025-09-09 13:57:49] [Rank 0] Total FTA (Unweighted): 0.8375
+[2025-09-09 13:57:49] [Rank 0] Total FTA (Weighted): 0.8375
+[2025-09-09 13:57:49] [Rank 0] Group 0 Loss: 4.4984
+[2025-09-09 13:57:49] [Rank 0] Group 1 Loss: 4.1783
+[2025-09-09 13:57:49] [Rank 0] Group 2 Loss: 4.0447
+[2025-09-09 13:57:49] [Rank 0] Group 3 Loss: 4.4879
+[2025-09-09 13:57:49] [Rank 0] Group 4 Loss: 4.3961
+[2025-09-09 13:57:49] [Rank 0] Group 5 Loss: 4.4107
+[2025-09-09 13:57:49] [Rank 0] Group 6 Loss: 4.4028
+[2025-09-09 13:57:49] [Rank 0] Group 7 Loss: 4.4139
+[2025-09-09 13:57:49] [Rank 0] Group 8 Loss: 4.5710
+[2025-09-09 13:57:49] [Rank 0] Group 9 Loss: 4.5572
+[2025-09-09 13:57:49] [Rank 0] Group 10 Loss: 4.7186
+[2025-09-09 13:57:49] [Rank 0] Group 11 Loss: 4.8036
+[2025-09-09 13:57:49] [Rank 0] Group 12 Loss: 4.7693
+[2025-09-09 13:57:49] [Rank 0] Group 13 Loss: 4.9412
+[2025-09-09 13:57:49] [Rank 0] Group 14 Loss: 4.9315
+[2025-09-09 13:57:49] [Rank 0] Group 15 Loss: 5.1098
+[2025-09-09 13:57:49] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:57:49] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:57:49] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:57:49] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:57:49] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:57:49] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:57:49] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:57:49] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:57:49] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:57:49] [Rank 0] Group 9 FTA: 0.9600
+[2025-09-09 13:57:49] [Rank 0] Group 10 FTA: 0.9700
+[2025-09-09 13:57:49] [Rank 0] Group 11 FTA: 0.8600
+[2025-09-09 13:57:49] [Rank 0] Group 12 FTA: 0.7500
+[2025-09-09 13:57:49] [Rank 0] Group 13 FTA: 0.4600
+[2025-09-09 13:57:49] [Rank 0] Group 14 FTA: 0.2500
+[2025-09-09 13:57:49] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-09 13:57:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 13:57:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 13:57:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 13:57:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 13:57:51] [Rank 0] step:2001/10000 train_time:116505ms step_avg:58.22ms
+[2025-09-09 13:57:52] [Rank 0] step:2021/10000 train_time:117588ms step_avg:58.18ms
+[2025-09-09 13:57:52] [Rank 0] step:2041/10000 train_time:118403ms step_avg:58.01ms
+[2025-09-09 13:57:53] [Rank 0] step:2061/10000 train_time:119218ms step_avg:57.84ms
+[2025-09-09 13:57:54] [Rank 0] step:2081/10000 train_time:120033ms step_avg:57.68ms
+[2025-09-09 13:57:55] [Rank 0] step:2101/10000 train_time:120847ms step_avg:57.52ms
+[2025-09-09 13:57:56] [Rank 0] step:2121/10000 train_time:121661ms step_avg:57.36ms
+[2025-09-09 13:57:57] [Rank 0] step:2141/10000 train_time:122475ms step_avg:57.20ms
+[2025-09-09 13:57:57] [Rank 0] step:2161/10000 train_time:123289ms step_avg:57.05ms
+[2025-09-09 13:57:58] [Rank 0] step:2181/10000 train_time:124103ms step_avg:56.90ms
+[2025-09-09 13:57:59] [Rank 0] step:2201/10000 train_time:124917ms step_avg:56.75ms
+[2025-09-09 13:58:00] [Rank 0] step:2221/10000 train_time:125732ms step_avg:56.61ms
+[2025-09-09 13:58:01] [Rank 0] step:2241/10000 train_time:126551ms step_avg:56.47ms
+[2025-09-09 13:58:01] [Rank 0] step:2261/10000 train_time:127371ms step_avg:56.33ms
+[2025-09-09 13:58:02] [Rank 0] step:2281/10000 train_time:128192ms step_avg:56.20ms
+[2025-09-09 13:58:03] [Rank 0] step:2301/10000 train_time:129013ms step_avg:56.07ms
+[2025-09-09 13:58:04] [Rank 0] step:2321/10000 train_time:129834ms step_avg:55.94ms
+[2025-09-09 13:58:05] [Rank 0] step:2341/10000 train_time:130654ms step_avg:55.81ms
+[2025-09-09 13:58:06] [Rank 0] step:2361/10000 train_time:131473ms step_avg:55.69ms
+[2025-09-09 13:58:06] [Rank 0] step:2381/10000 train_time:132293ms step_avg:55.56ms
+[2025-09-09 13:58:07] [Rank 0] step:2401/10000 train_time:133113ms step_avg:55.44ms
+[2025-09-09 13:58:08] [Rank 0] step:2421/10000 train_time:133933ms step_avg:55.32ms
+[2025-09-09 13:58:09] [Rank 0] step:2441/10000 train_time:134755ms step_avg:55.20ms
+[2025-09-09 13:58:10] [Rank 0] step:2461/10000 train_time:135574ms step_avg:55.09ms
+[2025-09-09 13:58:10] [Rank 0] step:2481/10000 train_time:136392ms step_avg:54.97ms
+[2025-09-09 13:58:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
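The step_avg figure in these lines is cumulative: total train_time divided by steps completed, which is why it drifts steadily downward as early-step overhead is amortized. A sketch of that definition, consistent with the figures above (e.g. 116505 ms / 2001 steps ≈ 58.22 ms); the function name is illustrative, not taken from the script:

    def step_avg_ms(train_time_ms: int, step: int) -> float:
        """Cumulative average wall-clock time per training step."""
        return train_time_ms / step

    assert round(step_avg_ms(116505, 2001), 2) == 58.22  # matches the step:2001 line above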
+[2025-09-09 13:58:12] [Rank 0] PRINT: step:2500/10000 train_loss:0.7086 val_loss:0.6821 train_time:137214ms step_avg:54.89ms
+[2025-09-09 13:58:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:58:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 13:59:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 13:59:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 13:59:34] [Rank 0] Total Loss: 4.6805
+[2025-09-09 13:59:34] [Rank 0] Total FTA (Unweighted): 0.8737
+[2025-09-09 13:59:34] [Rank 0] Total FTA (Weighted): 0.8738
+[2025-09-09 13:59:34] [Rank 0] Group 0 Loss: 4.5942
+[2025-09-09 13:59:34] [Rank 0] Group 1 Loss: 4.4119
+[2025-09-09 13:59:34] [Rank 0] Group 2 Loss: 4.1392
+[2025-09-09 13:59:34] [Rank 0] Group 3 Loss: 4.5479
+[2025-09-09 13:59:34] [Rank 0] Group 4 Loss: 4.5467
+[2025-09-09 13:59:34] [Rank 0] Group 5 Loss: 4.5549
+[2025-09-09 13:59:34] [Rank 0] Group 6 Loss: 4.4980
+[2025-09-09 13:59:34] [Rank 0] Group 7 Loss: 4.5499
+[2025-09-09 13:59:34] [Rank 0] Group 8 Loss: 4.6937
+[2025-09-09 13:59:34] [Rank 0] Group 9 Loss: 4.6788
+[2025-09-09 13:59:34] [Rank 0] Group 10 Loss: 4.8204
+[2025-09-09 13:59:34] [Rank 0] Group 11 Loss: 4.9016
+[2025-09-09 13:59:34] [Rank 0] Group 12 Loss: 4.8565
+[2025-09-09 13:59:34] [Rank 0] Group 13 Loss: 5.0019
+[2025-09-09 13:59:34] [Rank 0] Group 14 Loss: 4.9918
+[2025-09-09 13:59:34] [Rank 0] Group 15 Loss: 5.1012
+[2025-09-09 13:59:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 13:59:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 13:59:34] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 13:59:34] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 13:59:34] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 13:59:34] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 13:59:34] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 13:59:34] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 13:59:34] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 13:59:34] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 13:59:34] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-09 13:59:34] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-09 13:59:34] [Rank 0] Group 12 FTA: 0.8700
+[2025-09-09 13:59:34] [Rank 0] Group 13 FTA: 0.6800
+[2025-09-09 13:59:34] [Rank 0] Group 14 FTA: 0.2700
+[2025-09-09 13:59:34] [Rank 0] Group 15 FTA: 0.2200
+[2025-09-09 13:59:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 13:59:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 13:59:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 13:59:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 13:59:35] [Rank 0] step:2501/10000 train_time:137230ms step_avg:54.87ms
+[2025-09-09 13:59:36] [Rank 0] step:2521/10000 train_time:138054ms step_avg:54.76ms
+[2025-09-09 13:59:37] [Rank 0] step:2541/10000 train_time:138873ms step_avg:54.65ms
+[2025-09-09 13:59:38] [Rank 0] step:2561/10000 train_time:139692ms step_avg:54.55ms
+[2025-09-09 13:59:39] [Rank 0] step:2581/10000 train_time:140512ms step_avg:54.44ms
+[2025-09-09 13:59:39] [Rank 0] step:2601/10000 train_time:141332ms step_avg:54.34ms
+[2025-09-09 13:59:40] [Rank 0] step:2621/10000 train_time:142153ms step_avg:54.24ms
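One detail worth flagging in the step-2500 block above: unweighted FTA 0.8737 versus weighted FTA 0.8738. With a balanced fixed-eval set (1600 samples over 16 groups is 100 per group), a sample-weighted mean and a plain mean of the per-group scores are the same number, and the group values above average to exactly 0.87375; the last-digit disagreement is consistent with the two code paths simply rounding that value in different directions. A sketch of the check, with the group values copied from the log:

    group_fta = [1.0] * 10 + [0.98, 0.96, 0.87, 0.68, 0.27, 0.22]  # groups 0-15, step 2500
    mean_fta = sum(group_fta) / len(group_fta)
    print(round(mean_fta, 5))  # 0.87375 -- rounds to 0.8737 or 0.8738 at 4 digits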
+[2025-09-09 13:59:41] [Rank 0] step:2641/10000 train_time:142973ms step_avg:54.14ms
+[2025-09-09 13:59:42] [Rank 0] step:2661/10000 train_time:143792ms step_avg:54.04ms
+[2025-09-09 13:59:43] [Rank 0] step:2681/10000 train_time:144614ms step_avg:53.94ms
+[2025-09-09 13:59:43] [Rank 0] step:2701/10000 train_time:145433ms step_avg:53.84ms
+[2025-09-09 13:59:44] [Rank 0] step:2721/10000 train_time:146253ms step_avg:53.75ms
+[2025-09-09 13:59:45] [Rank 0] step:2741/10000 train_time:147073ms step_avg:53.66ms
+[2025-09-09 13:59:46] [Rank 0] step:2761/10000 train_time:147893ms step_avg:53.57ms
+[2025-09-09 13:59:47] [Rank 0] step:2781/10000 train_time:148713ms step_avg:53.47ms
+[2025-09-09 13:59:48] [Rank 0] step:2801/10000 train_time:149533ms step_avg:53.39ms
+[2025-09-09 13:59:49] [Rank 0] step:2821/10000 train_time:151043ms step_avg:53.54ms
+[2025-09-09 13:59:50] [Rank 0] step:2841/10000 train_time:151864ms step_avg:53.45ms
+[2025-09-09 13:59:51] [Rank 0] step:2861/10000 train_time:152683ms step_avg:53.37ms
+[2025-09-09 13:59:52] [Rank 0] step:2881/10000 train_time:153503ms step_avg:53.28ms
+[2025-09-09 13:59:52] [Rank 0] step:2901/10000 train_time:154323ms step_avg:53.20ms
+[2025-09-09 13:59:53] [Rank 0] step:2921/10000 train_time:155143ms step_avg:53.11ms
+[2025-09-09 13:59:54] [Rank 0] step:2941/10000 train_time:155962ms step_avg:53.03ms
+[2025-09-09 13:59:55] [Rank 0] step:2961/10000 train_time:156783ms step_avg:52.95ms
+[2025-09-09 13:59:56] [Rank 0] step:2981/10000 train_time:157603ms step_avg:52.87ms
+[2025-09-09 13:59:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 13:59:57] [Rank 0] PRINT: step:3000/10000 train_loss:0.6870 val_loss:0.6659 train_time:158426ms step_avg:52.81ms
+[2025-09-09 13:59:57] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 13:59:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:01:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:01:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:01:18] [Rank 0] Total Loss: 4.6773
+[2025-09-09 14:01:18] [Rank 0] Total FTA (Unweighted): 0.8975
+[2025-09-09 14:01:18] [Rank 0] Total FTA (Weighted): 0.8975
+[2025-09-09 14:01:18] [Rank 0] Group 0 Loss: 4.6360
+[2025-09-09 14:01:18] [Rank 0] Group 1 Loss: 4.2830
+[2025-09-09 14:01:18] [Rank 0] Group 2 Loss: 4.0995
+[2025-09-09 14:01:18] [Rank 0] Group 3 Loss: 4.6529
+[2025-09-09 14:01:18] [Rank 0] Group 4 Loss: 4.5808
+[2025-09-09 14:01:18] [Rank 0] Group 5 Loss: 4.5723
+[2025-09-09 14:01:18] [Rank 0] Group 6 Loss: 4.4894
+[2025-09-09 14:01:18] [Rank 0] Group 7 Loss: 4.5713
+[2025-09-09 14:01:18] [Rank 0] Group 8 Loss: 4.7157
+[2025-09-09 14:01:18] [Rank 0] Group 9 Loss: 4.7623
+[2025-09-09 14:01:18] [Rank 0] Group 10 Loss: 4.8459
+[2025-09-09 14:01:18] [Rank 0] Group 11 Loss: 4.8772
+[2025-09-09 14:01:18] [Rank 0] Group 12 Loss: 4.8647
+[2025-09-09 14:01:18] [Rank 0] Group 13 Loss: 4.9332
+[2025-09-09 14:01:18] [Rank 0] Group 14 Loss: 4.9397
+[2025-09-09 14:01:18] [Rank 0] Group 15 Loss: 5.0130
+[2025-09-09 14:01:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:01:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:01:18] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:01:18] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:01:18] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:01:18] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:01:18] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:01:18] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:01:18] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:01:18] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:01:18] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:01:18] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-09 14:01:18] [Rank 0] Group 12 FTA: 0.9300
+[2025-09-09 14:01:18] [Rank 0] Group 13 FTA: 0.7800
+[2025-09-09 14:01:18] [Rank 0] Group 14 FTA: 0.4500
+[2025-09-09 14:01:18] [Rank 0] Group 15 FTA: 0.2200
+[2025-09-09 14:01:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:01:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:01:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:01:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:01:21] [Rank 0] step:3001/10000 train_time:158443ms step_avg:52.80ms
+[2025-09-09 14:01:21] [Rank 0] step:3021/10000 train_time:159271ms step_avg:52.72ms
+[2025-09-09 14:01:22] [Rank 0] step:3041/10000 train_time:160091ms step_avg:52.64ms
+[2025-09-09 14:01:23] [Rank 0] step:3061/10000 train_time:160911ms step_avg:52.57ms
+[2025-09-09 14:01:24] [Rank 0] step:3081/10000 train_time:161732ms step_avg:52.49ms
+[2025-09-09 14:01:25] [Rank 0] step:3101/10000 train_time:162552ms step_avg:52.42ms
+[2025-09-09 14:01:26] [Rank 0] step:3121/10000 train_time:163372ms step_avg:52.35ms
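The four "[✓] ... updated and saved" lines after every detailed evaluation indicate the script re-renders the same four PNGs in place each time, so the files on disk always reflect the full history to date. A minimal sketch of that pattern, assuming a history dict accumulated across evaluations (all names here are illustrative, not the script's actual ones):

    import matplotlib
    matplotlib.use("Agg")  # headless backend: render PNGs without a display
    import matplotlib.pyplot as plt

    def save_per_class_curves(history: dict[int, list[tuple[int, float]]], out_path: str) -> None:
        # history maps group id -> [(step, metric), ...] gathered at each detailed eval
        fig, ax = plt.subplots(figsize=(8, 5))
        for gid in sorted(history):
            steps, values = zip(*history[gid])
            ax.plot(steps, values, label=f"Group {gid}")
        ax.set_xlabel("step")
        ax.set_ylabel("metric")
        ax.legend(fontsize=6, ncol=2)
        fig.savefig(out_path)  # overwrites the previous snapshot in place
        plt.close(fig)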
+[2025-09-09 14:01:26] [Rank 0] step:3141/10000 train_time:164193ms step_avg:52.27ms
+[2025-09-09 14:01:27] [Rank 0] step:3161/10000 train_time:165013ms step_avg:52.20ms
+[2025-09-09 14:01:28] [Rank 0] step:3181/10000 train_time:165833ms step_avg:52.13ms
+[2025-09-09 14:01:29] [Rank 0] step:3201/10000 train_time:166652ms step_avg:52.06ms
+[2025-09-09 14:01:30] [Rank 0] step:3221/10000 train_time:167473ms step_avg:51.99ms
+[2025-09-09 14:01:30] [Rank 0] step:3241/10000 train_time:168291ms step_avg:51.93ms
+[2025-09-09 14:01:31] [Rank 0] step:3261/10000 train_time:169111ms step_avg:51.86ms
+[2025-09-09 14:01:32] [Rank 0] step:3281/10000 train_time:169930ms step_avg:51.79ms
+[2025-09-09 14:01:33] [Rank 0] step:3301/10000 train_time:170749ms step_avg:51.73ms
+[2025-09-09 14:01:34] [Rank 0] step:3321/10000 train_time:171570ms step_avg:51.66ms
+[2025-09-09 14:01:35] [Rank 0] step:3341/10000 train_time:172757ms step_avg:51.71ms
+[2025-09-09 14:01:36] [Rank 0] step:3361/10000 train_time:173735ms step_avg:51.69ms
+[2025-09-09 14:01:37] [Rank 0] step:3381/10000 train_time:174554ms step_avg:51.63ms
+[2025-09-09 14:01:38] [Rank 0] step:3401/10000 train_time:175374ms step_avg:51.57ms
+[2025-09-09 14:01:38] [Rank 0] step:3421/10000 train_time:176193ms step_avg:51.50ms
+[2025-09-09 14:01:39] [Rank 0] step:3441/10000 train_time:177014ms step_avg:51.44ms
+[2025-09-09 14:01:40] [Rank 0] step:3461/10000 train_time:177834ms step_avg:51.38ms
+[2025-09-09 14:01:41] [Rank 0] step:3481/10000 train_time:178653ms step_avg:51.32ms
+[2025-09-09 14:01:42] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:01:42] [Rank 0] PRINT: step:3500/10000 train_loss:0.6731 val_loss:0.6547 train_time:179474ms step_avg:51.28ms
+[2025-09-09 14:01:42] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:01:42] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:03:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:03:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:03:03] [Rank 0] Total Loss: 4.6539
+[2025-09-09 14:03:03] [Rank 0] Total FTA (Unweighted): 0.9019
+[2025-09-09 14:03:03] [Rank 0] Total FTA (Weighted): 0.9019
+[2025-09-09 14:03:03] [Rank 0] Group 0 Loss: 4.5410
+[2025-09-09 14:03:03] [Rank 0] Group 1 Loss: 4.2920
+[2025-09-09 14:03:03] [Rank 0] Group 2 Loss: 4.0871
+[2025-09-09 14:03:03] [Rank 0] Group 3 Loss: 4.6080
+[2025-09-09 14:03:03] [Rank 0] Group 4 Loss: 4.5773
+[2025-09-09 14:03:03] [Rank 0] Group 5 Loss: 4.5806
+[2025-09-09 14:03:03] [Rank 0] Group 6 Loss: 4.5018
+[2025-09-09 14:03:03] [Rank 0] Group 7 Loss: 4.5586
+[2025-09-09 14:03:03] [Rank 0] Group 8 Loss: 4.7087
+[2025-09-09 14:03:03] [Rank 0] Group 9 Loss: 4.7135
+[2025-09-09 14:03:03] [Rank 0] Group 10 Loss: 4.8409
+[2025-09-09 14:03:03] [Rank 0] Group 11 Loss: 4.8822
+[2025-09-09 14:03:03] [Rank 0] Group 12 Loss: 4.8409
+[2025-09-09 14:03:03] [Rank 0] Group 13 Loss: 4.9098
+[2025-09-09 14:03:03] [Rank 0] Group 14 Loss: 4.8871
+[2025-09-09 14:03:03] [Rank 0] Group 15 Loss: 4.9323
+[2025-09-09 14:03:03] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:03:04] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:03:04] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:03:04] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:03:04] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:03:04] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:03:04] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:03:04] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:03:04] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:03:04] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:03:04] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-09 14:03:04] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-09 14:03:04] [Rank 0] Group 12 FTA: 0.9600
+[2025-09-09 14:03:04] [Rank 0] Group 13 FTA: 0.8500
+[2025-09-09 14:03:04] [Rank 0] Group 14 FTA: 0.4600
+[2025-09-09 14:03:04] [Rank 0] Group 15 FTA: 0.1900
+[2025-09-09 14:03:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:03:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:03:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:03:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:03:05] [Rank 0] step:3501/10000 train_time:179491ms step_avg:51.27ms
+[2025-09-09 14:03:06] [Rank 0] step:3521/10000 train_time:180308ms step_avg:51.21ms
+[2025-09-09 14:03:07] [Rank 0] step:3541/10000 train_time:181127ms step_avg:51.15ms
+[2025-09-09 14:03:07] [Rank 0] step:3561/10000 train_time:181948ms step_avg:51.09ms
+[2025-09-09 14:03:08] [Rank 0] step:3581/10000 train_time:182768ms step_avg:51.04ms
+[2025-09-09 14:03:09] [Rank 0] step:3601/10000 train_time:183588ms step_avg:50.98ms
+[2025-09-09 14:03:10] [Rank 0] step:3621/10000 train_time:184407ms step_avg:50.93ms
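"Fixed-eval set loaded with 1600 samples" is consistent with the 16 groups reported above at 100 samples each, frozen once so that every detailed evaluation scores the identical examples. A sketch of loading such a set, assuming an index file that maps group-id strings to lists of dataset indices (the loader and file handling here are hypothetical; the script's actual data pipeline is not shown in this log):

    import json

    with open("fixed_eval_indices.json") as f:
        indices_by_group = json.load(f)  # e.g. {"0": [...], "1": [...], ...}

    samples = [(int(gid), idx)
               for gid, idxs in indices_by_group.items()
               for idx in idxs]
    print(f"Fixed-eval set loaded with {len(samples)} samples.")  # 16 groups x 100 = 1600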
+[2025-09-09 14:03:11] [Rank 0] step:3641/10000 train_time:185922ms step_avg:51.06ms
+[2025-09-09 14:03:12] [Rank 0] step:3661/10000 train_time:186741ms step_avg:51.01ms
+[2025-09-09 14:03:13] [Rank 0] step:3681/10000 train_time:187564ms step_avg:50.95ms
+[2025-09-09 14:03:14] [Rank 0] step:3701/10000 train_time:188383ms step_avg:50.90ms
+[2025-09-09 14:03:15] [Rank 0] step:3721/10000 train_time:189201ms step_avg:50.85ms
+[2025-09-09 14:03:16] [Rank 0] step:3741/10000 train_time:190020ms step_avg:50.79ms
+[2025-09-09 14:03:16] [Rank 0] step:3761/10000 train_time:190840ms step_avg:50.74ms
+[2025-09-09 14:03:17] [Rank 0] step:3781/10000 train_time:191660ms step_avg:50.69ms
+[2025-09-09 14:03:18] [Rank 0] step:3801/10000 train_time:192479ms step_avg:50.64ms
+[2025-09-09 14:03:19] [Rank 0] step:3821/10000 train_time:193299ms step_avg:50.59ms
+[2025-09-09 14:03:20] [Rank 0] step:3841/10000 train_time:194118ms step_avg:50.54ms
+[2025-09-09 14:03:20] [Rank 0] step:3861/10000 train_time:194937ms step_avg:50.49ms
+[2025-09-09 14:03:21] [Rank 0] step:3881/10000 train_time:195755ms step_avg:50.44ms
+[2025-09-09 14:03:22] [Rank 0] step:3901/10000 train_time:196575ms step_avg:50.39ms
+[2025-09-09 14:03:23] [Rank 0] step:3921/10000 train_time:197394ms step_avg:50.34ms
+[2025-09-09 14:03:24] [Rank 0] step:3941/10000 train_time:198212ms step_avg:50.29ms
+[2025-09-09 14:03:25] [Rank 0] step:3961/10000 train_time:199031ms step_avg:50.25ms
+[2025-09-09 14:03:25] [Rank 0] step:3981/10000 train_time:199850ms step_avg:50.20ms
+[2025-09-09 14:03:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:03:27] [Rank 0] PRINT: step:4000/10000 train_loss:0.6625 val_loss:0.6443 train_time:200672ms step_avg:50.17ms
+[2025-09-09 14:03:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:03:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:04:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:04:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:04:48] [Rank 0] Total Loss: 4.6880
+[2025-09-09 14:04:48] [Rank 0] Total FTA (Unweighted): 0.9281
+[2025-09-09 14:04:48] [Rank 0] Total FTA (Weighted): 0.9281
+[2025-09-09 14:04:48] [Rank 0] Group 0 Loss: 4.6414
+[2025-09-09 14:04:48] [Rank 0] Group 1 Loss: 4.2856
+[2025-09-09 14:04:48] [Rank 0] Group 2 Loss: 4.1515
+[2025-09-09 14:04:48] [Rank 0] Group 3 Loss: 4.6605
+[2025-09-09 14:04:48] [Rank 0] Group 4 Loss: 4.5832
+[2025-09-09 14:04:48] [Rank 0] Group 5 Loss: 4.6021
+[2025-09-09 14:04:48] [Rank 0] Group 6 Loss: 4.5468
+[2025-09-09 14:04:48] [Rank 0] Group 7 Loss: 4.5935
+[2025-09-09 14:04:48] [Rank 0] Group 8 Loss: 4.7588
+[2025-09-09 14:04:48] [Rank 0] Group 9 Loss: 4.7550
+[2025-09-09 14:04:48] [Rank 0] Group 10 Loss: 4.8737
+[2025-09-09 14:04:48] [Rank 0] Group 11 Loss: 4.8708
+[2025-09-09 14:04:48] [Rank 0] Group 12 Loss: 4.8479
+[2025-09-09 14:04:48] [Rank 0] Group 13 Loss: 4.9447
+[2025-09-09 14:04:48] [Rank 0] Group 14 Loss: 4.9150
+[2025-09-09 14:04:48] [Rank 0] Group 15 Loss: 4.9768
+[2025-09-09 14:04:48] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:04:49] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:04:49] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:04:49] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:04:49] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:04:49] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:04:49] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:04:49] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:04:49] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:04:49] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:04:49] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:04:49] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-09 14:04:49] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 14:04:49] [Rank 0] Group 13 FTA: 0.8900
+[2025-09-09 14:04:49] [Rank 0] Group 14 FTA: 0.5900
+[2025-09-09 14:04:49] [Rank 0] Group 15 FTA: 0.3900
+[2025-09-09 14:04:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:04:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:04:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:04:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:04:50] [Rank 0] step:4001/10000 train_time:200689ms step_avg:50.16ms
+[2025-09-09 14:04:51] [Rank 0] step:4021/10000 train_time:201681ms step_avg:50.16ms
+[2025-09-09 14:04:52] [Rank 0] step:4041/10000 train_time:202499ms step_avg:50.11ms
+[2025-09-09 14:04:53] [Rank 0] step:4061/10000 train_time:203320ms step_avg:50.07ms
+[2025-09-09 14:04:54] [Rank 0] step:4081/10000 train_time:204141ms step_avg:50.02ms
+[2025-09-09 14:04:54] [Rank 0] step:4101/10000 train_time:204962ms step_avg:49.98ms
+[2025-09-09 14:04:55] [Rank 0] step:4121/10000 train_time:205780ms step_avg:49.93ms
+[2025-09-09 14:04:56] [Rank 0] step:4141/10000 train_time:206600ms step_avg:49.89ms
+[2025-09-09 14:04:57] [Rank 0] step:4161/10000 train_time:207420ms step_avg:49.85ms
+[2025-09-09 14:04:58] [Rank 0] step:4181/10000 train_time:208240ms step_avg:49.81ms
+[2025-09-09 14:04:58] [Rank 0] step:4201/10000 train_time:209060ms step_avg:49.76ms
+[2025-09-09 14:04:59] [Rank 0] step:4221/10000 train_time:209879ms step_avg:49.72ms
+[2025-09-09 14:05:00] [Rank 0] step:4241/10000 train_time:210698ms step_avg:49.68ms
+[2025-09-09 14:05:01] [Rank 0] step:4261/10000 train_time:211595ms step_avg:49.66ms
+[2025-09-09 14:05:02] [Rank 0] step:4281/10000 train_time:212434ms step_avg:49.62ms
+[2025-09-09 14:05:03] [Rank 0] step:4301/10000 train_time:213254ms step_avg:49.58ms
+[2025-09-09 14:05:03] [Rank 0] step:4321/10000 train_time:214074ms step_avg:49.54ms
+[2025-09-09 14:05:04] [Rank 0] step:4341/10000 train_time:214895ms step_avg:49.50ms
+[2025-09-09 14:05:05] [Rank 0] step:4361/10000 train_time:215714ms step_avg:49.46ms
+[2025-09-09 14:05:06] [Rank 0] step:4381/10000 train_time:216534ms step_avg:49.43ms
+[2025-09-09 14:05:07] [Rank 0] step:4401/10000 train_time:217354ms step_avg:49.39ms
+[2025-09-09 14:05:08] [Rank 0] step:4421/10000 train_time:218174ms step_avg:49.35ms
+[2025-09-09 14:05:08] [Rank 0] step:4441/10000 train_time:218994ms step_avg:49.31ms
+[2025-09-09 14:05:09] [Rank 0] step:4461/10000 train_time:219815ms step_avg:49.27ms
+[2025-09-09 14:05:10] [Rank 0] step:4481/10000 train_time:220637ms step_avg:49.24ms
+[2025-09-09 14:05:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:05:12] [Rank 0] PRINT: step:4500/10000 train_loss:0.6531 val_loss:0.6362 train_time:221458ms step_avg:49.21ms
+[2025-09-09 14:05:12] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:05:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:06:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:06:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:06:33] [Rank 0] Total Loss: 4.7212
+[2025-09-09 14:06:33] [Rank 0] Total FTA (Unweighted): 0.9400
+[2025-09-09 14:06:33] [Rank 0] Total FTA (Weighted): 0.9400
+[2025-09-09 14:06:33] [Rank 0] Group 0 Loss: 4.6655
+[2025-09-09 14:06:33] [Rank 0] Group 1 Loss: 4.3092
+[2025-09-09 14:06:33] [Rank 0] Group 2 Loss: 4.1942
+[2025-09-09 14:06:33] [Rank 0] Group 3 Loss: 4.6726
+[2025-09-09 14:06:33] [Rank 0] Group 4 Loss: 4.6209
+[2025-09-09 14:06:33] [Rank 0] Group 5 Loss: 4.6454
+[2025-09-09 14:06:33] [Rank 0] Group 6 Loss: 4.5725
+[2025-09-09 14:06:33] [Rank 0] Group 7 Loss: 4.6189
+[2025-09-09 14:06:33] [Rank 0] Group 8 Loss: 4.7963
+[2025-09-09 14:06:33] [Rank 0] Group 9 Loss: 4.7830
+[2025-09-09 14:06:33] [Rank 0] Group 10 Loss: 4.9095
+[2025-09-09 14:06:33] [Rank 0] Group 11 Loss: 4.9366
+[2025-09-09 14:06:33] [Rank 0] Group 12 Loss: 4.8983
+[2025-09-09 14:06:33] [Rank 0] Group 13 Loss: 4.9513
+[2025-09-09 14:06:33] [Rank 0] Group 14 Loss: 4.9700
+[2025-09-09 14:06:33] [Rank 0] Group 15 Loss: 4.9955
+[2025-09-09 14:06:33] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:06:33] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:06:33] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:06:33] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:06:33] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:06:33] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:06:33] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:06:33] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:06:33] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:06:33] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:06:33] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:06:33] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:06:33] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 14:06:33] [Rank 0] Group 13 FTA: 0.9600
+[2025-09-09 14:06:33] [Rank 0] Group 14 FTA: 0.6700
+[2025-09-09 14:06:33] [Rank 0] Group 15 FTA: 0.4200
+[2025-09-09 14:06:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:06:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:06:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:06:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:06:34] [Rank 0] step:4501/10000 train_time:221475ms step_avg:49.21ms
+[2025-09-09 14:06:35] [Rank 0] step:4521/10000 train_time:222301ms step_avg:49.17ms
+[2025-09-09 14:06:36] [Rank 0] step:4541/10000 train_time:223122ms step_avg:49.14ms
+[2025-09-09 14:06:37] [Rank 0] step:4561/10000 train_time:223943ms step_avg:49.10ms
+[2025-09-09 14:06:37] [Rank 0] step:4581/10000 train_time:224763ms step_avg:49.06ms
+[2025-09-09 14:06:38] [Rank 0] step:4601/10000 train_time:225584ms step_avg:49.03ms
+[2025-09-09 14:06:39] [Rank 0] step:4621/10000 train_time:226510ms step_avg:49.02ms
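The two "Total FTA" lines are equal (or within 1e-4) at every detailed eval, which is what one expects when every group contributes the same number of samples: 1600 fixed-eval samples over Groups 0-15 works out to 100 each, so the sample-weighted mean collapses to the plain mean of the per-group values. A short sketch reproducing the step-4500 totals; the even 100-per-group split is an assumption inferred from the 0.01-granular per-group FTA values, not read from the script:

    # Groups 0-15 FTA at step 4500, as logged above
    group_fta = [1.0] * 12 + [0.99, 0.96, 0.67, 0.42]
    group_sizes = [100] * 16             # assumed: 1600 samples split evenly

    unweighted = sum(group_fta) / len(group_fta)
    weighted = sum(f * n for f, n in zip(group_fta, group_sizes)) / sum(group_sizes)
    print(f"{unweighted:.4f} / {weighted:.4f}")   # -> 0.9400 / 0.9400

With equal group sizes the two numbers can only drift apart through rounding or a slightly uneven sample count, which would explain the 0.9713 vs. 0.9712 seen at later steps.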
+[2025-09-09 14:06:40] [Rank 0] step:4641/10000 train_time:227338ms step_avg:48.98ms
+[2025-09-09 14:06:41] [Rank 0] step:4661/10000 train_time:228158ms step_avg:48.95ms
+[2025-09-09 14:06:42] [Rank 0] step:4681/10000 train_time:228977ms step_avg:48.92ms
+[2025-09-09 14:06:42] [Rank 0] step:4701/10000 train_time:229798ms step_avg:48.88ms
+[2025-09-09 14:06:43] [Rank 0] step:4721/10000 train_time:230618ms step_avg:48.85ms
+[2025-09-09 14:06:44] [Rank 0] step:4741/10000 train_time:231440ms step_avg:48.82ms
+[2025-09-09 14:06:45] [Rank 0] step:4761/10000 train_time:232259ms step_avg:48.78ms
+[2025-09-09 14:06:46] [Rank 0] step:4781/10000 train_time:233078ms step_avg:48.75ms
+[2025-09-09 14:06:47] [Rank 0] step:4801/10000 train_time:233899ms step_avg:48.72ms
+[2025-09-09 14:06:47] [Rank 0] step:4821/10000 train_time:234721ms step_avg:48.69ms
+[2025-09-09 14:06:49] [Rank 0] step:4841/10000 train_time:235854ms step_avg:48.72ms
+[2025-09-09 14:06:49] [Rank 0] step:4861/10000 train_time:236676ms step_avg:48.69ms
+[2025-09-09 14:06:50] [Rank 0] step:4881/10000 train_time:237661ms step_avg:48.69ms
+[2025-09-09 14:06:51] [Rank 0] step:4901/10000 train_time:238788ms step_avg:48.72ms
+[2025-09-09 14:06:52] [Rank 0] step:4921/10000 train_time:239608ms step_avg:48.69ms
+[2025-09-09 14:06:53] [Rank 0] step:4941/10000 train_time:240429ms step_avg:48.66ms
+[2025-09-09 14:06:54] [Rank 0] step:4961/10000 train_time:241250ms step_avg:48.63ms
+[2025-09-09 14:06:55] [Rank 0] step:4981/10000 train_time:242070ms step_avg:48.60ms
+[2025-09-09 14:06:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
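The step_avg column is simply cumulative train_time divided by the step index (227338 ms / 4641 ≈ 48.98 ms), and the timer evidently accumulates only while training runs: step 4501 reports train_time 221475 ms, just 17 ms past step 4500's 221458 ms, even though an ~80 s detailed evaluation sat in between. A sketch of bookkeeping consistent with that behavior (an illustration, not the script's actual code):

    import time

    train_time_ms = 0.0
    for step in range(1, 10001):
        t0 = time.perf_counter()
        # ... forward/backward/optimizer step would run here ...
        train_time_ms += (time.perf_counter() - t0) * 1000  # eval time never enters the sum
        if step % 20 == 1:
            print(f"step:{step}/10000 train_time:{train_time_ms:.0f}ms "
                  f"step_avg:{train_time_ms / step:.2f}ms")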
+[2025-09-09 14:06:56] [Rank 0] PRINT: step:5000/10000 train_loss:0.6447 val_loss:0.6298 train_time:242894ms step_avg:48.58ms
+[2025-09-09 14:06:56] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:06:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:08:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:08:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:08:17] [Rank 0] Total Loss: 4.8359
+[2025-09-09 14:08:17] [Rank 0] Total FTA (Unweighted): 0.9494
+[2025-09-09 14:08:18] [Rank 0] Total FTA (Weighted): 0.9494
+[2025-09-09 14:08:18] [Rank 0] Group 0 Loss: 4.7726
+[2025-09-09 14:08:18] [Rank 0] Group 1 Loss: 4.5512
+[2025-09-09 14:08:18] [Rank 0] Group 2 Loss: 4.2820
+[2025-09-09 14:08:18] [Rank 0] Group 3 Loss: 4.7946
+[2025-09-09 14:08:18] [Rank 0] Group 4 Loss: 4.7113
+[2025-09-09 14:08:18] [Rank 0] Group 5 Loss: 4.7201
+[2025-09-09 14:08:18] [Rank 0] Group 6 Loss: 4.6654
+[2025-09-09 14:08:18] [Rank 0] Group 7 Loss: 4.7504
+[2025-09-09 14:08:18] [Rank 0] Group 8 Loss: 4.9013
+[2025-09-09 14:08:18] [Rank 0] Group 9 Loss: 4.9095
+[2025-09-09 14:08:18] [Rank 0] Group 10 Loss: 4.9870
+[2025-09-09 14:08:18] [Rank 0] Group 11 Loss: 5.0324
+[2025-09-09 14:08:18] [Rank 0] Group 12 Loss: 5.0311
+[2025-09-09 14:08:18] [Rank 0] Group 13 Loss: 5.0947
+[2025-09-09 14:08:18] [Rank 0] Group 14 Loss: 5.0786
+[2025-09-09 14:08:18] [Rank 0] Group 15 Loss: 5.0919
+[2025-09-09 14:08:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:08:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:08:18] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:08:18] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:08:18] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:08:18] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:08:18] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:08:18] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:08:18] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:08:18] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:08:18] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-09 14:08:18] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:08:18] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 14:08:18] [Rank 0] Group 13 FTA: 0.9600
+[2025-09-09 14:08:18] [Rank 0] Group 14 FTA: 0.7700
+[2025-09-09 14:08:18] [Rank 0] Group 15 FTA: 0.4800
+[2025-09-09 14:08:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:08:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:08:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:08:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:08:19] [Rank 0] step:5001/10000 train_time:242912ms step_avg:48.57ms
+[2025-09-09 14:08:20] [Rank 0] step:5021/10000 train_time:243751ms step_avg:48.55ms
+[2025-09-09 14:08:21] [Rank 0] step:5041/10000 train_time:244572ms step_avg:48.52ms
+[2025-09-09 14:08:21] [Rank 0] step:5061/10000 train_time:245393ms step_avg:48.49ms
+[2025-09-09 14:08:22] [Rank 0] step:5081/10000 train_time:246214ms step_avg:48.46ms
+[2025-09-09 14:08:23] [Rank 0] step:5101/10000 train_time:247035ms step_avg:48.43ms
+[2025-09-09 14:08:24] [Rank 0] step:5121/10000 train_time:247855ms step_avg:48.40ms
+[2025-09-09 14:08:25] [Rank 0] step:5141/10000 train_time:248679ms step_avg:48.37ms
+[2025-09-09 14:08:26] [Rank 0] step:5161/10000 train_time:249497ms step_avg:48.34ms
+[2025-09-09 14:08:26] [Rank 0] step:5181/10000 train_time:250317ms step_avg:48.31ms
+[2025-09-09 14:08:27] [Rank 0] step:5201/10000 train_time:251135ms step_avg:48.29ms
+[2025-09-09 14:08:28] [Rank 0] step:5221/10000 train_time:251954ms step_avg:48.26ms
+[2025-09-09 14:08:29] [Rank 0] step:5241/10000 train_time:252775ms step_avg:48.23ms
+[2025-09-09 14:08:30] [Rank 0] step:5261/10000 train_time:253595ms step_avg:48.20ms
+[2025-09-09 14:08:31] [Rank 0] step:5281/10000 train_time:254415ms step_avg:48.18ms
+[2025-09-09 14:08:31] [Rank 0] step:5301/10000 train_time:255235ms step_avg:48.15ms
+[2025-09-09 14:08:32] [Rank 0] step:5321/10000 train_time:256056ms step_avg:48.12ms
+[2025-09-09 14:08:33] [Rank 0] step:5341/10000 train_time:256876ms step_avg:48.10ms
+[2025-09-09 14:08:34] [Rank 0] step:5361/10000 train_time:257697ms step_avg:48.07ms
+[2025-09-09 14:08:35] [Rank 0] step:5381/10000 train_time:258517ms step_avg:48.04ms
+[2025-09-09 14:08:35] [Rank 0] step:5401/10000 train_time:259338ms step_avg:48.02ms
+[2025-09-09 14:08:36] [Rank 0] step:5421/10000 train_time:260159ms step_avg:47.99ms
+[2025-09-09 14:08:37] [Rank 0] step:5441/10000 train_time:260980ms step_avg:47.97ms
+[2025-09-09 14:08:38] [Rank 0] step:5461/10000 train_time:261801ms step_avg:47.94ms
+[2025-09-09 14:08:39] [Rank 0] step:5481/10000 train_time:262622ms step_avg:47.91ms
+[2025-09-09 14:08:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:08:40] [Rank 0] PRINT: step:5500/10000 train_loss:0.6375 val_loss:0.6241 train_time:263445ms step_avg:47.90ms
+[2025-09-09 14:08:40] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:08:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:10:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:10:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:10:01] [Rank 0] Total Loss: 4.8014
+[2025-09-09 14:10:01] [Rank 0] Total FTA (Unweighted): 0.9600
+[2025-09-09 14:10:01] [Rank 0] Total FTA (Weighted): 0.9600
+[2025-09-09 14:10:01] [Rank 0] Group 0 Loss: 4.6681
+[2025-09-09 14:10:01] [Rank 0] Group 1 Loss: 4.4882
+[2025-09-09 14:10:01] [Rank 0] Group 2 Loss: 4.2175
+[2025-09-09 14:10:01] [Rank 0] Group 3 Loss: 4.7597
+[2025-09-09 14:10:01] [Rank 0] Group 4 Loss: 4.6894
+[2025-09-09 14:10:01] [Rank 0] Group 5 Loss: 4.7257
+[2025-09-09 14:10:01] [Rank 0] Group 6 Loss: 4.6800
+[2025-09-09 14:10:01] [Rank 0] Group 7 Loss: 4.7237
+[2025-09-09 14:10:01] [Rank 0] Group 8 Loss: 4.8579
+[2025-09-09 14:10:01] [Rank 0] Group 9 Loss: 4.8756
+[2025-09-09 14:10:01] [Rank 0] Group 10 Loss: 5.0071
+[2025-09-09 14:10:01] [Rank 0] Group 11 Loss: 5.0254
+[2025-09-09 14:10:01] [Rank 0] Group 12 Loss: 4.9945
+[2025-09-09 14:10:01] [Rank 0] Group 13 Loss: 5.0381
+[2025-09-09 14:10:01] [Rank 0] Group 14 Loss: 5.0355
+[2025-09-09 14:10:01] [Rank 0] Group 15 Loss: 5.0360
+[2025-09-09 14:10:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-09 14:10:01] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 14:10:01] [Rank 0] Group 14 FTA: 0.9000
+[2025-09-09 14:10:01] [Rank 0] Group 15 FTA: 0.4800
+[2025-09-09 14:10:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:10:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:10:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:10:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:10:02] [Rank 0] step:5501/10000 train_time:263463ms step_avg:47.89ms
+[2025-09-09 14:10:03] [Rank 0] step:5521/10000 train_time:264287ms step_avg:47.87ms
+[2025-09-09 14:10:04] [Rank 0] step:5541/10000 train_time:265107ms step_avg:47.84ms
+[2025-09-09 14:10:05] [Rank 0] step:5561/10000 train_time:265928ms step_avg:47.82ms
+[2025-09-09 14:10:06] [Rank 0] step:5581/10000 train_time:266748ms step_avg:47.80ms
+[2025-09-09 14:10:06] [Rank 0] step:5601/10000 train_time:267569ms step_avg:47.77ms
+[2025-09-09 14:10:07] [Rank 0] step:5621/10000 train_time:268389ms step_avg:47.75ms
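The four "[✓] ... curve updated and saved" lines show that the run redraws its diagnostic plots from the accumulated history at every detailed eval and overwrites the PNGs in place, so the files under mode_7_param_gated_seed_43/ always reflect the latest step. A sketch of that pattern with a hypothetical helper, assuming matplotlib (the actual plotting code is not part of this log):

    import matplotlib
    matplotlib.use("Agg")                 # headless rendering on a training node
    import matplotlib.pyplot as plt

    # Appended to after each detailed eval: steps and the 16 per-group FTA values
    history = {"steps": [], "per_group_fta": []}

    def update_fta_curves(history, out_path):
        # Redraw the full curve from scratch and overwrite the PNG
        fig, ax = plt.subplots()
        for g in range(16):
            ax.plot(history["steps"],
                    [fta[g] for fta in history["per_group_fta"]],
                    label=f"Group {g}")
        ax.set_xlabel("step")
        ax.set_ylabel("FTA")
        ax.legend(fontsize=5)
        fig.savefig(out_path)
        plt.close(fig)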
+[2025-09-09 14:10:08] [Rank 0] step:5641/10000 train_time:269479ms step_avg:47.77ms
+[2025-09-09 14:10:09] [Rank 0] step:5661/10000 train_time:270301ms step_avg:47.75ms
+[2025-09-09 14:10:10] [Rank 0] step:5681/10000 train_time:271121ms step_avg:47.72ms
+[2025-09-09 14:10:11] [Rank 0] step:5701/10000 train_time:271941ms step_avg:47.70ms
+[2025-09-09 14:10:12] [Rank 0] step:5721/10000 train_time:272761ms step_avg:47.68ms
+[2025-09-09 14:10:12] [Rank 0] step:5741/10000 train_time:273580ms step_avg:47.65ms
+[2025-09-09 14:10:13] [Rank 0] step:5761/10000 train_time:274400ms step_avg:47.63ms
+[2025-09-09 14:10:14] [Rank 0] step:5781/10000 train_time:275220ms step_avg:47.61ms
+[2025-09-09 14:10:15] [Rank 0] step:5801/10000 train_time:276040ms step_avg:47.58ms
+[2025-09-09 14:10:16] [Rank 0] step:5821/10000 train_time:276860ms step_avg:47.56ms
+[2025-09-09 14:10:17] [Rank 0] step:5841/10000 train_time:277680ms step_avg:47.54ms
+[2025-09-09 14:10:17] [Rank 0] step:5861/10000 train_time:278500ms step_avg:47.52ms
+[2025-09-09 14:10:18] [Rank 0] step:5881/10000 train_time:279321ms step_avg:47.50ms
+[2025-09-09 14:10:19] [Rank 0] step:5901/10000 train_time:280142ms step_avg:47.47ms
+[2025-09-09 14:10:20] [Rank 0] step:5921/10000 train_time:280963ms step_avg:47.45ms
+[2025-09-09 14:10:21] [Rank 0] step:5941/10000 train_time:281784ms step_avg:47.43ms
+[2025-09-09 14:10:21] [Rank 0] step:5961/10000 train_time:282605ms step_avg:47.41ms
+[2025-09-09 14:10:22] [Rank 0] step:5981/10000 train_time:283426ms step_avg:47.39ms
+[2025-09-09 14:10:23] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:10:24] [Rank 0] PRINT: step:6000/10000 train_loss:0.6312 val_loss:0.6192 train_time:284250ms step_avg:47.38ms
+[2025-09-09 14:10:24] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:10:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:11:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:11:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:11:45] [Rank 0] Total Loss: 4.8435
+[2025-09-09 14:11:45] [Rank 0] Total FTA (Unweighted): 0.9713
+[2025-09-09 14:11:45] [Rank 0] Total FTA (Weighted): 0.9712
+[2025-09-09 14:11:45] [Rank 0] Group 0 Loss: 4.7736
+[2025-09-09 14:11:45] [Rank 0] Group 1 Loss: 4.5554
+[2025-09-09 14:11:45] [Rank 0] Group 2 Loss: 4.2673
+[2025-09-09 14:11:45] [Rank 0] Group 3 Loss: 4.8008
+[2025-09-09 14:11:45] [Rank 0] Group 4 Loss: 4.7307
+[2025-09-09 14:11:45] [Rank 0] Group 5 Loss: 4.7881
+[2025-09-09 14:11:45] [Rank 0] Group 6 Loss: 4.7029
+[2025-09-09 14:11:45] [Rank 0] Group 7 Loss: 4.7571
+[2025-09-09 14:11:45] [Rank 0] Group 8 Loss: 4.9029
+[2025-09-09 14:11:45] [Rank 0] Group 9 Loss: 4.9127
+[2025-09-09 14:11:45] [Rank 0] Group 10 Loss: 5.0225
+[2025-09-09 14:11:45] [Rank 0] Group 11 Loss: 5.0565
+[2025-09-09 14:11:45] [Rank 0] Group 12 Loss: 5.0060
+[2025-09-09 14:11:45] [Rank 0] Group 13 Loss: 5.0556
+[2025-09-09 14:11:45] [Rank 0] Group 14 Loss: 5.0814
+[2025-09-09 14:11:45] [Rank 0] Group 15 Loss: 5.0825
+[2025-09-09 14:11:45] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 14:11:45] [Rank 0] Group 13 FTA: 0.9900
+[2025-09-09 14:11:45] [Rank 0] Group 14 FTA: 0.9100
+[2025-09-09 14:11:45] [Rank 0] Group 15 FTA: 0.6400
+[2025-09-09 14:11:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:11:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:11:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:11:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:11:46] [Rank 0] step:6001/10000 train_time:284268ms step_avg:47.37ms
+[2025-09-09 14:11:47] [Rank 0] step:6021/10000 train_time:285170ms step_avg:47.36ms
+[2025-09-09 14:11:48] [Rank 0] step:6041/10000 train_time:285989ms step_avg:47.34ms
+[2025-09-09 14:11:49] [Rank 0] step:6061/10000 train_time:286810ms step_avg:47.32ms
+[2025-09-09 14:11:50] [Rank 0] step:6081/10000 train_time:287631ms step_avg:47.30ms
+[2025-09-09 14:11:51] [Rank 0] step:6101/10000 train_time:288451ms step_avg:47.28ms
+[2025-09-09 14:11:51] [Rank 0] step:6121/10000 train_time:289272ms step_avg:47.26ms
+[2025-09-09 14:11:52] [Rank 0] step:6141/10000 train_time:290092ms step_avg:47.24ms
+[2025-09-09 14:11:53] [Rank 0] step:6161/10000 train_time:290912ms step_avg:47.22ms
+[2025-09-09 14:11:54] [Rank 0] step:6181/10000 train_time:291733ms step_avg:47.20ms
+[2025-09-09 14:11:55] [Rank 0] step:6201/10000 train_time:292553ms step_avg:47.18ms
+[2025-09-09 14:11:55] [Rank 0] step:6221/10000 train_time:293373ms step_avg:47.16ms
+[2025-09-09 14:11:56] [Rank 0] step:6241/10000 train_time:294195ms step_avg:47.14ms
+[2025-09-09 14:11:57] [Rank 0] step:6261/10000 train_time:295015ms step_avg:47.12ms
+[2025-09-09 14:11:58] [Rank 0] step:6281/10000 train_time:295836ms step_avg:47.10ms
+[2025-09-09 14:11:59] [Rank 0] step:6301/10000 train_time:296656ms step_avg:47.08ms
+[2025-09-09 14:12:00] [Rank 0] step:6321/10000 train_time:297476ms step_avg:47.06ms
+[2025-09-09 14:12:00] [Rank 0] step:6341/10000 train_time:298297ms step_avg:47.04ms
+[2025-09-09 14:12:01] [Rank 0] step:6361/10000 train_time:299116ms step_avg:47.02ms
+[2025-09-09 14:12:02] [Rank 0] step:6381/10000 train_time:299938ms step_avg:47.00ms
+[2025-09-09 14:12:03] [Rank 0] step:6401/10000 train_time:300760ms step_avg:46.99ms
+[2025-09-09 14:12:04] [Rank 0] step:6421/10000 train_time:301580ms step_avg:46.97ms
+[2025-09-09 14:12:04] [Rank 0] step:6441/10000 train_time:302400ms step_avg:46.95ms
+[2025-09-09 14:12:05] [Rank 0] step:6461/10000 train_time:303218ms step_avg:46.93ms
+[2025-09-09 14:12:06] [Rank 0] step:6481/10000 train_time:304038ms step_avg:46.91ms
+[2025-09-09 14:12:07] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:12:08] [Rank 0] PRINT: step:6500/10000 train_loss:0.6257 val_loss:0.6152 train_time:305006ms step_avg:46.92ms
+[2025-09-09 14:12:08] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:12:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:13:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:13:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:13:29] [Rank 0] Total Loss: 4.8329
+[2025-09-09 14:13:29] [Rank 0] Total FTA (Unweighted): 0.9844
+[2025-09-09 14:13:29] [Rank 0] Total FTA (Weighted): 0.9844
+[2025-09-09 14:13:29] [Rank 0] Group 0 Loss: 4.6989
+[2025-09-09 14:13:29] [Rank 0] Group 1 Loss: 4.5067
+[2025-09-09 14:13:29] [Rank 0] Group 2 Loss: 4.2517
+[2025-09-09 14:13:29] [Rank 0] Group 3 Loss: 4.7973
+[2025-09-09 14:13:29] [Rank 0] Group 4 Loss: 4.7502
+[2025-09-09 14:13:29] [Rank 0] Group 5 Loss: 4.7812
+[2025-09-09 14:13:29] [Rank 0] Group 6 Loss: 4.6806
+[2025-09-09 14:13:29] [Rank 0] Group 7 Loss: 4.7479
+[2025-09-09 14:13:29] [Rank 0] Group 8 Loss: 4.9276
+[2025-09-09 14:13:29] [Rank 0] Group 9 Loss: 4.9271
+[2025-09-09 14:13:29] [Rank 0] Group 10 Loss: 5.0029
+[2025-09-09 14:13:29] [Rank 0] Group 11 Loss: 5.0549
+[2025-09-09 14:13:29] [Rank 0] Group 12 Loss: 5.0093
+[2025-09-09 14:13:29] [Rank 0] Group 13 Loss: 5.0401
+[2025-09-09 14:13:29] [Rank 0] Group 14 Loss: 5.0666
+[2025-09-09 14:13:29] [Rank 0] Group 15 Loss: 5.0829
+[2025-09-09 14:13:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 14:13:29] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 14:13:29] [Rank 0] Group 14 FTA: 0.9700
+[2025-09-09 14:13:29] [Rank 0] Group 15 FTA: 0.7900
+[2025-09-09 14:13:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:13:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:13:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:13:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:13:31] [Rank 0] step:6501/10000 train_time:305024ms step_avg:46.92ms
+[2025-09-09 14:13:32] [Rank 0] step:6521/10000 train_time:305854ms step_avg:46.90ms
+[2025-09-09 14:13:33] [Rank 0] step:6541/10000 train_time:306675ms step_avg:46.89ms
+[2025-09-09 14:13:33] [Rank 0] step:6561/10000 train_time:307496ms step_avg:46.87ms
+[2025-09-09 14:13:34] [Rank 0] step:6581/10000 train_time:308317ms step_avg:46.85ms
+[2025-09-09 14:13:35] [Rank 0] step:6601/10000 train_time:309138ms step_avg:46.83ms
+[2025-09-09 14:13:36] [Rank 0] step:6621/10000 train_time:309958ms step_avg:46.81ms
+[2025-09-09 14:13:37] [Rank 0] step:6641/10000 train_time:310779ms step_avg:46.80ms
+[2025-09-09 14:13:37] [Rank 0] step:6661/10000 train_time:311599ms step_avg:46.78ms
+[2025-09-09 14:13:38] [Rank 0] step:6681/10000 train_time:312420ms step_avg:46.76ms
+[2025-09-09 14:13:39] [Rank 0] step:6701/10000 train_time:313241ms step_avg:46.75ms
+[2025-09-09 14:13:40] [Rank 0] step:6721/10000 train_time:314062ms step_avg:46.73ms
+[2025-09-09 14:13:41] [Rank 0] step:6741/10000 train_time:314883ms step_avg:46.71ms
+[2025-09-09 14:13:42] [Rank 0] step:6761/10000 train_time:315703ms step_avg:46.69ms
+[2025-09-09 14:13:42] [Rank 0] step:6781/10000 train_time:316524ms step_avg:46.68ms
+[2025-09-09 14:13:43] [Rank 0] step:6801/10000 train_time:317345ms step_avg:46.66ms
+[2025-09-09 14:13:44] [Rank 0] step:6821/10000 train_time:318166ms step_avg:46.65ms
+[2025-09-09 14:13:46] [Rank 0] step:6841/10000 train_time:319680ms step_avg:46.73ms
+[2025-09-09 14:13:46] [Rank 0] step:6861/10000 train_time:320501ms step_avg:46.71ms
+[2025-09-09 14:13:47] [Rank 0] step:6881/10000 train_time:321321ms step_avg:46.70ms
+[2025-09-09 14:13:48] [Rank 0] step:6901/10000 train_time:322141ms step_avg:46.68ms
+[2025-09-09 14:13:49] [Rank 0] step:6921/10000 train_time:322960ms step_avg:46.66ms
+[2025-09-09 14:13:50] [Rank 0] step:6941/10000 train_time:323780ms step_avg:46.65ms
+[2025-09-09 14:13:50] [Rank 0] step:6961/10000 train_time:324601ms step_avg:46.63ms
+[2025-09-09 14:13:51] [Rank 0] step:6981/10000 train_time:325420ms step_avg:46.62ms
+[2025-09-09 14:13:52] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:13:53] [Rank 0] PRINT: step:7000/10000 train_loss:0.6206 val_loss:0.6124 train_time:326243ms step_avg:46.61ms
+[2025-09-09 14:13:53] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:13:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:15:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:15:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:15:14] [Rank 0] Total Loss: 4.8861
+[2025-09-09 14:15:14] [Rank 0] Total FTA (Unweighted): 0.9863
+[2025-09-09 14:15:14] [Rank 0] Total FTA (Weighted): 0.9862
+[2025-09-09 14:15:14] [Rank 0] Group 0 Loss: 4.7480
+[2025-09-09 14:15:14] [Rank 0] Group 1 Loss: 4.5416
+[2025-09-09 14:15:14] [Rank 0] Group 2 Loss: 4.2862
+[2025-09-09 14:15:14] [Rank 0] Group 3 Loss: 4.8111
+[2025-09-09 14:15:14] [Rank 0] Group 4 Loss: 4.8140
+[2025-09-09 14:15:14] [Rank 0] Group 5 Loss: 4.8109
+[2025-09-09 14:15:14] [Rank 0] Group 6 Loss: 4.7220
+[2025-09-09 14:15:14] [Rank 0] Group 7 Loss: 4.8123
+[2025-09-09 14:15:14] [Rank 0] Group 8 Loss: 4.9920
+[2025-09-09 14:15:14] [Rank 0] Group 9 Loss: 4.9875
+[2025-09-09 14:15:14] [Rank 0] Group 10 Loss: 5.0695
+[2025-09-09 14:15:14] [Rank 0] Group 11 Loss: 5.1260
+[2025-09-09 14:15:14] [Rank 0] Group 12 Loss: 5.0716
+[2025-09-09 14:15:14] [Rank 0] Group 13 Loss: 5.1538
+[2025-09-09 14:15:14] [Rank 0] Group 14 Loss: 5.1161
+[2025-09-09 14:15:14] [Rank 0] Group 15 Loss: 5.1143
+[2025-09-09 14:15:14] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 14:15:14] [Rank 0] Group 14 FTA: 0.9600
+[2025-09-09 14:15:14] [Rank 0] Group 15 FTA: 0.8200
+[2025-09-09 14:15:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:15:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:15:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:15:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:15:15] [Rank 0] step:7001/10000 train_time:326261ms step_avg:46.60ms
+[2025-09-09 14:15:16] [Rank 0] step:7021/10000 train_time:327084ms step_avg:46.59ms
+[2025-09-09 14:15:17] [Rank 0] step:7041/10000 train_time:328223ms step_avg:46.62ms
+[2025-09-09 14:15:18] [Rank 0] step:7061/10000 train_time:329183ms step_avg:46.62ms
+[2025-09-09 14:15:19] [Rank 0] step:7081/10000 train_time:330004ms step_avg:46.60ms
+[2025-09-09 14:15:20] [Rank 0] step:7101/10000 train_time:330825ms step_avg:46.59ms
+[2025-09-09 14:15:21] [Rank 0] step:7121/10000 train_time:331645ms step_avg:46.57ms
+[2025-09-09 14:15:21] [Rank 0] step:7141/10000 train_time:332466ms step_avg:46.56ms
+[2025-09-09 14:15:22] [Rank 0] step:7161/10000 train_time:333287ms step_avg:46.54ms
+[2025-09-09 14:15:23] [Rank 0] step:7181/10000 train_time:334107ms step_avg:46.53ms
+[2025-09-09 14:15:24] [Rank 0] step:7201/10000 train_time:334928ms step_avg:46.51ms
+[2025-09-09 14:15:25] [Rank 0] step:7221/10000 train_time:335749ms step_avg:46.50ms
+[2025-09-09 14:15:26] [Rank 0] step:7241/10000 train_time:336569ms step_avg:46.48ms
+[2025-09-09 14:15:26] [Rank 0] step:7261/10000 train_time:337390ms step_avg:46.47ms
+[2025-09-09 14:15:27] [Rank 0] step:7281/10000 train_time:338211ms step_avg:46.45ms
+[2025-09-09 14:15:28] [Rank 0] step:7301/10000 train_time:339034ms step_avg:46.44ms
+[2025-09-09 14:15:29] [Rank 0] step:7321/10000 train_time:339855ms step_avg:46.42ms
+[2025-09-09 14:15:30] [Rank 0] step:7341/10000 train_time:340674ms step_avg:46.41ms
+[2025-09-09 14:15:30] [Rank 0] step:7361/10000 train_time:341495ms step_avg:46.39ms
+[2025-09-09 14:15:31] [Rank 0] step:7381/10000 train_time:342316ms step_avg:46.38ms
+[2025-09-09 14:15:32] [Rank 0] step:7401/10000 train_time:343137ms step_avg:46.36ms
+[2025-09-09 14:15:33] [Rank 0] step:7421/10000 train_time:343959ms step_avg:46.35ms
+[2025-09-09 14:15:34] [Rank 0] step:7441/10000 train_time:344779ms step_avg:46.33ms
+[2025-09-09 14:15:35] [Rank 0] step:7461/10000 train_time:345599ms step_avg:46.32ms
+[2025-09-09 14:15:35] [Rank 0] step:7481/10000 train_time:346419ms step_avg:46.31ms
+[2025-09-09 14:15:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:15:37] [Rank 0] PRINT: step:7500/10000 train_loss:0.6165 val_loss:0.6100 train_time:347241ms step_avg:46.30ms
+[2025-09-09 14:15:37] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:15:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:16:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:16:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:16:58] [Rank 0] Total Loss: 4.8719
+[2025-09-09 14:16:58] [Rank 0] Total FTA (Unweighted): 0.9850
+[2025-09-09 14:16:58] [Rank 0] Total FTA (Weighted): 0.9850
+[2025-09-09 14:16:58] [Rank 0] Group 0 Loss: 4.7563
+[2025-09-09 14:16:58] [Rank 0] Group 1 Loss: 4.5385
+[2025-09-09 14:16:58] [Rank 0] Group 2 Loss: 4.2561
+[2025-09-09 14:16:58] [Rank 0] Group 3 Loss: 4.8377
+[2025-09-09 14:16:58] [Rank 0] Group 4 Loss: 4.7689
+[2025-09-09 14:16:58] [Rank 0] Group 5 Loss: 4.7915
+[2025-09-09 14:16:58] [Rank 0] Group 6 Loss: 4.7203
+[2025-09-09 14:16:58] [Rank 0] Group 7 Loss: 4.7949
+[2025-09-09 14:16:58] [Rank 0] Group 8 Loss: 4.9768
+[2025-09-09 14:16:58] [Rank 0] Group 9 Loss: 4.9594
+[2025-09-09 14:16:58] [Rank 0] Group 10 Loss: 5.0463
+[2025-09-09 14:16:58] [Rank 0] Group 11 Loss: 5.1101
+[2025-09-09 14:16:58] [Rank 0] Group 12 Loss: 5.0658
+[2025-09-09 14:16:58] [Rank 0] Group 13 Loss: 5.1274
+[2025-09-09 14:16:58] [Rank 0] Group 14 Loss: 5.1029
+[2025-09-09 14:16:58] [Rank 0] Group 15 Loss: 5.0975
+[2025-09-09 14:16:58] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:16:58] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:16:58] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:16:58] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:16:58] [Rank 0] Group 4 FTA: 0.9400
+[2025-09-09 14:16:58] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:16:58] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:16:58] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:16:58] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:16:58] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:16:58] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:16:58] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-09 14:16:59] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 14:16:59] [Rank 0] Group 13 FTA: 0.9900
+[2025-09-09 14:16:59] [Rank 0] Group 14 FTA: 0.9700
+[2025-09-09 14:16:59] [Rank 0] Group 15 FTA: 0.8900
+[2025-09-09 14:16:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:16:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:17:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:17:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:17:00] [Rank 0] step:7501/10000 train_time:347260ms step_avg:46.30ms
+[2025-09-09 14:17:01] [Rank 0] step:7521/10000 train_time:348093ms step_avg:46.28ms
+[2025-09-09 14:17:02] [Rank 0] step:7541/10000 train_time:348911ms step_avg:46.27ms
+[2025-09-09 14:17:02] [Rank 0] step:7561/10000 train_time:349732ms step_avg:46.25ms
+[2025-09-09 14:17:03] [Rank 0] step:7581/10000 train_time:350552ms step_avg:46.24ms
+[2025-09-09 14:17:04] [Rank 0] step:7601/10000 train_time:351374ms step_avg:46.23ms
+[2025-09-09 14:17:05] [Rank 0] step:7621/10000 train_time:352194ms step_avg:46.21ms
+[2025-09-09 14:17:06] [Rank 0] step:7641/10000 train_time:353687ms step_avg:46.29ms
+[2025-09-09 14:17:07] [Rank 0] step:7661/10000 train_time:354507ms step_avg:46.27ms
+[2025-09-09 14:17:08] [Rank 0] step:7681/10000 train_time:355327ms step_avg:46.26ms
+[2025-09-09 14:17:09] [Rank 0] step:7701/10000 train_time:356147ms step_avg:46.25ms
+[2025-09-09 14:17:10] [Rank 0] step:7721/10000 train_time:356967ms step_avg:46.23ms
+[2025-09-09 14:17:10] [Rank 0] step:7741/10000 train_time:357788ms step_avg:46.22ms
+[2025-09-09 14:17:11] [Rank 0] step:7761/10000 train_time:358608ms step_avg:46.21ms
+[2025-09-09 14:17:12] [Rank 0] step:7781/10000 train_time:359429ms step_avg:46.19ms
+[2025-09-09 14:17:13] [Rank 0] step:7801/10000 train_time:360249ms step_avg:46.18ms
+[2025-09-09 14:17:14] [Rank 0] step:7821/10000 train_time:361072ms step_avg:46.17ms
+[2025-09-09 14:17:15] [Rank 0] step:7841/10000 train_time:361891ms step_avg:46.15ms
+[2025-09-09 14:17:15] [Rank 0] step:7861/10000 train_time:362710ms step_avg:46.14ms
+[2025-09-09 14:17:16] [Rank 0] step:7881/10000 train_time:363528ms step_avg:46.13ms
+[2025-09-09 14:17:17] [Rank 0] step:7901/10000 train_time:364348ms step_avg:46.11ms
+[2025-09-09 14:17:18] [Rank 0] step:7921/10000 train_time:365167ms step_avg:46.10ms
+[2025-09-09 14:17:19] [Rank 0] step:7941/10000 train_time:365986ms step_avg:46.09ms
+[2025-09-09 14:17:19] [Rank 0] step:7961/10000 train_time:366806ms step_avg:46.08ms
+[2025-09-09 14:17:20] [Rank 0] step:7981/10000 train_time:367626ms step_avg:46.06ms
+[2025-09-09 14:17:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
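The step_avg field is consistent with cumulative train_time divided by the step index; that relationship is inferred from the logged values, not from the script source. A quick check against the step 7641 entry above:

    # "step:7641/10000 train_time:353687ms step_avg:46.29ms"
    train_time_ms = 353_687
    step = 7_641
    print(round(train_time_ms / step, 2))  # -> 46.29, matching the logged step_avg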
+[2025-09-09 14:17:22] [Rank 0] PRINT: step:8000/10000 train_loss:0.6134 val_loss:0.6084 train_time:368448ms step_avg:46.06ms
+[2025-09-09 14:17:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:17:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:18:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:18:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:18:43] [Rank 0] Total Loss: 4.8691
+[2025-09-09 14:18:43] [Rank 0] Total FTA (Unweighted): 0.9956
+[2025-09-09 14:18:43] [Rank 0] Total FTA (Weighted): 0.9956
+[2025-09-09 14:18:43] [Rank 0] Group 0 Loss: 4.7585
+[2025-09-09 14:18:43] [Rank 0] Group 1 Loss: 4.5776
+[2025-09-09 14:18:43] [Rank 0] Group 2 Loss: 4.2713
+[2025-09-09 14:18:43] [Rank 0] Group 3 Loss: 4.7945
+[2025-09-09 14:18:43] [Rank 0] Group 4 Loss: 4.7412
+[2025-09-09 14:18:43] [Rank 0] Group 5 Loss: 4.7950
+[2025-09-09 14:18:43] [Rank 0] Group 6 Loss: 4.7168
+[2025-09-09 14:18:43] [Rank 0] Group 7 Loss: 4.7974
+[2025-09-09 14:18:43] [Rank 0] Group 8 Loss: 4.9666
+[2025-09-09 14:18:43] [Rank 0] Group 9 Loss: 4.9432
+[2025-09-09 14:18:43] [Rank 0] Group 10 Loss: 5.0642
+[2025-09-09 14:18:43] [Rank 0] Group 11 Loss: 5.1018
+[2025-09-09 14:18:43] [Rank 0] Group 12 Loss: 5.0573
+[2025-09-09 14:18:43] [Rank 0] Group 13 Loss: 5.1066
+[2025-09-09 14:18:43] [Rank 0] Group 14 Loss: 5.0991
+[2025-09-09 14:18:43] [Rank 0] Group 15 Loss: 5.1146
+[2025-09-09 14:18:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 14:18:43] [Rank 0] Group 13 FTA: 0.9900
+[2025-09-09 14:18:43] [Rank 0] Group 14 FTA: 0.9900
+[2025-09-09 14:18:43] [Rank 0] Group 15 FTA: 0.9500
+[2025-09-09 14:18:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:18:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:18:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:18:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:18:44] [Rank 0] step:8001/10000 train_time:368466ms step_avg:46.05ms
+[2025-09-09 14:18:46] [Rank 0] step:8021/10000 train_time:369978ms step_avg:46.13ms
+[2025-09-09 14:18:46] [Rank 0] step:8041/10000 train_time:370798ms step_avg:46.11ms
+[2025-09-09 14:18:47] [Rank 0] step:8061/10000 train_time:371618ms step_avg:46.10ms
+[2025-09-09 14:18:48] [Rank 0] step:8081/10000 train_time:372438ms step_avg:46.09ms
+[2025-09-09 14:18:49] [Rank 0] step:8101/10000 train_time:373258ms step_avg:46.08ms
+[2025-09-09 14:18:50] [Rank 0] step:8121/10000 train_time:374079ms step_avg:46.06ms
+[2025-09-09 14:18:51] [Rank 0] step:8141/10000 train_time:374898ms step_avg:46.05ms
+[2025-09-09 14:18:51] [Rank 0] step:8161/10000 train_time:375718ms step_avg:46.04ms
+[2025-09-09 14:18:52] [Rank 0] step:8181/10000 train_time:376538ms step_avg:46.03ms
+[2025-09-09 14:18:53] [Rank 0] step:8201/10000 train_time:377358ms step_avg:46.01ms
+[2025-09-09 14:18:54] [Rank 0] step:8221/10000 train_time:378177ms step_avg:46.00ms
+[2025-09-09 14:18:55] [Rank 0] step:8241/10000 train_time:378997ms step_avg:45.99ms
+[2025-09-09 14:18:55] [Rank 0] step:8261/10000 train_time:379819ms step_avg:45.98ms
+[2025-09-09 14:18:56] [Rank 0] step:8281/10000 train_time:380640ms step_avg:45.97ms
+[2025-09-09 14:18:57] [Rank 0] step:8301/10000 train_time:381458ms step_avg:45.95ms
+[2025-09-09 14:18:58] [Rank 0] step:8321/10000 train_time:382278ms step_avg:45.94ms
+[2025-09-09 14:18:59] [Rank 0] step:8341/10000 train_time:383097ms step_avg:45.93ms
+[2025-09-09 14:19:00] [Rank 0] step:8361/10000 train_time:383917ms step_avg:45.92ms
+[2025-09-09 14:19:00] [Rank 0] step:8381/10000 train_time:384737ms step_avg:45.91ms
+[2025-09-09 14:19:01] [Rank 0] step:8401/10000 train_time:385557ms step_avg:45.89ms
+[2025-09-09 14:19:02] [Rank 0] step:8421/10000 train_time:386377ms step_avg:45.88ms
+[2025-09-09 14:19:03] [Rank 0] step:8441/10000 train_time:387196ms step_avg:45.87ms
+[2025-09-09 14:19:04] [Rank 0] step:8461/10000 train_time:388017ms step_avg:45.86ms
+[2025-09-09 14:19:04] [Rank 0] step:8481/10000 train_time:388836ms step_avg:45.85ms
+[2025-09-09 14:19:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:19:06] [Rank 0] PRINT: step:8500/10000 train_loss:0.6111 val_loss:0.6069 train_time:389659ms step_avg:45.84ms
+[2025-09-09 14:19:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:19:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:20:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:20:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:20:27] [Rank 0] Total Loss: 4.8670
+[2025-09-09 14:20:27] [Rank 0] Total FTA (Unweighted): 0.9969
+[2025-09-09 14:20:27] [Rank 0] Total FTA (Weighted): 0.9969
+[2025-09-09 14:20:27] [Rank 0] Group 0 Loss: 4.6880
+[2025-09-09 14:20:27] [Rank 0] Group 1 Loss: 4.6813
+[2025-09-09 14:20:27] [Rank 0] Group 2 Loss: 4.3029
+[2025-09-09 14:20:27] [Rank 0] Group 3 Loss: 4.8114
+[2025-09-09 14:20:27] [Rank 0] Group 4 Loss: 4.7478
+[2025-09-09 14:20:27] [Rank 0] Group 5 Loss: 4.7898
+[2025-09-09 14:20:27] [Rank 0] Group 6 Loss: 4.7274
+[2025-09-09 14:20:27] [Rank 0] Group 7 Loss: 4.7750
+[2025-09-09 14:20:27] [Rank 0] Group 8 Loss: 4.9406
+[2025-09-09 14:20:27] [Rank 0] Group 9 Loss: 4.9303
+[2025-09-09 14:20:27] [Rank 0] Group 10 Loss: 5.0606
+[2025-09-09 14:20:27] [Rank 0] Group 11 Loss: 5.0816
+[2025-09-09 14:20:27] [Rank 0] Group 12 Loss: 5.0486
+[2025-09-09 14:20:27] [Rank 0] Group 13 Loss: 5.0998
+[2025-09-09 14:20:27] [Rank 0] Group 14 Loss: 5.0910
+[2025-09-09 14:20:27] [Rank 0] Group 15 Loss: 5.0952
+[2025-09-09 14:20:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 14:20:27] [Rank 0] Group 14 FTA: 0.9800
+[2025-09-09 14:20:27] [Rank 0] Group 15 FTA: 0.9700
+[2025-09-09 14:20:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:20:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:20:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:20:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:20:28] [Rank 0] step:8501/10000 train_time:389677ms step_avg:45.84ms
+[2025-09-09 14:20:29] [Rank 0] step:8521/10000 train_time:390492ms step_avg:45.83ms
+[2025-09-09 14:20:30] [Rank 0] step:8541/10000 train_time:391313ms step_avg:45.82ms
+[2025-09-09 14:20:31] [Rank 0] step:8561/10000 train_time:392134ms step_avg:45.80ms
+[2025-09-09 14:20:32] [Rank 0] step:8581/10000 train_time:392954ms step_avg:45.79ms
+[2025-09-09 14:20:32] [Rank 0] step:8601/10000 train_time:393777ms step_avg:45.78ms
+[2025-09-09 14:20:34] [Rank 0] step:8621/10000 train_time:395101ms step_avg:45.83ms
+[2025-09-09 14:20:35] [Rank 0] step:8641/10000 train_time:395921ms step_avg:45.82ms
+[2025-09-09 14:20:35] [Rank 0] step:8661/10000 train_time:396741ms step_avg:45.81ms
+[2025-09-09 14:20:36] [Rank 0] step:8681/10000 train_time:397561ms step_avg:45.80ms
+[2025-09-09 14:20:37] [Rank 0] step:8701/10000 train_time:398381ms step_avg:45.79ms
+[2025-09-09 14:20:38] [Rank 0] step:8721/10000 train_time:399200ms step_avg:45.77ms
+[2025-09-09 14:20:39] [Rank 0] step:8741/10000 train_time:400019ms step_avg:45.76ms
+[2025-09-09 14:20:39] [Rank 0] step:8761/10000 train_time:400839ms step_avg:45.75ms
+[2025-09-09 14:20:40] [Rank 0] step:8781/10000 train_time:401658ms step_avg:45.74ms
+[2025-09-09 14:20:41] [Rank 0] step:8801/10000 train_time:402478ms step_avg:45.73ms
+[2025-09-09 14:20:42] [Rank 0] step:8821/10000 train_time:403298ms step_avg:45.72ms
+[2025-09-09 14:20:43] [Rank 0] step:8841/10000 train_time:404811ms step_avg:45.79ms
+[2025-09-09 14:20:44] [Rank 0] step:8861/10000 train_time:405632ms step_avg:45.78ms
+[2025-09-09 14:20:45] [Rank 0] step:8881/10000 train_time:406452ms step_avg:45.77ms
+[2025-09-09 14:20:46] [Rank 0] step:8901/10000 train_time:407273ms step_avg:45.76ms
+[2025-09-09 14:20:47] [Rank 0] step:8921/10000 train_time:408094ms step_avg:45.75ms
+[2025-09-09 14:20:48] [Rank 0] step:8941/10000 train_time:408914ms step_avg:45.73ms
+[2025-09-09 14:20:48] [Rank 0] step:8961/10000 train_time:409735ms step_avg:45.72ms
+[2025-09-09 14:20:49] [Rank 0] step:8981/10000 train_time:410556ms step_avg:45.71ms
+[2025-09-09 14:20:50] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:20:50] [Rank 0] PRINT: step:9000/10000 train_loss:0.6093 val_loss:0.6058 train_time:411379ms step_avg:45.71ms
+[2025-09-09 14:20:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:20:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:22:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:22:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:22:11] [Rank 0] Total Loss: 4.9188
+[2025-09-09 14:22:11] [Rank 0] Total FTA (Unweighted): 0.9988
+[2025-09-09 14:22:11] [Rank 0] Total FTA (Weighted): 0.9988
+[2025-09-09 14:22:11] [Rank 0] Group 0 Loss: 4.7695
+[2025-09-09 14:22:11] [Rank 0] Group 1 Loss: 4.5810
+[2025-09-09 14:22:11] [Rank 0] Group 2 Loss: 4.3517
+[2025-09-09 14:22:11] [Rank 0] Group 3 Loss: 4.8425
+[2025-09-09 14:22:11] [Rank 0] Group 4 Loss: 4.8013
+[2025-09-09 14:22:11] [Rank 0] Group 5 Loss: 4.8467
+[2025-09-09 14:22:11] [Rank 0] Group 6 Loss: 4.7632
+[2025-09-09 14:22:11] [Rank 0] Group 7 Loss: 4.8433
+[2025-09-09 14:22:11] [Rank 0] Group 8 Loss: 5.0167
+[2025-09-09 14:22:11] [Rank 0] Group 9 Loss: 4.9927
+[2025-09-09 14:22:11] [Rank 0] Group 10 Loss: 5.1188
+[2025-09-09 14:22:11] [Rank 0] Group 11 Loss: 5.1908
+[2025-09-09 14:22:11] [Rank 0] Group 12 Loss: 5.1046
+[2025-09-09 14:22:11] [Rank 0] Group 13 Loss: 5.1872
+[2025-09-09 14:22:12] [Rank 0] Group 14 Loss: 5.1406
+[2025-09-09 14:22:12] [Rank 0] Group 15 Loss: 5.1496
+[2025-09-09 14:22:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 14:22:12] [Rank 0] Group 14 FTA: 0.9900
+[2025-09-09 14:22:12] [Rank 0] Group 15 FTA: 0.9900
+[2025-09-09 14:22:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:22:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:22:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:22:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:22:13] [Rank 0] step:9001/10000 train_time:411397ms step_avg:45.71ms
+[2025-09-09 14:22:14] [Rank 0] step:9021/10000 train_time:412231ms step_avg:45.70ms
+[2025-09-09 14:22:15] [Rank 0] step:9041/10000 train_time:413051ms step_avg:45.69ms
+[2025-09-09 14:22:15] [Rank 0] step:9061/10000 train_time:413872ms step_avg:45.68ms
+[2025-09-09 14:22:16] [Rank 0] step:9081/10000 train_time:414691ms step_avg:45.67ms
+[2025-09-09 14:22:17] [Rank 0] step:9101/10000 train_time:415511ms step_avg:45.66ms
+[2025-09-09 14:22:18] [Rank 0] step:9121/10000 train_time:416330ms step_avg:45.65ms
+[2025-09-09 14:22:19] [Rank 0] step:9141/10000 train_time:417149ms step_avg:45.63ms
+[2025-09-09 14:22:19] [Rank 0] step:9161/10000 train_time:417968ms step_avg:45.62ms
+[2025-09-09 14:22:20] [Rank 0] step:9181/10000 train_time:418787ms step_avg:45.61ms
+[2025-09-09 14:22:21] [Rank 0] step:9201/10000 train_time:419606ms step_avg:45.60ms
+[2025-09-09 14:22:22] [Rank 0] step:9221/10000 train_time:420426ms step_avg:45.59ms
+[2025-09-09 14:22:23] [Rank 0] step:9241/10000 train_time:421244ms step_avg:45.58ms
+[2025-09-09 14:22:24] [Rank 0] step:9261/10000 train_time:422063ms step_avg:45.57ms
+[2025-09-09 14:22:24] [Rank 0] step:9281/10000 train_time:422882ms step_avg:45.56ms
+[2025-09-09 14:22:25] [Rank 0] step:9301/10000 train_time:423701ms step_avg:45.55ms
+[2025-09-09 14:22:26] [Rank 0] step:9321/10000 train_time:424520ms step_avg:45.54ms
+[2025-09-09 14:22:27] [Rank 0] step:9341/10000 train_time:425338ms step_avg:45.53ms
+[2025-09-09 14:22:28] [Rank 0] step:9361/10000 train_time:426158ms step_avg:45.52ms
+[2025-09-09 14:22:29] [Rank 0] step:9381/10000 train_time:426977ms step_avg:45.52ms
+[2025-09-09 14:22:29] [Rank 0] step:9401/10000 train_time:427799ms step_avg:45.51ms
+[2025-09-09 14:22:30] [Rank 0] step:9421/10000 train_time:428619ms step_avg:45.50ms
+[2025-09-09 14:22:31] [Rank 0] step:9441/10000 train_time:429439ms step_avg:45.49ms
+[2025-09-09 14:22:32] [Rank 0] step:9461/10000 train_time:430259ms step_avg:45.48ms
+[2025-09-09 14:22:33] [Rank 0] step:9481/10000 train_time:431079ms step_avg:45.47ms
+[2025-09-09 14:22:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:22:34] [Rank 0] PRINT: step:9500/10000 train_loss:0.6081 val_loss:0.6051 train_time:431902ms step_avg:45.46ms
+[2025-09-09 14:22:34] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:22:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:23:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:23:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:23:55] [Rank 0] Total Loss: 4.9585
+[2025-09-09 14:23:55] [Rank 0] Total FTA (Unweighted): 0.9988
+[2025-09-09 14:23:55] [Rank 0] Total FTA (Weighted): 0.9988
+[2025-09-09 14:23:55] [Rank 0] Group 0 Loss: 4.9784
+[2025-09-09 14:23:55] [Rank 0] Group 1 Loss: 4.6566
+[2025-09-09 14:23:55] [Rank 0] Group 2 Loss: 4.3871
+[2025-09-09 14:23:55] [Rank 0] Group 3 Loss: 4.8903
+[2025-09-09 14:23:55] [Rank 0] Group 4 Loss: 4.8159
+[2025-09-09 14:23:55] [Rank 0] Group 5 Loss: 4.8735
+[2025-09-09 14:23:55] [Rank 0] Group 6 Loss: 4.7963
+[2025-09-09 14:23:55] [Rank 0] Group 7 Loss: 4.8787
+[2025-09-09 14:23:55] [Rank 0] Group 8 Loss: 5.0206
+[2025-09-09 14:23:55] [Rank 0] Group 9 Loss: 5.0077
+[2025-09-09 14:23:55] [Rank 0] Group 10 Loss: 5.1392
+[2025-09-09 14:23:55] [Rank 0] Group 11 Loss: 5.1705
+[2025-09-09 14:23:55] [Rank 0] Group 12 Loss: 5.1498
+[2025-09-09 14:23:55] [Rank 0] Group 13 Loss: 5.2116
+[2025-09-09 14:23:55] [Rank 0] Group 14 Loss: 5.1709
+[2025-09-09 14:23:55] [Rank 0] Group 15 Loss: 5.1884
+[2025-09-09 14:23:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 14:23:55] [Rank 0] Group 15 FTA: 0.9800
+[2025-09-09 14:23:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:23:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:23:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:23:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:23:56] [Rank 0] step:9501/10000 train_time:431920ms step_avg:45.46ms
+[2025-09-09 14:23:57] [Rank 0] step:9521/10000 train_time:432738ms step_avg:45.45ms
+[2025-09-09 14:23:58] [Rank 0] step:9541/10000 train_time:433558ms step_avg:45.44ms
+[2025-09-09 14:23:59] [Rank 0] step:9561/10000 train_time:434379ms step_avg:45.43ms
+[2025-09-09 14:24:00] [Rank 0] step:9581/10000 train_time:435200ms step_avg:45.42ms
+[2025-09-09 14:24:00] [Rank 0] step:9601/10000 train_time:436020ms step_avg:45.41ms
+[2025-09-09 14:24:01] [Rank 0] step:9621/10000 train_time:436841ms step_avg:45.40ms
+[2025-09-09 14:24:02] [Rank 0] step:9641/10000 train_time:437661ms step_avg:45.40ms
+[2025-09-09 14:24:03] [Rank 0] step:9661/10000 train_time:438761ms step_avg:45.42ms
+[2025-09-09 14:24:04] [Rank 0] step:9681/10000 train_time:439581ms step_avg:45.41ms
+[2025-09-09 14:24:05] [Rank 0] step:9701/10000 train_time:440400ms step_avg:45.40ms
+[2025-09-09 14:24:06] [Rank 0] step:9721/10000 train_time:441219ms step_avg:45.39ms
+[2025-09-09 14:24:06] [Rank 0] step:9741/10000 train_time:442038ms step_avg:45.38ms
+[2025-09-09 14:24:07] [Rank 0] step:9761/10000 train_time:442857ms step_avg:45.37ms
+[2025-09-09 14:24:08] [Rank 0] step:9781/10000 train_time:443676ms step_avg:45.36ms
+[2025-09-09 14:24:09] [Rank 0] step:9801/10000 train_time:444495ms step_avg:45.35ms
+[2025-09-09 14:24:10] [Rank 0] step:9821/10000 train_time:445315ms step_avg:45.34ms
+[2025-09-09 14:24:11] [Rank 0] step:9841/10000 train_time:446134ms step_avg:45.33ms
+[2025-09-09 14:24:11] [Rank 0] step:9861/10000 train_time:446954ms step_avg:45.33ms
+[2025-09-09 14:24:12] [Rank 0] step:9881/10000 train_time:447774ms step_avg:45.32ms
+[2025-09-09 14:24:13] [Rank 0] step:9901/10000 train_time:448594ms step_avg:45.31ms
+[2025-09-09 14:24:14] [Rank 0] step:9921/10000 train_time:449414ms step_avg:45.30ms
+[2025-09-09 14:24:15] [Rank 0] step:9941/10000 train_time:450234ms step_avg:45.29ms
+[2025-09-09 14:24:15] [Rank 0] step:9961/10000 train_time:451054ms step_avg:45.28ms
+[2025-09-09 14:24:16] [Rank 0] step:9981/10000 train_time:451874ms step_avg:45.27ms
+[2025-09-09 14:24:17] [Rank 0] step:10000/10000 train_time:452653ms step_avg:45.27ms
+[2025-09-09 14:24:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
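"Total FTA (Unweighted)" and "Total FTA (Weighted)" agree in every report in this log. That is expected if, as config.json's per_group_k = 100 and the "Fixed-eval set loaded with 1600 samples" line suggest, all 16 groups contribute 100 samples each: a sample-weighted mean of per-group accuracies then equals the plain mean. A short sketch under that equal-size assumption, using the step-9500 values above:

    # Groups 0-14 logged 1.0000 and group 15 logged 0.9800 at step 9500.
    group_fta = [1.0] * 15 + [0.98]
    group_sizes = [100] * 16      # per_group_k = 100; equal sizes are an assumption
    unweighted = sum(group_fta) / len(group_fta)
    weighted = sum(f * n for f, n in zip(group_fta, group_sizes)) / sum(group_sizes)
    assert abs(unweighted - weighted) < 1e-12   # equal sizes make the means agree
    print(unweighted)   # ~0.99875, which the log renders as 0.9988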
+[2025-09-09 14:24:18] [Rank 0] PRINT: step:10000/10000 train_loss:0.6070 val_loss:0.6044 train_time:452700ms step_avg:45.27ms
+[2025-09-09 14:24:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:24:18] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:25:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:25:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:25:39] [Rank 0] Total Loss: 4.9396
+[2025-09-09 14:25:39] [Rank 0] Total FTA (Unweighted): 1.0000
+[2025-09-09 14:25:39] [Rank 0] Total FTA (Weighted): 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 0 Loss: 4.8687
+[2025-09-09 14:25:39] [Rank 0] Group 1 Loss: 4.6028
+[2025-09-09 14:25:39] [Rank 0] Group 2 Loss: 4.3561
+[2025-09-09 14:25:39] [Rank 0] Group 3 Loss: 4.8651
+[2025-09-09 14:25:39] [Rank 0] Group 4 Loss: 4.8286
+[2025-09-09 14:25:39] [Rank 0] Group 5 Loss: 4.8610
+[2025-09-09 14:25:39] [Rank 0] Group 6 Loss: 4.7956
+[2025-09-09 14:25:39] [Rank 0] Group 7 Loss: 4.8585
+[2025-09-09 14:25:39] [Rank 0] Group 8 Loss: 5.0089
+[2025-09-09 14:25:39] [Rank 0] Group 9 Loss: 5.0092
+[2025-09-09 14:25:39] [Rank 0] Group 10 Loss: 5.1285
+[2025-09-09 14:25:39] [Rank 0] Group 11 Loss: 5.1712
+[2025-09-09 14:25:39] [Rank 0] Group 12 Loss: 5.1321
+[2025-09-09 14:25:39] [Rank 0] Group 13 Loss: 5.1925
+[2025-09-09 14:25:39] [Rank 0] Group 14 Loss: 5.1714
+[2025-09-09 14:25:39] [Rank 0] Group 15 Loss: 5.1837
+[2025-09-09 14:25:39] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 14:25:39] [Rank 0] Group 15 FTA: 1.0000
+[2025-09-09 14:25:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_loss_curves.png
+[2025-09-09 14:25:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/per_class_acc_curves.png
+[2025-09-09 14:25:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_loss_curve.png
+[2025-09-09 14:25:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_43/total_acc_curve.png
+[2025-09-09 14:25:41] [Rank 0] step:10001/10000 train_time:452720ms step_avg:45.27ms
+[2025-09-09 14:25:41] [Rank 0] PRINT: --- Training Finished: Tue Sep 9 14:25:41 2025 ---
+[2025-09-09 14:25:41] [Rank 0] PRINT: Peak memory allocated: 3880 MiB reserved: 4808 MiB
diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/config.json b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..74a912f10736c211358b3b25ee428fa6627e5bfd
--- /dev/null
+++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/config.json
@@ -0,0 +1,29 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 44,
+        "optimizer_mode": 7,
+        "model_parameterization": "gated",
+        "per_group_k": 100,
+        "muon_lr": 0.001,
+        "adam_lr": 0.002,
+        "base_dir": "logs_qa_muon_gated/diff_mode",
+        "sgd_lr": 0.01,
+        "m_val": 15,
"/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "48428ecc-019b-4c23-8f6c-a6a8e88b3361", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/fixed_eval_indices.json b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], 
"14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 
3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 
1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 
1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..4f0a630c9c605c8d100ea25e361aa0efd2bbdc28 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92c68ea34fb6c0fb809c659d7984fb7da94f21718eef0f19d6047ff3eecf1f95 +size 329089 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..c4df5ab24733c147db52e8143eb80a8be21ccf49 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:612a51205b37d4075f647dd2f98a0f6c6dac120260f245336977bfc458df57a4 +size 458612 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..6c2bd65a4bc019c2078022659fe3a4fa2fd6b62f --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4300269290d347e95582be52ab46b070cad52607aa31820b1ee97dd7a46464c2 +size 94499 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..0e8a5daea9877755484b40a5f421b43b204d745b --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ea814ef58b82ef8eac6d0701b8d89ef2ec48685a243e7858e4f2ac307113141 +size 108442 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/training_log_48428ecc-019b-4c23-8f6c-a6a8e88b3361.txt b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/training_log_48428ecc-019b-4c23-8f6c-a6a8e88b3361.txt new file mode 100644 index 0000000000000000000000000000000000000000..af55ca9a65399344f27c494291bf400930dc02c0 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/training_log_48428ecc-019b-4c23-8f6c-a6a8e88b3361.txt @@ -0,0 +1,5618 @@ +[2025-09-09 14:26:06] [Rank 0] PRINT: --- Script Start: Tue Sep 9 14:26:06 2025 --- +[2025-09-09 14:26:06] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=7, model_parameterization='gated', per_group_k=100, muon_lr=0.001, adam_lr=0.002, base_dir='logs_qa_muon_gated/diff_mode', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
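# A minimal sketch (not part of the logged run; only json/pathlib from the standard
# library are assumed) of how the per-run artifacts added in this diff fit together:
# config.json records the CLI args and hyperparameters, and fixed_eval_indices.json
# maps each group id (a string key) to per_group_k = 100 fixed line indices into the
# QA jsonl, so 16 groups x 100 indices give the "Fixed-eval set loaded with 1600
# samples" seen in the logs.
import json
from pathlib import Path

run_dir = Path("logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44")
config = json.loads((run_dir / "config.json").read_text())
fixed_idx = json.loads((run_dir / "fixed_eval_indices.json").read_text())
print(config["cli_args"]["seed"])               # 44
print(sum(len(v) for v in fixed_idx.values()))  # 1600 = 16 groups x per_group_k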
+[2025-09-09 14:26:06] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-09 14:26:06] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-09 14:26:06] [Rank 0] PRINT: Run directory: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44 +[2025-09-09 14:26:06] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // 
world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + 
#val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + 
logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") # append each message to the log file exactly once + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = 
F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def 
evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. + (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
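# An illustrative sketch (hypothetical helper, not in the script above; the group
# names are assumed from the lists built earlier in this branch): the elif chain can
# be read as a table from optimizer_mode to the matrices Muon updates, with everything
# else defaulting to Adam. Mode 7, the mode used in these runs, gives Muon the V/O
# attention and MLP matrices and leaves Q/K attention to Adam.
def muon_matrices_for_mode(mode, attn_qk_group, attn_vo_group, mlp_w1_group, mlp_w2_group):
    all_attn = attn_qk_group + attn_vo_group
    all_mlp = mlp_w1_group + mlp_w2_group
    table = {
        0: all_attn + all_mlp,            # Muon on all hidden matrices (original)
        1: attn_qk_group,                 # Muon on QK attention only
        2: attn_vo_group,                 # Muon on VO attention only
        3: all_attn,                      # Muon on all attention
        4: all_mlp,                       # Muon on MLP
        5: [],                            # all-Adam baseline
        6: mlp_w2_group,                  # Muon on W_2 MLP
        7: attn_vo_group + all_mlp,       # Muon on VO attention + MLP (this run)
        8: attn_vo_group + mlp_w2_group,  # Muon on VO attention + W_2 MLP
    }
    return table[mode]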
elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + muon_lr = exp_args.muon_lr + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
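+    # Modes 11 and 12 are not defined and fall through to the ValueError below.
+    # The remaining single-matrix modes: 14 -> Muon(W_O), 15 -> Muon(W_V),
+    # 16 -> Muon(Q, K, V); in each case every other matrix goes to Adam.
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O.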
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-09 14:26:06] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    # Group 0 has 1 class; group g >= 1 has 2**(g-1) classes; every class in
+    # group g gets 2**(m-g) samples. E.g. m=3: group 0 = 1 class x 8 samples,
+    # group 1 = 1 x 4, group 2 = 2 x 2, group 3 = 4 x 1 (8 classes, 20 samples).
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O.
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
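+
+    # The Muon momentum set here (0.95) is also ramped in the training loop,
+    # from 0.85 to 0.95 over the first 300 steps.
+    print0(f"PRINT: Optimizers configured.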
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + muon_lr = exp_args.muon_lr + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter for building the fixed eval set: ensure the text parses as "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, keeping only samples that can be parsed for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-09 14:26:06] [Rank 0] PRINT: Constructing model...
+[2025-09-09 14:26:08] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-09 14:26:08] [Rank 0] PRINT: Model constructed and broadcasted.
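For reference, the stable-then-decay schedule in get_lr above is easy to sanity-check in isolation. A minimal sketch, assuming this run's config values (num_iterations=10000, cooldown_frac=0.8); the name lr_multiplier is hypothetical and simply inlines the two args:

def lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    # Mirrors get_lr above: hold the multiplier at 1.0 for the first
    # (1 - cooldown_frac) fraction of training, then decay linearly to 0.1.
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)
    return w * 1.0 + (1 - w) * 0.1

assert lr_multiplier(0) == 1.0
assert lr_multiplier(2000) == 1.0                 # cooldown begins at step 2000 with these settings
assert abs(lr_multiplier(6000) - 0.55) < 1e-9     # halfway through the cooldown window
assert abs(lr_multiplier(10000) - 0.1) < 1e-9     # final multiplier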
+[2025-09-09 14:26:08] [Rank 0] PRINT: Testing model forward function:
+[2025-09-09 14:26:12] [Rank 0] PRINT: Model test - Result type:
+[2025-09-09 14:26:12] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-09 14:26:12] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-09 14:26:12] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-09 14:26:12] [Rank 0] PRINT: Model returns:
+[2025-09-09 14:26:12] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-09 14:26:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 7
+[2025-09-09 14:26:12] [Rank 0] PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: 0.002).
+[2025-09-09 14:26:12] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-09-09 14:26:12] [Rank 0] PRINT: Muon optimizer is active with 48 parameters.
+[2025-09-09 14:26:12] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-09 14:26:17] [Rank 0] PRINT: Model compilation complete.
+[2025-09-09 14:26:17] [Rank 0] PRINT: Starting warmup...
+[2025-09-09 14:27:05] [Rank 0] PRINT: Warmup complete.
+[2025-09-09 14:27:05] [Rank 0] PRINT: Starting training...
+[2025-09-09 14:27:12] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/fixed_eval_indices.json
+[2025-09-09 14:27:12] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
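The divisibility warning above is benign but easy to quantify. A short check using only numbers reported in this log (val_tokens=491520 from the hyperparameters, val_batch_size=65536 = world_size * val_seq_len as printed in the warning, which at val_seq_len=16384 implies world_size=4):

val_tokens = 491520       # from the logged hyperparameters
val_seq_len = 16384       # from the logged hyperparameters
val_batch_size = 65536    # world_size * val_seq_len, as printed in the warning
assert val_batch_size // val_seq_len == 4    # implied world_size
print(val_tokens / val_batch_size)           # 7.5 -> not an integer, hence the warning
print(val_tokens // val_batch_size)          # 7 validation steps actually run
print(val_tokens - 7 * val_batch_size)       # 32768 tokens skipped per validation pass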
+[2025-09-09 14:27:16] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-09 14:27:50] [Rank 0] step:21/10000 train_time:33561ms step_avg:1598.12ms
+[2025-09-09 14:27:51] [Rank 0] step:41/10000 train_time:34376ms step_avg:838.43ms
+[2025-09-09 14:27:52] [Rank 0] step:61/10000 train_time:35189ms step_avg:576.87ms
+[2025-09-09 14:27:53] [Rank 0] step:81/10000 train_time:36004ms step_avg:444.49ms
+[2025-09-09 14:27:53] [Rank 0] step:101/10000 train_time:36819ms step_avg:364.54ms
+[2025-09-09 14:27:54] [Rank 0] step:121/10000 train_time:37634ms step_avg:311.03ms
+[2025-09-09 14:27:55] [Rank 0] step:141/10000 train_time:38591ms step_avg:273.69ms
+[2025-09-09 14:27:56] [Rank 0] step:161/10000 train_time:39719ms step_avg:246.70ms
+[2025-09-09 14:27:57] [Rank 0] step:181/10000 train_time:40534ms step_avg:223.94ms
+[2025-09-09 14:27:58] [Rank 0] step:201/10000 train_time:41348ms step_avg:205.71ms
+[2025-09-09 14:27:59] [Rank 0] step:221/10000 train_time:42163ms step_avg:190.78ms
+[2025-09-09 14:27:59] [Rank 0] step:241/10000 train_time:42978ms step_avg:178.33ms
+[2025-09-09 14:28:00] [Rank 0] step:261/10000 train_time:43793ms step_avg:167.79ms
+[2025-09-09 14:28:01] [Rank 0] step:281/10000 train_time:44609ms step_avg:158.75ms
+[2025-09-09 14:28:02] [Rank 0] step:301/10000 train_time:45424ms step_avg:150.91ms
+[2025-09-09 14:28:03] [Rank 0] step:321/10000 train_time:46240ms step_avg:144.05ms
+[2025-09-09 14:28:04] [Rank 0] step:341/10000 train_time:47056ms step_avg:137.99ms
+[2025-09-09 14:28:04] [Rank 0] step:361/10000 train_time:47871ms step_avg:132.61ms
+[2025-09-09 14:28:05] [Rank 0] step:381/10000 train_time:48686ms step_avg:127.79ms
+[2025-09-09 14:28:06] [Rank 0] step:401/10000 train_time:49502ms step_avg:123.45ms
+[2025-09-09 14:28:07] [Rank 0] step:421/10000 train_time:50317ms step_avg:119.52ms
+[2025-09-09 14:28:08] [Rank 0] step:441/10000 train_time:51130ms step_avg:115.94ms
+[2025-09-09 14:28:08] [Rank 0] step:461/10000 train_time:51943ms step_avg:112.68ms
+[2025-09-09 14:28:09] [Rank 0] step:481/10000 train_time:52757ms step_avg:109.68ms
+[2025-09-09 14:28:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:28:11] [Rank 0] PRINT: step:500/10000 train_loss:2.9219 val_loss:1.0504 train_time:53573ms step_avg:107.15ms
+[2025-09-09 14:28:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:28:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:29:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:29:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:29:32] [Rank 0] Total Loss: 3.8785
+[2025-09-09 14:29:32] [Rank 0] Total FTA (Unweighted): 0.5369
+[2025-09-09 14:29:32] [Rank 0] Total FTA (Weighted): 0.5369
+[2025-09-09 14:29:32] [Rank 0] Group 0 Loss: 3.4286
+[2025-09-09 14:29:32] [Rank 0] Group 1 Loss: 3.2577
+[2025-09-09 14:29:32] [Rank 0] Group 2 Loss: 3.1675
+[2025-09-09 14:29:32] [Rank 0] Group 3 Loss: 3.4835
+[2025-09-09 14:29:32] [Rank 0] Group 4 Loss: 3.5083
+[2025-09-09 14:29:32] [Rank 0] Group 5 Loss: 3.5606
+[2025-09-09 14:29:32] [Rank 0] Group 6 Loss: 3.5620
+[2025-09-09 14:29:32] [Rank 0] Group 7 Loss: 3.7078
+[2025-09-09 14:29:32] [Rank 0] Group 8 Loss: 3.9399
+[2025-09-09 14:29:32] [Rank 0] Group 9 Loss: 4.0377
+[2025-09-09 14:29:32] [Rank 0] Group 10 Loss: 4.2113
+[2025-09-09 14:29:32] [Rank 0] Group 11 Loss: 4.2611
+[2025-09-09 14:29:32] [Rank 0] Group 12 Loss: 4.4093
+[2025-09-09 14:29:32] [Rank 0] Group 13 Loss: 4.5091
+[2025-09-09 14:29:32] [Rank 0] Group 14 Loss: 4.4837
+[2025-09-09 14:29:32] [Rank 0] Group 15 Loss: 4.5273
+[2025-09-09 14:29:32] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:29:32] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:29:32] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:29:32] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:29:32] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:29:32] [Rank 0] Group 5 FTA: 0.9000
+[2025-09-09 14:29:32] [Rank 0] Group 6 FTA: 0.6100
+[2025-09-09 14:29:32] [Rank 0] Group 7 FTA: 0.5100
+[2025-09-09 14:29:32] [Rank 0] Group 8 FTA: 0.5200
+[2025-09-09 14:29:32] [Rank 0] Group 9 FTA: 0.2700
+[2025-09-09 14:29:32] [Rank 0] Group 10 FTA: 0.2900
+[2025-09-09 14:29:32] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-09 14:29:32] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-09 14:29:32] [Rank 0] Group 13 FTA: 0.0800
+[2025-09-09 14:29:32] [Rank 0] Group 14 FTA: 0.0800
+[2025-09-09 14:29:32] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-09 14:29:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:29:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:29:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:29:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:29:34] [Rank 0] step:501/10000 train_time:53589ms step_avg:106.96ms
+[2025-09-09 14:29:35] [Rank 0] step:521/10000 train_time:54411ms step_avg:104.44ms
+[2025-09-09 14:29:35] [Rank 0] step:541/10000 train_time:55223ms step_avg:102.08ms
+[2025-09-09 14:29:36] [Rank 0] step:561/10000 train_time:56035ms step_avg:99.88ms
+[2025-09-09 14:29:37] [Rank 0] step:581/10000 train_time:56847ms step_avg:97.84ms
+[2025-09-09 14:29:38] [Rank 0] step:601/10000 train_time:57659ms step_avg:95.94ms
+[2025-09-09 14:29:39] [Rank 0] step:621/10000 train_time:58471ms step_avg:94.16ms
+[2025-09-09 14:29:40] [Rank 0] step:641/10000 train_time:59282ms step_avg:92.48ms
+[2025-09-09 14:29:40] [Rank 0] step:661/10000 train_time:60094ms step_avg:90.91ms
+[2025-09-09 14:29:41] [Rank 0] step:681/10000 train_time:60906ms step_avg:89.44ms
+[2025-09-09 14:29:42] [Rank 0] step:701/10000 train_time:61717ms step_avg:88.04ms
+[2025-09-09 14:29:43] [Rank 0] step:721/10000 train_time:62530ms step_avg:86.73ms
+[2025-09-09 14:29:44] [Rank 0] step:741/10000 train_time:63343ms step_avg:85.48ms
+[2025-09-09 14:29:44] [Rank 0] step:761/10000 train_time:64160ms step_avg:84.31ms
+[2025-09-09 14:29:45] [Rank 0] step:781/10000 train_time:64978ms step_avg:83.20ms
+[2025-09-09 14:29:46] [Rank 0] step:801/10000 train_time:65796ms step_avg:82.14ms
+[2025-09-09 14:29:48] [Rank 0] step:821/10000 train_time:67299ms step_avg:81.97ms
+[2025-09-09 14:29:48] [Rank 0] step:841/10000 train_time:68117ms step_avg:81.00ms
+[2025-09-09 14:29:49] [Rank 0] step:861/10000 train_time:68934ms step_avg:80.06ms
+[2025-09-09 14:29:50] [Rank 0] step:881/10000 train_time:69751ms step_avg:79.17ms
+[2025-09-09 14:29:51] [Rank 0] step:901/10000 train_time:70567ms step_avg:78.32ms
+[2025-09-09 14:29:52] [Rank 0] step:921/10000 train_time:71387ms step_avg:77.51ms
+[2025-09-09 14:29:52] [Rank 0] step:941/10000 train_time:72203ms step_avg:76.73ms
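Alongside the LR schedule, the training loop above also ramps Muon's momentum each step. A minimal sketch of that rule (the helper name muon_momentum is hypothetical; the 0.85 to 0.95 ramp over 300 steps is taken from the per-step update applied to optimizer2's param groups in the loop as logged):

def muon_momentum(step: int) -> float:
    # Linear warmup of Muon momentum from 0.85 to 0.95 over the first 300 steps,
    # matching the training loop above; clamped at 0.95 afterwards.
    frac = min(step / 300, 1)
    return (1 - frac) * 0.85 + frac * 0.95

assert muon_momentum(0) == 0.85
assert abs(muon_momentum(150) - 0.90) < 1e-12
assert muon_momentum(300) == 0.95
assert muon_momentum(10000) == 0.95   # clamped after step 300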
+[2025-09-09 14:29:53] [Rank 0] step:961/10000 train_time:73020ms step_avg:75.98ms
+[2025-09-09 14:29:54] [Rank 0] step:981/10000 train_time:73837ms step_avg:75.27ms
+[2025-09-09 14:29:55] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:29:55] [Rank 0] PRINT: step:1000/10000 train_loss:0.9210 val_loss:0.8219 train_time:74666ms step_avg:74.67ms
+[2025-09-09 14:29:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:29:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:31:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:31:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:31:17] [Rank 0] Total Loss: 4.2434
+[2025-09-09 14:31:17] [Rank 0] Total FTA (Unweighted): 0.7175
+[2025-09-09 14:31:17] [Rank 0] Total FTA (Weighted): 0.7175
+[2025-09-09 14:31:17] [Rank 0] Group 0 Loss: 4.0966
+[2025-09-09 14:31:17] [Rank 0] Group 1 Loss: 3.7122
+[2025-09-09 14:31:17] [Rank 0] Group 2 Loss: 3.6553
+[2025-09-09 14:31:17] [Rank 0] Group 3 Loss: 4.0746
+[2025-09-09 14:31:17] [Rank 0] Group 4 Loss: 4.0503
+[2025-09-09 14:31:17] [Rank 0] Group 5 Loss: 4.0244
+[2025-09-09 14:31:17] [Rank 0] Group 6 Loss: 3.9439
+[2025-09-09 14:31:17] [Rank 0] Group 7 Loss: 4.0662
+[2025-09-09 14:31:17] [Rank 0] Group 8 Loss: 4.2685
+[2025-09-09 14:31:17] [Rank 0] Group 9 Loss: 4.2539
+[2025-09-09 14:31:17] [Rank 0] Group 10 Loss: 4.4101
+[2025-09-09 14:31:17] [Rank 0] Group 11 Loss: 4.4756
+[2025-09-09 14:31:17] [Rank 0] Group 12 Loss: 4.5911
+[2025-09-09 14:31:17] [Rank 0] Group 13 Loss: 4.6979
+[2025-09-09 14:31:17] [Rank 0] Group 14 Loss: 4.7108
+[2025-09-09 14:31:17] [Rank 0] Group 15 Loss: 4.8625
+[2025-09-09 14:31:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:31:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:31:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:31:17] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:31:17] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:31:17] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:31:17] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:31:17] [Rank 0] Group 7 FTA: 0.9800
+[2025-09-09 14:31:17] [Rank 0] Group 8 FTA: 0.8500
+[2025-09-09 14:31:17] [Rank 0] Group 9 FTA: 0.6900
+[2025-09-09 14:31:17] [Rank 0] Group 10 FTA: 0.7000
+[2025-09-09 14:31:17] [Rank 0] Group 11 FTA: 0.4700
+[2025-09-09 14:31:17] [Rank 0] Group 12 FTA: 0.3200
+[2025-09-09 14:31:17] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-09 14:31:17] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-09 14:31:17] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-09 14:31:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:31:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:31:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:31:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:31:19] [Rank 0] step:1001/10000 train_time:74682ms step_avg:74.61ms
+[2025-09-09 14:31:20] [Rank 0] step:1021/10000 train_time:75511ms step_avg:73.96ms
+[2025-09-09 14:31:20] [Rank 0] step:1041/10000 train_time:76328ms step_avg:73.32ms
+[2025-09-09 14:31:21] [Rank 0] step:1061/10000 train_time:77145ms step_avg:72.71ms
+[2025-09-09 14:31:22] [Rank 0] step:1081/10000 train_time:77962ms step_avg:72.12ms
+[2025-09-09 14:31:23] [Rank 0] step:1101/10000 train_time:78782ms step_avg:71.55ms
+[2025-09-09 14:31:24] [Rank 0] step:1121/10000 train_time:79599ms step_avg:71.01ms
+[2025-09-09 14:31:24] [Rank 0] step:1141/10000 train_time:80417ms step_avg:70.48ms
+[2025-09-09 14:31:25] [Rank 0] step:1161/10000 train_time:81234ms step_avg:69.97ms
+[2025-09-09 14:31:26] [Rank 0] step:1181/10000 train_time:82051ms step_avg:69.48ms
+[2025-09-09 14:31:27] [Rank 0] step:1201/10000 train_time:82867ms step_avg:69.00ms
+[2025-09-09 14:31:28] [Rank 0] step:1221/10000 train_time:83686ms step_avg:68.54ms
+[2025-09-09 14:31:29] [Rank 0] step:1241/10000 train_time:84503ms step_avg:68.09ms
+[2025-09-09 14:31:29] [Rank 0] step:1261/10000 train_time:85320ms step_avg:67.66ms
+[2025-09-09 14:31:30] [Rank 0] step:1281/10000 train_time:86138ms step_avg:67.24ms
+[2025-09-09 14:31:31] [Rank 0] step:1301/10000 train_time:86955ms step_avg:66.84ms
+[2025-09-09 14:31:32] [Rank 0] step:1321/10000 train_time:87773ms step_avg:66.44ms
+[2025-09-09 14:31:33] [Rank 0] step:1341/10000 train_time:88591ms step_avg:66.06ms
+[2025-09-09 14:31:33] [Rank 0] step:1361/10000 train_time:89408ms step_avg:65.69ms
+[2025-09-09 14:31:34] [Rank 0] step:1381/10000 train_time:90226ms step_avg:65.33ms
+[2025-09-09 14:31:35] [Rank 0] step:1401/10000 train_time:91042ms step_avg:64.98ms
+[2025-09-09 14:31:36] [Rank 0] step:1421/10000 train_time:91859ms step_avg:64.64ms
+[2025-09-09 14:31:37] [Rank 0] step:1441/10000 train_time:92676ms step_avg:64.31ms
+[2025-09-09 14:31:38] [Rank 0] step:1461/10000 train_time:93494ms step_avg:63.99ms
+[2025-09-09 14:31:38] [Rank 0] step:1481/10000 train_time:94313ms step_avg:63.68ms
+[2025-09-09 14:31:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:31:40] [Rank 0] PRINT: step:1500/10000 train_loss:0.7910 val_loss:0.7458 train_time:95133ms step_avg:63.42ms
+[2025-09-09 14:31:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:31:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:33:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:33:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:33:02] [Rank 0] Total Loss: 4.5694
+[2025-09-09 14:33:02] [Rank 0] Total FTA (Unweighted): 0.8006
+[2025-09-09 14:33:02] [Rank 0] Total FTA (Weighted): 0.8006
+[2025-09-09 14:33:02] [Rank 0] Group 0 Loss: 4.5350
+[2025-09-09 14:33:02] [Rank 0] Group 1 Loss: 4.0813
+[2025-09-09 14:33:02] [Rank 0] Group 2 Loss: 3.9337
+[2025-09-09 14:33:02] [Rank 0] Group 3 Loss: 4.4093
+[2025-09-09 14:33:02] [Rank 0] Group 4 Loss: 4.4109
+[2025-09-09 14:33:02] [Rank 0] Group 5 Loss: 4.4218
+[2025-09-09 14:33:02] [Rank 0] Group 6 Loss: 4.3143
+[2025-09-09 14:33:02] [Rank 0] Group 7 Loss: 4.4645
+[2025-09-09 14:33:02] [Rank 0] Group 8 Loss: 4.6289
+[2025-09-09 14:33:02] [Rank 0] Group 9 Loss: 4.5566
+[2025-09-09 14:33:02] [Rank 0] Group 10 Loss: 4.7217
+[2025-09-09 14:33:02] [Rank 0] Group 11 Loss: 4.7168
+[2025-09-09 14:33:02] [Rank 0] Group 12 Loss: 4.8076
+[2025-09-09 14:33:02] [Rank 0] Group 13 Loss: 4.9549
+[2025-09-09 14:33:02] [Rank 0] Group 14 Loss: 5.0083
+[2025-09-09 14:33:02] [Rank 0] Group 15 Loss: 5.1450
+[2025-09-09 14:33:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:33:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:33:02] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:33:02] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:33:02] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:33:02] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:33:02] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:33:02] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:33:02] [Rank 0] Group 8 FTA: 0.9900
+[2025-09-09 14:33:02] [Rank 0] Group 9 FTA: 0.9000
+[2025-09-09 14:33:02] [Rank 0] Group 10 FTA: 0.9200
+[2025-09-09 14:33:02] [Rank 0] Group 11 FTA: 0.8000
+[2025-09-09 14:33:02] [Rank 0] Group 12 FTA: 0.6300
+[2025-09-09 14:33:02] [Rank 0] Group 13 FTA: 0.2900
+[2025-09-09 14:33:02] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-09 14:33:02] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-09 14:33:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:33:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:33:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:33:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:33:03] [Rank 0] step:1501/10000 train_time:95150ms step_avg:63.39ms
+[2025-09-09 14:33:04] [Rank 0] step:1521/10000 train_time:95966ms step_avg:63.09ms
+[2025-09-09 14:33:05] [Rank 0] step:1541/10000 train_time:96784ms step_avg:62.81ms
+[2025-09-09 14:33:06] [Rank 0] step:1561/10000 train_time:97603ms step_avg:62.53ms
+[2025-09-09 14:33:06] [Rank 0] step:1581/10000 train_time:98420ms step_avg:62.25ms
+[2025-09-09 14:33:07] [Rank 0] step:1601/10000 train_time:99238ms step_avg:61.98ms
+[2025-09-09 14:33:08] [Rank 0] step:1621/10000 train_time:100055ms step_avg:61.72ms
+[2025-09-09 14:33:09] [Rank 0] step:1641/10000 train_time:101146ms step_avg:61.64ms
+[2025-09-09 14:33:10] [Rank 0] step:1661/10000 train_time:101964ms step_avg:61.39ms
+[2025-09-09 14:33:11] [Rank 0] step:1681/10000 train_time:102782ms step_avg:61.14ms
+[2025-09-09 14:33:12] [Rank 0] step:1701/10000 train_time:103600ms step_avg:60.91ms
+[2025-09-09 14:33:13] [Rank 0] step:1721/10000 train_time:104919ms step_avg:60.96ms
+[2025-09-09 14:33:14] [Rank 0] step:1741/10000 train_time:105751ms step_avg:60.74ms
+[2025-09-09 14:33:15] [Rank 0] step:1761/10000 train_time:106568ms step_avg:60.52ms
+[2025-09-09 14:33:15] [Rank 0] step:1781/10000 train_time:107386ms step_avg:60.30ms
+[2025-09-09 14:33:16] [Rank 0] step:1801/10000 train_time:108204ms step_avg:60.08ms
+[2025-09-09 14:33:17] [Rank 0] step:1821/10000 train_time:109022ms step_avg:59.87ms
+[2025-09-09 14:33:18] [Rank 0] step:1841/10000 train_time:109840ms step_avg:59.66ms
+[2025-09-09 14:33:19] [Rank 0] step:1861/10000 train_time:110657ms step_avg:59.46ms
+[2025-09-09 14:33:19] [Rank 0] step:1881/10000 train_time:111476ms step_avg:59.26ms
+[2025-09-09 14:33:20] [Rank 0] step:1901/10000 train_time:112293ms step_avg:59.07ms
+[2025-09-09 14:33:21] [Rank 0] step:1921/10000 train_time:113110ms step_avg:58.88ms
+[2025-09-09 14:33:22] [Rank 0] step:1941/10000 train_time:113927ms step_avg:58.69ms
+[2025-09-09 14:33:23] [Rank 0] step:1961/10000 train_time:114744ms step_avg:58.51ms
+[2025-09-09 14:33:24] [Rank 0] step:1981/10000 train_time:115560ms step_avg:58.33ms
+[2025-09-09 14:33:24] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:33:25] [Rank 0] PRINT: step:2000/10000 train_loss:0.7386 val_loss:0.7066 train_time:116380ms step_avg:58.19ms
+[2025-09-09 14:33:25] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:33:25] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:34:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:34:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:34:46] [Rank 0] Total Loss: 4.6922
+[2025-09-09 14:34:46] [Rank 0] Total FTA (Unweighted): 0.8381
+[2025-09-09 14:34:46] [Rank 0] Total FTA (Weighted): 0.8381
+[2025-09-09 14:34:46] [Rank 0] Group 0 Loss: 4.4396
+[2025-09-09 14:34:46] [Rank 0] Group 1 Loss: 4.2763
+[2025-09-09 14:34:46] [Rank 0] Group 2 Loss: 4.0629
+[2025-09-09 14:34:46] [Rank 0] Group 3 Loss: 4.5994
+[2025-09-09 14:34:46] [Rank 0] Group 4 Loss: 4.5881
+[2025-09-09 14:34:46] [Rank 0] Group 5 Loss: 4.5291
+[2025-09-09 14:34:46] [Rank 0] Group 6 Loss: 4.5133
+[2025-09-09 14:34:46] [Rank 0] Group 7 Loss: 4.6298
+[2025-09-09 14:34:46] [Rank 0] Group 8 Loss: 4.8236
+[2025-09-09 14:34:46] [Rank 0] Group 9 Loss: 4.7164
+[2025-09-09 14:34:46] [Rank 0] Group 10 Loss: 4.8139
+[2025-09-09 14:34:46] [Rank 0] Group 11 Loss: 4.8274
+[2025-09-09 14:34:46] [Rank 0] Group 12 Loss: 4.8772
+[2025-09-09 14:34:46] [Rank 0] Group 13 Loss: 5.0950
+[2025-09-09 14:34:46] [Rank 0] Group 14 Loss: 5.0681
+[2025-09-09 14:34:46] [Rank 0] Group 15 Loss: 5.2145
+[2025-09-09 14:34:46] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:34:46] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:34:47] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:34:47] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:34:47] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:34:47] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 14:34:47] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 14:34:47] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 14:34:47] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 14:34:47] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 14:34:47] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 14:34:47] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 14:34:47] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 14:34:47] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 14:34:47] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 14:34:47] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 14:34:47] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-09 14:34:47] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-09 14:34:47] [Rank 0] Group 11 FTA: 0.9100 +[2025-09-09 14:34:47] [Rank 0] Group 11 FTA: 0.9100 +[2025-09-09 14:34:47] [Rank 0] Group 12 FTA: 0.7900 +[2025-09-09 14:34:47] [Rank 0] Group 12 FTA: 0.7900 +[2025-09-09 14:34:47] [Rank 0] Group 13 FTA: 0.4100 +[2025-09-09 14:34:47] [Rank 0] Group 13 FTA: 0.4100 +[2025-09-09 14:34:47] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-09 14:34:47] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-09 14:34:47] [Rank 0] Group 15 FTA: 0.1500 +[2025-09-09 14:34:47] [Rank 0] Group 15 FTA: 0.1500 +[2025-09-09 14:34:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png +[2025-09-09 14:34:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png +[2025-09-09 14:34:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png +[2025-09-09 14:34:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png +[2025-09-09 14:34:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png +[2025-09-09 14:34:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png +[2025-09-09 14:34:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png +[2025-09-09 14:34:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png +[2025-09-09 14:34:48] [Rank 0] step:2001/10000 train_time:116397ms step_avg:58.17ms +[2025-09-09 14:34:48] [Rank 0] step:2001/10000 train_time:116397ms step_avg:58.17ms +[2025-09-09 14:34:49] [Rank 0] step:2021/10000 train_time:117898ms step_avg:58.34ms +[2025-09-09 14:34:49] [Rank 0] step:2021/10000 train_time:117898ms step_avg:58.34ms +[2025-09-09 14:34:50] [Rank 0] step:2041/10000 train_time:118720ms step_avg:58.17ms +[2025-09-09 14:34:50] [Rank 0] step:2041/10000 train_time:118720ms step_avg:58.17ms +[2025-09-09 14:34:51] [Rank 0] step:2061/10000 train_time:119543ms step_avg:58.00ms +[2025-09-09 14:34:51] [Rank 0] step:2061/10000 train_time:119543ms step_avg:58.00ms +[2025-09-09 14:34:52] [Rank 0] step:2081/10000 train_time:120364ms step_avg:57.84ms +[2025-09-09 14:34:52] [Rank 0] step:2081/10000 train_time:120364ms step_avg:57.84ms +[2025-09-09 14:34:53] [Rank 0] step:2101/10000 train_time:121185ms step_avg:57.68ms +[2025-09-09 14:34:53] [Rank 0] step:2101/10000 train_time:121185ms step_avg:57.68ms +[2025-09-09 14:34:54] [Rank 0] 
step:2121/10000 train_time:122007ms step_avg:57.52ms +[2025-09-09 14:34:54] [Rank 0] step:2121/10000 train_time:122007ms step_avg:57.52ms +[2025-09-09 14:34:54] [Rank 0] step:2141/10000 train_time:122830ms step_avg:57.37ms +[2025-09-09 14:34:54] [Rank 0] step:2141/10000 train_time:122830ms step_avg:57.37ms +[2025-09-09 14:34:55] [Rank 0] step:2161/10000 train_time:123651ms step_avg:57.22ms +[2025-09-09 14:34:55] [Rank 0] step:2161/10000 train_time:123651ms step_avg:57.22ms +[2025-09-09 14:34:56] [Rank 0] step:2181/10000 train_time:124469ms step_avg:57.07ms +[2025-09-09 14:34:56] [Rank 0] step:2181/10000 train_time:124469ms step_avg:57.07ms +[2025-09-09 14:34:57] [Rank 0] step:2201/10000 train_time:125287ms step_avg:56.92ms +[2025-09-09 14:34:57] [Rank 0] step:2201/10000 train_time:125287ms step_avg:56.92ms +[2025-09-09 14:34:58] [Rank 0] step:2221/10000 train_time:126107ms step_avg:56.78ms +[2025-09-09 14:34:58] [Rank 0] step:2221/10000 train_time:126107ms step_avg:56.78ms +[2025-09-09 14:34:59] [Rank 0] step:2241/10000 train_time:126929ms step_avg:56.64ms +[2025-09-09 14:34:59] [Rank 0] step:2241/10000 train_time:126929ms step_avg:56.64ms +[2025-09-09 14:34:59] [Rank 0] step:2261/10000 train_time:127752ms step_avg:56.50ms +[2025-09-09 14:34:59] [Rank 0] step:2261/10000 train_time:127752ms step_avg:56.50ms +[2025-09-09 14:35:00] [Rank 0] step:2281/10000 train_time:128577ms step_avg:56.37ms +[2025-09-09 14:35:00] [Rank 0] step:2281/10000 train_time:128577ms step_avg:56.37ms +[2025-09-09 14:35:01] [Rank 0] step:2301/10000 train_time:129448ms step_avg:56.26ms +[2025-09-09 14:35:01] [Rank 0] step:2301/10000 train_time:129448ms step_avg:56.26ms +[2025-09-09 14:35:02] [Rank 0] step:2321/10000 train_time:130330ms step_avg:56.15ms +[2025-09-09 14:35:02] [Rank 0] step:2321/10000 train_time:130330ms step_avg:56.15ms +[2025-09-09 14:35:03] [Rank 0] step:2341/10000 train_time:131155ms step_avg:56.03ms +[2025-09-09 14:35:03] [Rank 0] step:2341/10000 train_time:131155ms step_avg:56.03ms +[2025-09-09 14:35:04] [Rank 0] step:2361/10000 train_time:131980ms step_avg:55.90ms +[2025-09-09 14:35:04] [Rank 0] step:2361/10000 train_time:131980ms step_avg:55.90ms +[2025-09-09 14:35:04] [Rank 0] step:2381/10000 train_time:132808ms step_avg:55.78ms +[2025-09-09 14:35:04] [Rank 0] step:2381/10000 train_time:132808ms step_avg:55.78ms +[2025-09-09 14:35:05] [Rank 0] step:2401/10000 train_time:133632ms step_avg:55.66ms +[2025-09-09 14:35:05] [Rank 0] step:2401/10000 train_time:133632ms step_avg:55.66ms +[2025-09-09 14:35:06] [Rank 0] step:2421/10000 train_time:134456ms step_avg:55.54ms +[2025-09-09 14:35:06] [Rank 0] step:2421/10000 train_time:134456ms step_avg:55.54ms +[2025-09-09 14:35:07] [Rank 0] step:2441/10000 train_time:135281ms step_avg:55.42ms +[2025-09-09 14:35:07] [Rank 0] step:2441/10000 train_time:135281ms step_avg:55.42ms +[2025-09-09 14:35:08] [Rank 0] step:2461/10000 train_time:136105ms step_avg:55.30ms +[2025-09-09 14:35:08] [Rank 0] step:2461/10000 train_time:136105ms step_avg:55.30ms +[2025-09-09 14:35:09] [Rank 0] step:2481/10000 train_time:136931ms step_avg:55.19ms +[2025-09-09 14:35:09] [Rank 0] step:2481/10000 train_time:136931ms step_avg:55.19ms +[2025-09-09 14:35:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 14:35:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-09 14:35:10] [Rank 0] PRINT: step:2500/10000 train_loss:0.7079 val_loss:0.6819 train_time:137758ms step_avg:55.10ms
+[2025-09-09 14:35:10] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:35:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:36:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:36:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:36:31] [Rank 0] Total Loss: 4.8316
+[2025-09-09 14:36:31] [Rank 0] Total FTA (Unweighted): 0.8688
+[2025-09-09 14:36:31] [Rank 0] Total FTA (Weighted): 0.8688
+[2025-09-09 14:36:31] [Rank 0] Group 0 Loss: 4.6668
+[2025-09-09 14:36:31] [Rank 0] Group 1 Loss: 4.5990
+[2025-09-09 14:36:31] [Rank 0] Group 2 Loss: 4.2274
+[2025-09-09 14:36:31] [Rank 0] Group 3 Loss: 4.7318
+[2025-09-09 14:36:31] [Rank 0] Group 4 Loss: 4.6551
+[2025-09-09 14:36:31] [Rank 0] Group 5 Loss: 4.7195
+[2025-09-09 14:36:31] [Rank 0] Group 6 Loss: 4.6664
+[2025-09-09 14:36:31] [Rank 0] Group 7 Loss: 4.7284
+[2025-09-09 14:36:31] [Rank 0] Group 8 Loss: 4.9252
+[2025-09-09 14:36:31] [Rank 0] Group 9 Loss: 4.8618
+[2025-09-09 14:36:31] [Rank 0] Group 10 Loss: 4.9911
+[2025-09-09 14:36:31] [Rank 0] Group 11 Loss: 4.9879
+[2025-09-09 14:36:31] [Rank 0] Group 12 Loss: 4.9936
+[2025-09-09 14:36:31] [Rank 0] Group 13 Loss: 5.1418
+[2025-09-09 14:36:31] [Rank 0] Group 14 Loss: 5.1457
+[2025-09-09 14:36:32] [Rank 0] Group 15 Loss: 5.2645
+[2025-09-09 14:36:32] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:36:32] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:36:32] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:36:32] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:36:32] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:36:32] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:36:32] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:36:32] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:36:32] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:36:32] [Rank 0] Group 9 FTA: 0.9900
+[2025-09-09 14:36:32] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-09 14:36:32] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-09 14:36:32] [Rank 0] Group 12 FTA: 0.8700
+[2025-09-09 14:36:32] [Rank 0] Group 13 FTA: 0.6300
+[2025-09-09 14:36:32] [Rank 0] Group 14 FTA: 0.2500
+[2025-09-09 14:36:32] [Rank 0] Group 15 FTA: 0.2200
+[2025-09-09 14:36:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:36:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:36:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:36:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:36:33] [Rank 0] step:2501/10000 train_time:137774ms step_avg:55.09ms
+[2025-09-09 14:36:34] [Rank 0] step:2521/10000 train_time:138604ms step_avg:54.98ms
+[2025-09-09 14:36:35] [Rank 0] step:2541/10000 train_time:139435ms step_avg:54.87ms
+[2025-09-09 14:36:36] [Rank 0] step:2561/10000 train_time:140261ms step_avg:54.77ms
+[2025-09-09 14:36:36] [Rank 0] step:2581/10000 train_time:141088ms step_avg:54.66ms
+[2025-09-09 14:36:37] [Rank 0] step:2601/10000 train_time:141917ms step_avg:54.56ms
+[2025-09-09 14:36:38] [Rank 0] step:2621/10000 train_time:142745ms step_avg:54.46ms
+[2025-09-09 14:36:39] [Rank 0] step:2641/10000 train_time:143582ms step_avg:54.37ms
+[2025-09-09 14:36:40] [Rank 0] step:2661/10000 train_time:144409ms step_avg:54.27ms
+[2025-09-09 14:36:41] [Rank 0] step:2681/10000 train_time:145237ms step_avg:54.17ms
+[2025-09-09 14:36:41] [Rank 0] step:2701/10000 train_time:146062ms step_avg:54.08ms
+[2025-09-09 14:36:42] [Rank 0] step:2721/10000 train_time:146887ms step_avg:53.98ms
+[2025-09-09 14:36:43] [Rank 0] step:2741/10000 train_time:147713ms step_avg:53.89ms
+[2025-09-09 14:36:44] [Rank 0] step:2761/10000 train_time:148537ms step_avg:53.80ms
+[2025-09-09 14:36:45] [Rank 0] step:2781/10000 train_time:149361ms step_avg:53.71ms
+[2025-09-09 14:36:45] [Rank 0] step:2801/10000 train_time:150186ms step_avg:53.62ms
+[2025-09-09 14:36:47] [Rank 0] step:2821/10000 train_time:151699ms step_avg:53.77ms
+[2025-09-09 14:36:48] [Rank 0] step:2841/10000 train_time:152524ms step_avg:53.69ms
+[2025-09-09 14:36:49] [Rank 0] step:2861/10000 train_time:153348ms step_avg:53.60ms
+[2025-09-09 14:36:49] [Rank 0] step:2881/10000 train_time:154173ms step_avg:53.51ms
+[2025-09-09 14:36:50] [Rank 0] step:2901/10000 train_time:154998ms step_avg:53.43ms
+[2025-09-09 14:36:51] [Rank 0] step:2921/10000 train_time:155824ms step_avg:53.35ms
+[2025-09-09 14:36:52] [Rank 0] step:2941/10000 train_time:156650ms step_avg:53.26ms
+[2025-09-09 14:36:53] [Rank 0] step:2961/10000 train_time:157476ms step_avg:53.18ms
+[2025-09-09 14:36:54] [Rank 0] step:2981/10000 train_time:158303ms step_avg:53.10ms
+[2025-09-09 14:36:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:36:55] [Rank 0] PRINT: step:3000/10000 train_loss:0.6864 val_loss:0.6653 train_time:159135ms step_avg:53.04ms
+[2025-09-09 14:36:55] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:36:55] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:38:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:38:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:38:17] [Rank 0] Total Loss: 4.8099
+[2025-09-09 14:38:17] [Rank 0] Total FTA (Unweighted): 0.8919
+[2025-09-09 14:38:17] [Rank 0] Total FTA (Weighted): 0.8919
+[2025-09-09 14:38:17] [Rank 0] Group 0 Loss: 4.6185
+[2025-09-09 14:38:17] [Rank 0] Group 1 Loss: 4.4310
+[2025-09-09 14:38:17] [Rank 0] Group 2 Loss: 4.1895
+[2025-09-09 14:38:17] [Rank 0] Group 3 Loss: 4.7388
+[2025-09-09 14:38:17] [Rank 0] Group 4 Loss: 4.6401
+[2025-09-09 14:38:17] [Rank 0] Group 5 Loss: 4.7120
+[2025-09-09 14:38:17] [Rank 0] Group 6 Loss: 4.6515
+[2025-09-09 14:38:17] [Rank 0] Group 7 Loss: 4.7776
+[2025-09-09 14:38:17] [Rank 0] Group 8 Loss: 4.9053
+[2025-09-09 14:38:17] [Rank 0] Group 9 Loss: 4.8524
+[2025-09-09 14:38:17] [Rank 0] Group 10 Loss: 4.9790
+[2025-09-09 14:38:17] [Rank 0] Group 11 Loss: 5.0058
+[2025-09-09 14:38:17] [Rank 0] Group 12 Loss: 4.9933
+[2025-09-09 14:38:17] [Rank 0] Group 13 Loss: 5.1486
+[2025-09-09 14:38:17] [Rank 0] Group 14 Loss: 5.1052
+[2025-09-09 14:38:17] [Rank 0] Group 15 Loss: 5.2093
+[2025-09-09 14:38:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:38:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:38:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:38:17] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:38:17] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:38:17] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:38:17] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:38:17] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:38:17] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:38:17] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:38:17] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:38:17] [Rank 0] Group 11 FTA: 0.9700
+[2025-09-09 14:38:17] [Rank 0] Group 12 FTA: 0.9100
+[2025-09-09 14:38:17] [Rank 0] Group 13 FTA: 0.7900
+[2025-09-09 14:38:17] [Rank 0] Group 14 FTA: 0.3800
+[2025-09-09 14:38:17] [Rank 0] Group 15 FTA: 0.2200
+[2025-09-09 14:38:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:38:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:38:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:38:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:38:18] [Rank 0] step:3001/10000 train_time:159151ms step_avg:53.03ms
+[2025-09-09 14:38:19] [Rank 0] step:3021/10000 train_time:159986ms step_avg:52.96ms
+[2025-09-09 14:38:20] [Rank 0] step:3041/10000 train_time:160814ms step_avg:52.88ms
+[2025-09-09 14:38:21] [Rank 0] step:3061/10000 train_time:161642ms step_avg:52.81ms
+[2025-09-09 14:38:22] [Rank 0] step:3081/10000 train_time:162470ms step_avg:52.73ms
+[2025-09-09 14:38:22] [Rank 0] step:3101/10000 train_time:163298ms step_avg:52.66ms
+[2025-09-09 14:38:23] [Rank 0] step:3121/10000 train_time:164127ms step_avg:52.59ms
+[2025-09-09 14:38:24] [Rank 0] step:3141/10000 train_time:164957ms step_avg:52.52ms
+[2025-09-09 14:38:25] [Rank 0] step:3161/10000 train_time:165784ms step_avg:52.45ms
+[2025-09-09 14:38:26] [Rank 0] step:3181/10000 train_time:166613ms step_avg:52.38ms
+[2025-09-09 14:38:27] [Rank 0] step:3201/10000 train_time:167440ms step_avg:52.31ms
+[2025-09-09 14:38:27] [Rank 0] step:3221/10000 train_time:168266ms step_avg:52.24ms
+[2025-09-09 14:38:29] [Rank 0] step:3241/10000 train_time:169602ms step_avg:52.33ms
+[2025-09-09 14:38:30] [Rank 0] step:3261/10000 train_time:170429ms step_avg:52.26ms
+[2025-09-09 14:38:30] [Rank 0] step:3281/10000 train_time:171252ms step_avg:52.20ms
+[2025-09-09 14:38:31] [Rank 0] step:3301/10000 train_time:172077ms step_avg:52.13ms
+[2025-09-09 14:38:32] [Rank 0] step:3321/10000 train_time:172901ms step_avg:52.06ms
+[2025-09-09 14:38:33] [Rank 0] step:3341/10000 train_time:173725ms step_avg:52.00ms
+[2025-09-09 14:38:34] [Rank 0] step:3361/10000 train_time:174551ms step_avg:51.93ms
+[2025-09-09 14:38:35] [Rank 0] step:3381/10000 train_time:175375ms step_avg:51.87ms
+[2025-09-09 14:38:35] [Rank 0] step:3401/10000 train_time:176199ms step_avg:51.81ms
+[2025-09-09 14:38:36] [Rank 0] step:3421/10000 train_time:177023ms step_avg:51.75ms
+[2025-09-09 14:38:37] [Rank 0] step:3441/10000 train_time:177848ms step_avg:51.68ms
+[2025-09-09 14:38:38] [Rank 0] step:3461/10000 train_time:178675ms step_avg:51.63ms
+[2025-09-09 14:38:39] [Rank 0] step:3481/10000 train_time:179498ms step_avg:51.57ms
+[2025-09-09 14:38:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:38:40] [Rank 0] PRINT: step:3500/10000 train_loss:0.6721 val_loss:0.6534 train_time:180325ms step_avg:51.52ms
+[2025-09-09 14:38:40] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:38:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:40:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:40:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:40:01] [Rank 0] Total Loss: 4.9462
+[2025-09-09 14:40:01] [Rank 0] Total FTA (Unweighted): 0.9131
+[2025-09-09 14:40:01] [Rank 0] Total FTA (Weighted): 0.9131
+[2025-09-09 14:40:01] [Rank 0] Group 0 Loss: 4.9101
+[2025-09-09 14:40:01] [Rank 0] Group 1 Loss: 4.4916
+[2025-09-09 14:40:01] [Rank 0] Group 2 Loss: 4.2891
+[2025-09-09 14:40:01] [Rank 0] Group 3 Loss: 4.8484
+[2025-09-09 14:40:01] [Rank 0] Group 4 Loss: 4.7896
+[2025-09-09 14:40:01] [Rank 0] Group 5 Loss: 4.8562
+[2025-09-09 14:40:01] [Rank 0] Group 6 Loss: 4.7798
+[2025-09-09 14:40:01] [Rank 0] Group 7 Loss: 4.9514
+[2025-09-09 14:40:01] [Rank 0] Group 8 Loss: 5.0156
+[2025-09-09 14:40:01] [Rank 0] Group 9 Loss: 4.9910
+[2025-09-09 14:40:01] [Rank 0] Group 10 Loss: 5.1498
+[2025-09-09 14:40:01] [Rank 0] Group 11 Loss: 5.1263
+[2025-09-09 14:40:01] [Rank 0] Group 12 Loss: 5.1197
+[2025-09-09 14:40:01] [Rank 0] Group 13 Loss: 5.2460
+[2025-09-09 14:40:01] [Rank 0] Group 14 Loss: 5.2400
+[2025-09-09 14:40:01] [Rank 0] Group 15 Loss: 5.3346
+[2025-09-09 14:40:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:40:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:40:01] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:40:01] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:40:01] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:40:02] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:40:02] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:40:02] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:40:02] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:40:02] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:40:02] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-09 14:40:02] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-09 14:40:02] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-09 14:40:02] [Rank 0] Group 13 FTA: 0.8900
+[2025-09-09 14:40:02] [Rank 0] Group 14 FTA: 0.4800
+[2025-09-09 14:40:02] [Rank 0] Group 15 FTA: 0.2800
+[2025-09-09 14:40:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:40:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:40:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:40:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:40:03] [Rank 0] step:3501/10000 train_time:180342ms step_avg:51.51ms
+[2025-09-09 14:40:04] [Rank 0] step:3521/10000 train_time:181187ms step_avg:51.46ms
+[2025-09-09 14:40:05] [Rank 0] step:3541/10000 train_time:182017ms step_avg:51.40ms
+[2025-09-09 14:40:05] [Rank 0] step:3561/10000 train_time:182844ms step_avg:51.35ms
+[2025-09-09 14:40:06] [Rank 0] step:3581/10000 train_time:183672ms step_avg:51.29ms
+[2025-09-09 14:40:07] [Rank 0] step:3601/10000 train_time:184501ms step_avg:51.24ms
+[2025-09-09 14:40:08] [Rank 0] step:3621/10000 train_time:185329ms step_avg:51.18ms
+[2025-09-09 14:40:09] [Rank 0] step:3641/10000 train_time:186843ms step_avg:51.32ms
+[2025-09-09 14:40:10] [Rank 0] step:3661/10000 train_time:187671ms step_avg:51.26ms
+[2025-09-09 14:40:11] [Rank 0] step:3681/10000 train_time:188499ms step_avg:51.21ms
+[2025-09-09 14:40:12] [Rank 0] step:3701/10000 train_time:189327ms step_avg:51.16ms
+[2025-09-09 14:40:13] [Rank 0] step:3721/10000 train_time:190157ms step_avg:51.10ms
+[2025-09-09 14:40:14] [Rank 0] step:3741/10000 train_time:190985ms step_avg:51.05ms
+[2025-09-09 14:40:14] [Rank 0] step:3761/10000 train_time:191813ms step_avg:51.00ms
+[2025-09-09 14:40:15] [Rank 0] step:3781/10000 train_time:192639ms step_avg:50.95ms
+[2025-09-09 14:40:16] [Rank 0] step:3801/10000 train_time:193465ms step_avg:50.90ms
+[2025-09-09 14:40:17] [Rank 0] step:3821/10000 train_time:194290ms step_avg:50.85ms
+[2025-09-09 14:40:18] [Rank 0] step:3841/10000 train_time:195115ms step_avg:50.80ms
+[2025-09-09 14:40:19] [Rank 0] step:3861/10000 train_time:195941ms step_avg:50.75ms
+[2025-09-09 14:40:19] [Rank 0] step:3881/10000 train_time:196768ms step_avg:50.70ms
+[2025-09-09 14:40:20] [Rank 0] step:3901/10000 train_time:197592ms step_avg:50.65ms
+[2025-09-09 14:40:21] [Rank 0] step:3921/10000 train_time:198417ms step_avg:50.60ms
+[2025-09-09 14:40:22] [Rank 0] step:3941/10000 train_time:199242ms step_avg:50.56ms
+[2025-09-09 14:40:23] [Rank 0] step:3961/10000 train_time:200066ms step_avg:50.51ms
+[2025-09-09 14:40:24] [Rank 0] step:3981/10000 train_time:200891ms step_avg:50.46ms
+[2025-09-09 14:40:24] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:40:25] [Rank 0] PRINT: step:4000/10000 train_loss:0.6616 val_loss:0.6438 train_time:201719ms step_avg:50.43ms
+[2025-09-09 14:40:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:40:25] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:41:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:41:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:41:46] [Rank 0] Total Loss: 4.9763
+[2025-09-09 14:41:46] [Rank 0] Total FTA (Unweighted): 0.9219
+[2025-09-09 14:41:46] [Rank 0] Total FTA (Weighted): 0.9219
+[2025-09-09 14:41:46] [Rank 0] Group 0 Loss: 4.9194
+[2025-09-09 14:41:46] [Rank 0] Group 1 Loss: 4.4839
+[2025-09-09 14:41:46] [Rank 0] Group 2 Loss: 4.2470
+[2025-09-09 14:41:46] [Rank 0] Group 3 Loss: 4.8935
+[2025-09-09 14:41:46] [Rank 0] Group 4 Loss: 4.8621
+[2025-09-09 14:41:46] [Rank 0] Group 5 Loss: 4.8827
+[2025-09-09 14:41:46] [Rank 0] Group 6 Loss: 4.8393
+[2025-09-09 14:41:46] [Rank 0] Group 7 Loss: 4.9670
+[2025-09-09 14:41:47] [Rank 0] Group 8 Loss: 5.0770
+[2025-09-09 14:41:47] [Rank 0] Group 9 Loss: 5.0222
+[2025-09-09 14:41:47] [Rank 0] Group 10 Loss: 5.1597
+[2025-09-09 14:41:47] [Rank 0] Group 11 Loss: 5.1857
+[2025-09-09 14:41:47] [Rank 0] Group 12 Loss: 5.1696
+[2025-09-09 14:41:47] [Rank 0] Group 13 Loss: 5.2741
+[2025-09-09 14:41:47] [Rank 0] Group 14 Loss: 5.2698
+[2025-09-09 14:41:47] [Rank 0] Group 15 Loss: 5.3672
+[2025-09-09 14:41:47] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:41:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:41:47] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:41:47] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:41:47] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:41:47] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:41:47] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:41:47] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:41:47] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:41:47] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:41:47] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:41:47] [Rank 0] Group 11 FTA: 0.9700
+[2025-09-09 14:41:47] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 14:41:47] [Rank 0] Group 13 FTA: 0.9400
+[2025-09-09 14:41:47] [Rank 0] Group 14 FTA: 0.5200
+[2025-09-09 14:41:47] [Rank 0] Group 15 FTA: 0.3300
+[2025-09-09 14:41:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:41:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:41:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:41:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:41:48] [Rank 0] step:4001/10000 train_time:201735ms step_avg:50.42ms
+[2025-09-09 14:41:49] [Rank 0] step:4021/10000 train_time:202830ms step_avg:50.44ms
+[2025-09-09 14:41:50] [Rank 0] step:4041/10000 train_time:203653ms step_avg:50.40ms
+[2025-09-09 14:41:51] [Rank 0] step:4061/10000 train_time:204481ms step_avg:50.35ms
+[2025-09-09 14:41:52] [Rank 0] step:4081/10000 train_time:205307ms step_avg:50.31ms
+[2025-09-09 14:41:52] [Rank 0] step:4101/10000 train_time:206135ms step_avg:50.26ms
+[2025-09-09 14:41:53] [Rank 0] step:4121/10000 train_time:206961ms step_avg:50.22ms
+[2025-09-09 14:41:54] [Rank 0] step:4141/10000 train_time:207789ms step_avg:50.18ms
+[2025-09-09 14:41:55] [Rank 0] step:4161/10000 train_time:208616ms step_avg:50.14ms
+[2025-09-09 14:41:56] [Rank 0] step:4181/10000 train_time:209443ms step_avg:50.09ms
+[2025-09-09 14:41:57] [Rank 0] step:4201/10000 train_time:210272ms step_avg:50.05ms
+[2025-09-09 14:41:57] [Rank 0] step:4221/10000 train_time:211100ms step_avg:50.01ms
+[2025-09-09 14:41:58] [Rank 0] step:4241/10000 train_time:211927ms step_avg:49.97ms
+[2025-09-09 14:41:59] [Rank 0] step:4261/10000 train_time:212755ms step_avg:49.93ms
+[2025-09-09 14:42:00] [Rank 0] step:4281/10000 train_time:213583ms step_avg:49.89ms
+[2025-09-09 14:42:01] [Rank 0] step:4301/10000 train_time:214410ms step_avg:49.85ms
+[2025-09-09 14:42:02] [Rank 0] step:4321/10000 train_time:215242ms step_avg:49.81ms
+[2025-09-09 14:42:02] [Rank 0] step:4341/10000 train_time:216072ms step_avg:49.77ms
+[2025-09-09 14:42:03] [Rank 0] step:4361/10000 train_time:216898ms step_avg:49.74ms
+[2025-09-09 14:42:04] [Rank 0] step:4381/10000 train_time:217725ms step_avg:49.70ms
+[2025-09-09 14:42:05] [Rank 0] step:4401/10000 train_time:218550ms step_avg:49.66ms
+[2025-09-09 14:42:06] [Rank 0] step:4421/10000 train_time:219374ms step_avg:49.62ms
+[2025-09-09 14:42:07] [Rank 0] step:4441/10000 train_time:220197ms step_avg:49.58ms
+[2025-09-09 14:42:07] [Rank 0] step:4461/10000 train_time:221022ms step_avg:49.55ms
+[2025-09-09 14:42:08] [Rank 0] step:4481/10000 train_time:221847ms step_avg:49.51ms
+[2025-09-09 14:42:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:42:09] [Rank 0] PRINT: step:4500/10000 train_loss:0.6525 val_loss:0.6354 train_time:222674ms step_avg:49.48ms +[2025-09-09 14:42:09] [Rank 0] PRINT: step:4500/10000 train_loss:0.6525 val_loss:0.6354 train_time:222674ms step_avg:49.48ms +[2025-09-09 14:42:09] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 14:42:09] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 14:42:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 14:42:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 14:43:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 14:43:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 14:43:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 14:43:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 14:43:31] [Rank 0] Total Loss: 4.9490 +[2025-09-09 14:43:31] [Rank 0] Total Loss: 4.9490 +[2025-09-09 14:43:31] [Rank 0] Total FTA (Unweighted): 0.9412 +[2025-09-09 14:43:31] [Rank 0] Total FTA (Unweighted): 0.9412 +[2025-09-09 14:43:31] [Rank 0] Total FTA (Weighted): 0.9413 +[2025-09-09 14:43:31] [Rank 0] Total FTA (Weighted): 0.9413 +[2025-09-09 14:43:31] [Rank 0] Group 0 Loss: 4.9241 +[2025-09-09 14:43:31] [Rank 0] Group 0 Loss: 4.9241 +[2025-09-09 14:43:31] [Rank 0] Group 1 Loss: 4.5145 +[2025-09-09 14:43:31] [Rank 0] Group 1 Loss: 4.5145 +[2025-09-09 14:43:31] [Rank 0] Group 2 Loss: 4.2805 +[2025-09-09 14:43:31] [Rank 0] Group 2 Loss: 4.2805 +[2025-09-09 14:43:31] [Rank 0] Group 3 Loss: 4.8629 +[2025-09-09 14:43:31] [Rank 0] Group 3 Loss: 4.8629 +[2025-09-09 14:43:31] [Rank 0] Group 4 Loss: 4.8202 +[2025-09-09 14:43:31] [Rank 0] Group 4 Loss: 4.8202 +[2025-09-09 14:43:31] [Rank 0] Group 5 Loss: 4.8636 +[2025-09-09 14:43:31] [Rank 0] Group 5 Loss: 4.8636 +[2025-09-09 14:43:31] [Rank 0] Group 6 Loss: 4.7907 +[2025-09-09 14:43:31] [Rank 0] Group 6 Loss: 4.7907 +[2025-09-09 14:43:31] [Rank 0] Group 7 Loss: 4.9166 +[2025-09-09 14:43:31] [Rank 0] Group 7 Loss: 4.9166 +[2025-09-09 14:43:31] [Rank 0] Group 8 Loss: 5.0741 +[2025-09-09 14:43:31] [Rank 0] Group 8 Loss: 5.0741 +[2025-09-09 14:43:31] [Rank 0] Group 9 Loss: 5.0041 +[2025-09-09 14:43:31] [Rank 0] Group 9 Loss: 5.0041 +[2025-09-09 14:43:31] [Rank 0] Group 10 Loss: 5.1380 +[2025-09-09 14:43:31] [Rank 0] Group 10 Loss: 5.1380 +[2025-09-09 14:43:31] [Rank 0] Group 11 Loss: 5.1673 +[2025-09-09 14:43:31] [Rank 0] Group 11 Loss: 5.1673 +[2025-09-09 14:43:31] [Rank 0] Group 12 Loss: 5.1032 +[2025-09-09 14:43:31] [Rank 0] Group 12 Loss: 5.1032 +[2025-09-09 14:43:31] [Rank 0] Group 13 Loss: 5.2422 +[2025-09-09 14:43:31] [Rank 0] Group 13 Loss: 5.2422 +[2025-09-09 14:43:31] [Rank 0] Group 14 Loss: 5.2070 +[2025-09-09 14:43:31] [Rank 0] Group 14 Loss: 5.2070 +[2025-09-09 14:43:31] [Rank 0] Group 15 Loss: 5.2745 +[2025-09-09 14:43:31] [Rank 0] Group 15 Loss: 5.2745 +[2025-09-09 14:43:31] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 5 FTA: 
1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-09 14:43:31] [Rank 0] Group 12 FTA: 0.9800 +[2025-09-09 14:43:31] [Rank 0] Group 12 FTA: 0.9800 +[2025-09-09 14:43:31] [Rank 0] Group 13 FTA: 0.9500 +[2025-09-09 14:43:31] [Rank 0] Group 13 FTA: 0.9500 +[2025-09-09 14:43:31] [Rank 0] Group 14 FTA: 0.6800 +[2025-09-09 14:43:31] [Rank 0] Group 14 FTA: 0.6800 +[2025-09-09 14:43:31] [Rank 0] Group 15 FTA: 0.4500 +[2025-09-09 14:43:31] [Rank 0] Group 15 FTA: 0.4500 +[2025-09-09 14:43:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png +[2025-09-09 14:43:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png +[2025-09-09 14:43:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png +[2025-09-09 14:43:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png +[2025-09-09 14:43:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png +[2025-09-09 14:43:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png +[2025-09-09 14:43:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png +[2025-09-09 14:43:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png +[2025-09-09 14:43:32] [Rank 0] step:4501/10000 train_time:222690ms step_avg:49.48ms +[2025-09-09 14:43:32] [Rank 0] step:4501/10000 train_time:222690ms step_avg:49.48ms +[2025-09-09 14:43:33] [Rank 0] step:4521/10000 train_time:223536ms step_avg:49.44ms +[2025-09-09 14:43:33] [Rank 0] step:4521/10000 train_time:223536ms step_avg:49.44ms +[2025-09-09 14:43:34] [Rank 0] step:4541/10000 train_time:224360ms step_avg:49.41ms +[2025-09-09 14:43:34] [Rank 0] step:4541/10000 train_time:224360ms step_avg:49.41ms +[2025-09-09 14:43:35] [Rank 0] step:4561/10000 train_time:225185ms step_avg:49.37ms +[2025-09-09 14:43:35] [Rank 0] step:4561/10000 train_time:225185ms step_avg:49.37ms +[2025-09-09 14:43:36] [Rank 0] step:4581/10000 train_time:226010ms step_avg:49.34ms +[2025-09-09 14:43:36] [Rank 0] step:4581/10000 train_time:226010ms step_avg:49.34ms +[2025-09-09 14:43:36] [Rank 0] step:4601/10000 train_time:226833ms step_avg:49.30ms +[2025-09-09 14:43:36] [Rank 0] step:4601/10000 train_time:226833ms step_avg:49.30ms +[2025-09-09 14:43:37] [Rank 0] step:4621/10000 train_time:227658ms step_avg:49.27ms +[2025-09-09 14:43:37] [Rank 0] step:4621/10000 train_time:227658ms step_avg:49.27ms +[2025-09-09 14:43:38] 
+[2025-09-09 14:43:38] [Rank 0] step:4641/10000 train_time:228483ms step_avg:49.23ms
+[2025-09-09 14:43:39] [Rank 0] step:4661/10000 train_time:229308ms step_avg:49.20ms
+[2025-09-09 14:43:40] [Rank 0] step:4681/10000 train_time:230134ms step_avg:49.16ms
+[2025-09-09 14:43:41] [Rank 0] step:4701/10000 train_time:230958ms step_avg:49.13ms
+[2025-09-09 14:43:41] [Rank 0] step:4721/10000 train_time:231784ms step_avg:49.10ms
+[2025-09-09 14:43:42] [Rank 0] step:4741/10000 train_time:232608ms step_avg:49.06ms
+[2025-09-09 14:43:43] [Rank 0] step:4761/10000 train_time:233433ms step_avg:49.03ms
+[2025-09-09 14:43:44] [Rank 0] step:4781/10000 train_time:234413ms step_avg:49.03ms
+[2025-09-09 14:43:45] [Rank 0] step:4801/10000 train_time:235539ms step_avg:49.06ms
+[2025-09-09 14:43:46] [Rank 0] step:4821/10000 train_time:236364ms step_avg:49.03ms
+[2025-09-09 14:43:47] [Rank 0] step:4841/10000 train_time:237503ms step_avg:49.06ms
+[2025-09-09 14:43:48] [Rank 0] step:4861/10000 train_time:238327ms step_avg:49.03ms
+[2025-09-09 14:43:49] [Rank 0] step:4881/10000 train_time:239151ms step_avg:49.00ms
+[2025-09-09 14:43:50] [Rank 0] step:4901/10000 train_time:239975ms step_avg:48.96ms
+[2025-09-09 14:43:50] [Rank 0] step:4921/10000 train_time:240798ms step_avg:48.93ms
+[2025-09-09 14:43:51] [Rank 0] step:4941/10000 train_time:241634ms step_avg:48.90ms
+[2025-09-09 14:43:52] [Rank 0] step:4961/10000 train_time:242462ms step_avg:48.87ms
+[2025-09-09 14:43:53] [Rank 0] step:4981/10000 train_time:243286ms step_avg:48.84ms
+[2025-09-09 14:43:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:43:54] [Rank 0] PRINT: step:5000/10000 train_loss:0.6440 val_loss:0.6287 train_time:244113ms step_avg:48.82ms
+[2025-09-09 14:43:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:43:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:45:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:45:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:45:16] [Rank 0] Total Loss: 4.9354
+[2025-09-09 14:45:16] [Rank 0] Total FTA (Unweighted): 0.9525
+[2025-09-09 14:45:16] [Rank 0] Total FTA (Weighted): 0.9525
+[2025-09-09 14:45:16] [Rank 0] Group 0 Loss: 4.8807
+[2025-09-09 14:45:16] [Rank 0] Group 1 Loss: 4.5089
+[2025-09-09 14:45:16] [Rank 0] Group 2 Loss: 4.3572
+[2025-09-09 14:45:16] [Rank 0] Group 3 Loss: 4.8222
+[2025-09-09 14:45:16] [Rank 0] Group 4 Loss: 4.8142
+[2025-09-09 14:45:16] [Rank 0] Group 5 Loss: 4.8509
+[2025-09-09 14:45:16] [Rank 0] Group 6 Loss: 4.7753
+[2025-09-09 14:45:16] [Rank 0] Group 7 Loss: 4.8950
+[2025-09-09 14:45:16] [Rank 0] Group 8 Loss: 5.0317
+[2025-09-09 14:45:16] [Rank 0] Group 9 Loss: 5.0071
+[2025-09-09 14:45:16] [Rank 0] Group 10 Loss: 5.1160
+[2025-09-09 14:45:16] [Rank 0] Group 11 Loss: 5.1687
+[2025-09-09 14:45:16] [Rank 0] Group 12 Loss: 5.1028
+[2025-09-09 14:45:16] [Rank 0] Group 13 Loss: 5.2590
+[2025-09-09 14:45:16] [Rank 0] Group 14 Loss: 5.1887
+[2025-09-09 14:45:16] [Rank 0] Group 15 Loss: 5.1881
+[2025-09-09 14:45:16] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:45:16] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:45:16] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:45:16] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:45:16] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:45:16] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:45:16] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:45:16] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:45:16] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:45:16] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:45:16] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-09 14:45:16] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:45:16] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-09 14:45:16] [Rank 0] Group 13 FTA: 0.9400
+[2025-09-09 14:45:16] [Rank 0] Group 14 FTA: 0.7900
+[2025-09-09 14:45:16] [Rank 0] Group 15 FTA: 0.5400
+[2025-09-09 14:45:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:45:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:45:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:45:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:45:17] [Rank 0] step:5001/10000 train_time:244129ms step_avg:48.82ms
+[2025-09-09 14:45:18] [Rank 0] step:5021/10000 train_time:244965ms step_avg:48.79ms
+[2025-09-09 14:45:19] [Rank 0] step:5041/10000 train_time:245789ms step_avg:48.76ms
+[2025-09-09 14:45:20] [Rank 0] step:5061/10000 train_time:246614ms step_avg:48.73ms
+[2025-09-09 14:45:21] [Rank 0] step:5081/10000 train_time:247439ms step_avg:48.70ms
+[2025-09-09 14:45:21] [Rank 0] step:5101/10000 train_time:248263ms step_avg:48.67ms
+[2025-09-09 14:45:22] [Rank 0] step:5121/10000 train_time:249088ms step_avg:48.64ms
+[2025-09-09 14:45:23] [Rank 0] step:5141/10000 train_time:249915ms step_avg:48.61ms
+[2025-09-09 14:45:24] [Rank 0] step:5161/10000 train_time:250738ms step_avg:48.58ms
+[2025-09-09 14:45:25] [Rank 0] step:5181/10000 train_time:251559ms step_avg:48.55ms
+[2025-09-09 14:45:26] [Rank 0] step:5201/10000 train_time:252382ms step_avg:48.53ms
+[2025-09-09 14:45:26] [Rank 0] step:5221/10000 train_time:253206ms step_avg:48.50ms
+[2025-09-09 14:45:27] [Rank 0] step:5241/10000 train_time:254029ms step_avg:48.47ms
+[2025-09-09 14:45:28] [Rank 0] step:5261/10000 train_time:254854ms step_avg:48.44ms
+[2025-09-09 14:45:29] [Rank 0] step:5281/10000 train_time:255677ms step_avg:48.41ms
+[2025-09-09 14:45:30] [Rank 0] step:5301/10000 train_time:256502ms step_avg:48.39ms
+[2025-09-09 14:45:30] [Rank 0] step:5321/10000 train_time:257325ms step_avg:48.36ms
+[2025-09-09 14:45:31] [Rank 0] step:5341/10000 train_time:258150ms step_avg:48.33ms
+[2025-09-09 14:45:32] [Rank 0] step:5361/10000 train_time:258975ms step_avg:48.31ms
+[2025-09-09 14:45:33] [Rank 0] step:5381/10000 train_time:259797ms step_avg:48.28ms
+[2025-09-09 14:45:34] [Rank 0] step:5401/10000 train_time:260621ms step_avg:48.25ms
+[2025-09-09 14:45:35] [Rank 0] step:5421/10000 train_time:261445ms step_avg:48.23ms
+[2025-09-09 14:45:35] [Rank 0] step:5441/10000 train_time:262268ms step_avg:48.20ms
+[2025-09-09 14:45:36] [Rank 0] step:5461/10000 train_time:263091ms step_avg:48.18ms
+[2025-09-09 14:45:37] [Rank 0] step:5481/10000 train_time:263916ms step_avg:48.15ms
+[2025-09-09 14:45:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:45:38] [Rank 0] PRINT: step:5500/10000 train_loss:0.6369 val_loss:0.6236 train_time:264743ms step_avg:48.14ms
+[2025-09-09 14:45:38] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:45:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:47:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:47:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:47:00] [Rank 0] Total Loss: 5.0132
+[2025-09-09 14:47:00] [Rank 0] Total FTA (Unweighted): 0.9619
+[2025-09-09 14:47:00] [Rank 0] Total FTA (Weighted): 0.9619
+[2025-09-09 14:47:00] [Rank 0] Group 0 Loss: 4.9315
+[2025-09-09 14:47:00] [Rank 0] Group 1 Loss: 4.6123
+[2025-09-09 14:47:00] [Rank 0] Group 2 Loss: 4.3999
+[2025-09-09 14:47:00] [Rank 0] Group 3 Loss: 4.9059
+[2025-09-09 14:47:00] [Rank 0] Group 4 Loss: 4.9112
+[2025-09-09 14:47:00] [Rank 0] Group 5 Loss: 4.9383
+[2025-09-09 14:47:00] [Rank 0] Group 6 Loss: 4.8447
+[2025-09-09 14:47:00] [Rank 0] Group 7 Loss: 5.0178
+[2025-09-09 14:47:00] [Rank 0] Group 8 Loss: 5.1176
+[2025-09-09 14:47:00] [Rank 0] Group 9 Loss: 5.0892
+[2025-09-09 14:47:00] [Rank 0] Group 10 Loss: 5.2284
+[2025-09-09 14:47:00] [Rank 0] Group 11 Loss: 5.2379
+[2025-09-09 14:47:00] [Rank 0] Group 12 Loss: 5.1494
+[2025-09-09 14:47:00] [Rank 0] Group 13 Loss: 5.3253
+[2025-09-09 14:47:00] [Rank 0] Group 14 Loss: 5.2231
+[2025-09-09 14:47:00] [Rank 0] Group 15 Loss: 5.2793
+[2025-09-09 14:47:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:47:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:47:00] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:47:00] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:47:00] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:47:00] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:47:00] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:47:00] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:47:00] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:47:00] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:47:00] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:47:00] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:47:00] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 14:47:00] [Rank 0] Group 13 FTA: 0.9800
+[2025-09-09 14:47:00] [Rank 0] Group 14 FTA: 0.8500
+[2025-09-09 14:47:00] [Rank 0] Group 15 FTA: 0.5700
+[2025-09-09 14:47:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:47:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:47:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:47:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:47:01] [Rank 0] step:5501/10000 train_time:264759ms step_avg:48.13ms
+[2025-09-09 14:47:02] [Rank 0] step:5521/10000 train_time:265599ms step_avg:48.11ms
+[2025-09-09 14:47:03] [Rank 0] step:5541/10000 train_time:266422ms step_avg:48.08ms
+[2025-09-09 14:47:04] [Rank 0] step:5561/10000 train_time:267245ms step_avg:48.06ms
+[2025-09-09 14:47:04] [Rank 0] step:5581/10000 train_time:268069ms step_avg:48.03ms
+[2025-09-09 14:47:05] [Rank 0] step:5601/10000 train_time:268893ms step_avg:48.01ms
+[2025-09-09 14:47:06] [Rank 0] step:5621/10000 train_time:269716ms step_avg:47.98ms
+[2025-09-09 14:47:08] [Rank 0] step:5641/10000 train_time:271240ms step_avg:48.08ms
+[2025-09-09 14:47:08] [Rank 0] step:5661/10000 train_time:272065ms step_avg:48.06ms
+[2025-09-09 14:47:09] [Rank 0] step:5681/10000 train_time:272889ms step_avg:48.04ms
+[2025-09-09 14:47:10] [Rank 0] step:5701/10000 train_time:273714ms step_avg:48.01ms
+[2025-09-09 14:47:11] [Rank 0] step:5721/10000 train_time:274538ms step_avg:47.99ms
+[2025-09-09 14:47:12] [Rank 0] step:5741/10000 train_time:275362ms step_avg:47.96ms
+[2025-09-09 14:47:13] [Rank 0] step:5761/10000 train_time:276187ms step_avg:47.94ms
+[2025-09-09 14:47:13] [Rank 0] step:5781/10000 train_time:277012ms step_avg:47.92ms
+[2025-09-09 14:47:14] [Rank 0] step:5801/10000 train_time:277835ms step_avg:47.89ms
+[2025-09-09 14:47:15] [Rank 0] step:5821/10000 train_time:278660ms step_avg:47.87ms
+[2025-09-09 14:47:16] [Rank 0] step:5841/10000 train_time:279484ms step_avg:47.85ms
+[2025-09-09 14:47:17] [Rank 0] step:5861/10000 train_time:280308ms step_avg:47.83ms
+[2025-09-09 14:47:18] [Rank 0] step:5881/10000 train_time:281133ms step_avg:47.80ms
+[2025-09-09 14:47:18] [Rank 0] step:5901/10000 train_time:281958ms step_avg:47.78ms
+[2025-09-09 14:47:19] [Rank 0] step:5921/10000 train_time:282783ms step_avg:47.76ms
+[2025-09-09 14:47:20] [Rank 0] step:5941/10000 train_time:283607ms step_avg:47.74ms
+[2025-09-09 14:47:21] [Rank 0] step:5961/10000 train_time:284432ms step_avg:47.72ms
+[2025-09-09 14:47:22] [Rank 0] step:5981/10000 train_time:285257ms step_avg:47.69ms
+[2025-09-09 14:47:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:47:23] [Rank 0] PRINT: step:6000/10000 train_loss:0.6309 val_loss:0.6190 train_time:286085ms step_avg:47.68ms
+[2025-09-09 14:47:23] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:47:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:48:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:48:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:48:44] [Rank 0] Total Loss: 5.0062
+[2025-09-09 14:48:44] [Rank 0] Total FTA (Unweighted): 0.9687
+[2025-09-09 14:48:44] [Rank 0] Total FTA (Weighted): 0.9688
+[2025-09-09 14:48:44] [Rank 0] Group 0 Loss: 5.0187
+[2025-09-09 14:48:44] [Rank 0] Group 1 Loss: 4.4924
+[2025-09-09 14:48:44] [Rank 0] Group 2 Loss: 4.3760
+[2025-09-09 14:48:44] [Rank 0] Group 3 Loss: 4.8809
+[2025-09-09 14:48:44] [Rank 0] Group 4 Loss: 4.9264
+[2025-09-09 14:48:44] [Rank 0] Group 5 Loss: 4.9145
+[2025-09-09 14:48:44] [Rank 0] Group 6 Loss: 4.8460
+[2025-09-09 14:48:44] [Rank 0] Group 7 Loss: 5.0164
+[2025-09-09 14:48:44] [Rank 0] Group 8 Loss: 5.1073
+[2025-09-09 14:48:44] [Rank 0] Group 9 Loss: 5.0967
+[2025-09-09 14:48:44] [Rank 0] Group 10 Loss: 5.1904
+[2025-09-09 14:48:44] [Rank 0] Group 11 Loss: 5.2299
+[2025-09-09 14:48:44] [Rank 0] Group 12 Loss: 5.1616
+[2025-09-09 14:48:44] [Rank 0] Group 13 Loss: 5.3076
+[2025-09-09 14:48:44] [Rank 0] Group 14 Loss: 5.2579
+[2025-09-09 14:48:44] [Rank 0] Group 15 Loss: 5.2763
+[2025-09-09 14:48:44] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 14:48:44] [Rank 0] Group 13 FTA: 0.9700
+[2025-09-09 14:48:44] [Rank 0] Group 14 FTA: 0.9100
+[2025-09-09 14:48:44] [Rank 0] Group 15 FTA: 0.6200
+[2025-09-09 14:48:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:48:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:48:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:48:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:48:45] [Rank 0] step:6001/10000 train_time:286101ms step_avg:47.68ms
+[2025-09-09 14:48:47] [Rank 0] step:6021/10000 train_time:287602ms step_avg:47.77ms
+[2025-09-09 14:48:48] [Rank 0] step:6041/10000 train_time:288426ms step_avg:47.74ms
+[2025-09-09 14:48:48] [Rank 0] step:6061/10000 train_time:289251ms step_avg:47.72ms
+[2025-09-09 14:48:49] [Rank 0] step:6081/10000 train_time:290076ms step_avg:47.70ms
+[2025-09-09 14:48:50] [Rank 0] step:6101/10000 train_time:290900ms step_avg:47.68ms
+[2025-09-09 14:48:51] [Rank 0] step:6121/10000 train_time:291727ms step_avg:47.66ms
+[2025-09-09 14:48:52] [Rank 0] step:6141/10000 train_time:292547ms step_avg:47.64ms
+[2025-09-09 14:48:52] [Rank 0] step:6161/10000 train_time:293371ms step_avg:47.62ms
+[2025-09-09 14:48:53] [Rank 0] step:6181/10000 train_time:294194ms step_avg:47.60ms
+[2025-09-09 14:48:54] [Rank 0] step:6201/10000 train_time:295021ms step_avg:47.58ms
+[2025-09-09 14:48:55] [Rank 0] step:6221/10000 train_time:295844ms step_avg:47.56ms
+[2025-09-09 14:48:56] [Rank 0] step:6241/10000 train_time:296667ms step_avg:47.54ms
+[2025-09-09 14:48:57] [Rank 0] step:6261/10000 train_time:297491ms step_avg:47.51ms
+[2025-09-09 14:48:57] [Rank 0] step:6281/10000 train_time:298315ms step_avg:47.49ms
+[2025-09-09 14:48:58] [Rank 0] step:6301/10000 train_time:299139ms step_avg:47.47ms
+[2025-09-09 14:48:59] [Rank 0] step:6321/10000 train_time:299962ms step_avg:47.45ms
+[2025-09-09 14:49:00] [Rank 0] step:6341/10000 train_time:300925ms step_avg:47.46ms
+[2025-09-09 14:49:01] [Rank 0] step:6361/10000 train_time:302112ms step_avg:47.49ms
+[2025-09-09 14:49:02] [Rank 0] step:6381/10000 train_time:302940ms step_avg:47.48ms
+[2025-09-09 14:49:03] [Rank 0] step:6401/10000 train_time:303761ms step_avg:47.46ms
+[2025-09-09 14:49:04] [Rank 0] step:6421/10000 train_time:304585ms step_avg:47.44ms
+[2025-09-09 14:49:05] [Rank 0] step:6441/10000 train_time:305410ms step_avg:47.42ms
+[2025-09-09 14:49:05] [Rank 0] step:6461/10000 train_time:306234ms step_avg:47.40ms
+[2025-09-09 14:49:06] [Rank 0] step:6481/10000 train_time:307058ms step_avg:47.38ms
+[2025-09-09 14:49:07] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:49:07] [Rank 0] PRINT: step:6500/10000 train_loss:0.6256 val_loss:0.6149 train_time:307885ms step_avg:47.37ms
+[2025-09-09 14:49:07] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:49:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:50:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:50:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:50:29] [Rank 0] Total Loss: 5.0068
+[2025-09-09 14:50:29] [Rank 0] Total FTA (Unweighted): 0.9806
+[2025-09-09 14:50:29] [Rank 0] Total FTA (Weighted): 0.9806
+[2025-09-09 14:50:29] [Rank 0] Group 0 Loss: 5.1272
+[2025-09-09 14:50:29] [Rank 0] Group 1 Loss: 4.5591
+[2025-09-09 14:50:29] [Rank 0] Group 2 Loss: 4.4399
+[2025-09-09 14:50:29] [Rank 0] Group 3 Loss: 4.8465
+[2025-09-09 14:50:29] [Rank 0] Group 4 Loss: 4.9022
+[2025-09-09 14:50:29] [Rank 0] Group 5 Loss: 4.8909
+[2025-09-09 14:50:29] [Rank 0] Group 6 Loss: 4.8007
+[2025-09-09 14:50:29] [Rank 0] Group 7 Loss: 4.9862
+[2025-09-09 14:50:29] [Rank 0] Group 8 Loss: 5.0938
+[2025-09-09 14:50:29] [Rank 0] Group 9 Loss: 5.0873
+[2025-09-09 14:50:29] [Rank 0] Group 10 Loss: 5.1934
+[2025-09-09 14:50:29] [Rank 0] Group 11 Loss: 5.2316
+[2025-09-09 14:50:29] [Rank 0] Group 12 Loss: 5.1586
+[2025-09-09 14:50:29] [Rank 0] Group 13 Loss: 5.2657
+[2025-09-09 14:50:29] [Rank 0] Group 14 Loss: 5.2549
+[2025-09-09 14:50:29] [Rank 0] Group 15 Loss: 5.2707
+[2025-09-09 14:50:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 14:50:29] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 14:50:29] [Rank 0] Group 14 FTA: 0.9400
+[2025-09-09 14:50:29] [Rank 0] Group 15 FTA: 0.7600
+[2025-09-09 14:50:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:50:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:50:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:50:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:50:31] [Rank 0] step:6501/10000 train_time:307902ms step_avg:47.36ms
+[2025-09-09 14:50:32] [Rank 0] step:6521/10000 train_time:308732ms step_avg:47.34ms
+[2025-09-09 14:50:33] [Rank 0] step:6541/10000 train_time:309555ms step_avg:47.33ms
+[2025-09-09 14:50:33] [Rank 0] step:6561/10000 train_time:310380ms step_avg:47.31ms
+[2025-09-09 14:50:34] [Rank 0] step:6581/10000 train_time:311204ms step_avg:47.29ms
+[2025-09-09 14:50:35] [Rank 0] step:6601/10000 train_time:312028ms step_avg:47.27ms
+[2025-09-09 14:50:36] [Rank 0] step:6621/10000 train_time:312852ms step_avg:47.25ms
+[2025-09-09 14:50:37] [Rank 0] step:6641/10000 train_time:313676ms step_avg:47.23ms
+[2025-09-09 14:50:38] [Rank 0] step:6661/10000 train_time:314501ms step_avg:47.22ms
+[2025-09-09 14:50:38] [Rank 0] step:6681/10000 train_time:315324ms step_avg:47.20ms
+[2025-09-09 14:50:39] [Rank 0] step:6701/10000 train_time:316148ms step_avg:47.18ms
+[2025-09-09 14:50:40] [Rank 0] step:6721/10000 train_time:316973ms step_avg:47.16ms
+[2025-09-09 14:50:41] [Rank 0] step:6741/10000 train_time:317800ms step_avg:47.14ms
+[2025-09-09 14:50:42] [Rank 0] step:6761/10000 train_time:318625ms step_avg:47.13ms
+[2025-09-09 14:50:42] [Rank 0] step:6781/10000 train_time:319450ms step_avg:47.11ms
+[2025-09-09 14:50:43] [Rank 0] step:6801/10000 train_time:320276ms step_avg:47.09ms
+[2025-09-09 14:50:44] [Rank 0] step:6821/10000 train_time:321101ms step_avg:47.08ms
+[2025-09-09 14:50:46] [Rank 0] step:6841/10000 train_time:322601ms step_avg:47.16ms
+[2025-09-09 14:50:46] [Rank 0] step:6861/10000 train_time:323427ms step_avg:47.14ms
+[2025-09-09 14:50:47] [Rank 0] step:6881/10000 train_time:324256ms step_avg:47.12ms
+[2025-09-09 14:50:48] [Rank 0] step:6901/10000 train_time:325081ms step_avg:47.11ms
+[2025-09-09 14:50:49] [Rank 0] step:6921/10000 train_time:325906ms step_avg:47.09ms
+[2025-09-09 14:50:50] [Rank 0] step:6941/10000 train_time:326732ms step_avg:47.07ms
+[2025-09-09 14:50:51] [Rank 0] step:6961/10000 train_time:327557ms step_avg:47.06ms
+[2025-09-09 14:50:51] [Rank 0] step:6981/10000 train_time:328382ms step_avg:47.04ms
+[2025-09-09 14:50:52] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:50:53] [Rank 0] PRINT: step:7000/10000 train_loss:0.6205 val_loss:0.6121 train_time:329211ms step_avg:47.03ms
+[2025-09-09 14:50:53] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:50:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:52:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:52:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:52:14] [Rank 0] Total Loss: 5.0064
+[2025-09-09 14:52:14] [Rank 0] Total FTA (Unweighted): 0.9862
+[2025-09-09 14:52:14] [Rank 0] Total FTA (Weighted): 0.9862
+[2025-09-09 14:52:14] [Rank 0] Group 0 Loss: 4.8921
+[2025-09-09 14:52:14] [Rank 0] Group 1 Loss: 4.5426
+[2025-09-09 14:52:15] [Rank 0] Group 2 Loss: 4.3675
+[2025-09-09 14:52:15] [Rank 0] Group 3 Loss: 4.8682
+[2025-09-09 14:52:15] [Rank 0] Group 4 Loss: 4.8850
+[2025-09-09 14:52:15] [Rank 0] Group 5 Loss: 4.9012
+[2025-09-09 14:52:15] [Rank 0] Group 6 Loss: 4.8487
+[2025-09-09 14:52:15] [Rank 0] Group 7 Loss: 5.0079
+[2025-09-09 14:52:15] [Rank 0] Group 8 Loss: 5.1424
+[2025-09-09 14:52:15] [Rank 0] Group 9 Loss: 5.0990
+[2025-09-09 14:52:15] [Rank 0] Group 10 Loss: 5.2002
+[2025-09-09 14:52:15] [Rank 0] Group 11 Loss: 5.2697
+[2025-09-09 14:52:15] [Rank 0] Group 12 Loss: 5.1892
+[2025-09-09 14:52:15] [Rank 0] Group 13 Loss: 5.3220
+[2025-09-09 14:52:15] [Rank 0] Group 14 Loss: 5.2729
+[2025-09-09 14:52:15] [Rank 0] Group 15 Loss: 5.2945
+[2025-09-09 14:52:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 14:52:15] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 14:52:15] [Rank 0] Group 14 FTA: 0.9500
+[2025-09-09 14:52:15] [Rank 0] Group 15 FTA: 0.8400
+[2025-09-09 14:52:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:52:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:52:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:52:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:52:16] [Rank 0] step:7001/10000 train_time:329229ms step_avg:47.03ms
+[2025-09-09 14:52:17] [Rank 0] step:7021/10000 train_time:330075ms step_avg:47.01ms
+[2025-09-09 14:52:18] [Rank 0] step:7041/10000 train_time:330900ms step_avg:47.00ms
+[2025-09-09 14:52:19] [Rank 0] step:7061/10000 train_time:331728ms step_avg:46.98ms
+[2025-09-09 14:52:19] [Rank 0] step:7081/10000 train_time:332554ms step_avg:46.96ms
+[2025-09-09 14:52:20] [Rank 0] step:7101/10000 train_time:333379ms step_avg:46.95ms
+[2025-09-09 14:52:21] [Rank 0] step:7121/10000 train_time:334205ms step_avg:46.93ms
[Rank 0] step:7141/10000 train_time:335031ms step_avg:46.92ms +[2025-09-09 14:52:22] [Rank 0] step:7141/10000 train_time:335031ms step_avg:46.92ms +[2025-09-09 14:52:23] [Rank 0] step:7161/10000 train_time:335856ms step_avg:46.90ms +[2025-09-09 14:52:23] [Rank 0] step:7161/10000 train_time:335856ms step_avg:46.90ms +[2025-09-09 14:52:24] [Rank 0] step:7181/10000 train_time:336682ms step_avg:46.89ms +[2025-09-09 14:52:24] [Rank 0] step:7181/10000 train_time:336682ms step_avg:46.89ms +[2025-09-09 14:52:24] [Rank 0] step:7201/10000 train_time:337508ms step_avg:46.87ms +[2025-09-09 14:52:24] [Rank 0] step:7201/10000 train_time:337508ms step_avg:46.87ms +[2025-09-09 14:52:25] [Rank 0] step:7221/10000 train_time:338333ms step_avg:46.85ms +[2025-09-09 14:52:25] [Rank 0] step:7221/10000 train_time:338333ms step_avg:46.85ms +[2025-09-09 14:52:26] [Rank 0] step:7241/10000 train_time:339159ms step_avg:46.84ms +[2025-09-09 14:52:26] [Rank 0] step:7241/10000 train_time:339159ms step_avg:46.84ms +[2025-09-09 14:52:27] [Rank 0] step:7261/10000 train_time:339985ms step_avg:46.82ms +[2025-09-09 14:52:27] [Rank 0] step:7261/10000 train_time:339985ms step_avg:46.82ms +[2025-09-09 14:52:28] [Rank 0] step:7281/10000 train_time:340809ms step_avg:46.81ms +[2025-09-09 14:52:28] [Rank 0] step:7281/10000 train_time:340809ms step_avg:46.81ms +[2025-09-09 14:52:29] [Rank 0] step:7301/10000 train_time:341637ms step_avg:46.79ms +[2025-09-09 14:52:29] [Rank 0] step:7301/10000 train_time:341637ms step_avg:46.79ms +[2025-09-09 14:52:29] [Rank 0] step:7321/10000 train_time:342462ms step_avg:46.78ms +[2025-09-09 14:52:29] [Rank 0] step:7321/10000 train_time:342462ms step_avg:46.78ms +[2025-09-09 14:52:30] [Rank 0] step:7341/10000 train_time:343288ms step_avg:46.76ms +[2025-09-09 14:52:30] [Rank 0] step:7341/10000 train_time:343288ms step_avg:46.76ms +[2025-09-09 14:52:31] [Rank 0] step:7361/10000 train_time:344114ms step_avg:46.75ms +[2025-09-09 14:52:31] [Rank 0] step:7361/10000 train_time:344114ms step_avg:46.75ms +[2025-09-09 14:52:32] [Rank 0] step:7381/10000 train_time:344938ms step_avg:46.73ms +[2025-09-09 14:52:32] [Rank 0] step:7381/10000 train_time:344938ms step_avg:46.73ms +[2025-09-09 14:52:33] [Rank 0] step:7401/10000 train_time:345764ms step_avg:46.72ms +[2025-09-09 14:52:33] [Rank 0] step:7401/10000 train_time:345764ms step_avg:46.72ms +[2025-09-09 14:52:33] [Rank 0] step:7421/10000 train_time:346591ms step_avg:46.70ms +[2025-09-09 14:52:33] [Rank 0] step:7421/10000 train_time:346591ms step_avg:46.70ms +[2025-09-09 14:52:34] [Rank 0] step:7441/10000 train_time:347416ms step_avg:46.69ms +[2025-09-09 14:52:34] [Rank 0] step:7441/10000 train_time:347416ms step_avg:46.69ms +[2025-09-09 14:52:35] [Rank 0] step:7461/10000 train_time:348242ms step_avg:46.68ms +[2025-09-09 14:52:35] [Rank 0] step:7461/10000 train_time:348242ms step_avg:46.68ms +[2025-09-09 14:52:36] [Rank 0] step:7481/10000 train_time:349067ms step_avg:46.66ms +[2025-09-09 14:52:36] [Rank 0] step:7481/10000 train_time:349067ms step_avg:46.66ms +[2025-09-09 14:52:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 14:52:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-09 14:52:37] [Rank 0] PRINT: step:7500/10000 train_loss:0.6163 val_loss:0.6100 train_time:349895ms step_avg:46.65ms
+[2025-09-09 14:52:37] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:52:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:53:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:53:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:53:59] [Rank 0] Total Loss: 5.0037
+[2025-09-09 14:53:59] [Rank 0] Total FTA (Unweighted): 0.9888
+[2025-09-09 14:53:59] [Rank 0] Total FTA (Weighted): 0.9888
+[2025-09-09 14:53:59] [Rank 0] Group 0 Loss: 4.9793
+[2025-09-09 14:53:59] [Rank 0] Group 1 Loss: 4.6188
+[2025-09-09 14:53:59] [Rank 0] Group 2 Loss: 4.4011
+[2025-09-09 14:53:59] [Rank 0] Group 3 Loss: 4.8849
+[2025-09-09 14:53:59] [Rank 0] Group 4 Loss: 4.8723
+[2025-09-09 14:53:59] [Rank 0] Group 5 Loss: 4.9142
+[2025-09-09 14:53:59] [Rank 0] Group 6 Loss: 4.8022
+[2025-09-09 14:53:59] [Rank 0] Group 7 Loss: 4.9939
+[2025-09-09 14:53:59] [Rank 0] Group 8 Loss: 5.0946
+[2025-09-09 14:53:59] [Rank 0] Group 9 Loss: 5.0718
+[2025-09-09 14:53:59] [Rank 0] Group 10 Loss: 5.2012
+[2025-09-09 14:53:59] [Rank 0] Group 11 Loss: 5.2378
+[2025-09-09 14:53:59] [Rank 0] Group 12 Loss: 5.1863
+[2025-09-09 14:53:59] [Rank 0] Group 13 Loss: 5.2865
+[2025-09-09 14:53:59] [Rank 0] Group 14 Loss: 5.2643
+[2025-09-09 14:53:59] [Rank 0] Group 15 Loss: 5.2508
+[2025-09-09 14:53:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 14:53:59] [Rank 0] Group 13 FTA: 0.9900
+[2025-09-09 14:53:59] [Rank 0] Group 14 FTA: 0.9700
+[2025-09-09 14:53:59] [Rank 0] Group 15 FTA: 0.8600
+[2025-09-09 14:54:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:54:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:54:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:54:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:54:01] [Rank 0] step:7501/10000 train_time:349912ms step_avg:46.65ms
+[2025-09-09 14:54:01] [Rank 0] step:7521/10000 train_time:350737ms step_avg:46.63ms
+[2025-09-09 14:54:02] [Rank 0] step:7541/10000 train_time:351560ms step_avg:46.62ms
+[2025-09-09 14:54:03] [Rank 0] step:7561/10000 train_time:352384ms step_avg:46.61ms
+[2025-09-09 14:54:04] [Rank 0] step:7581/10000 train_time:353209ms step_avg:46.59ms
+[2025-09-09 14:54:05] [Rank 0] step:7601/10000 train_time:354035ms step_avg:46.58ms
+[2025-09-09 14:54:06] [Rank 0] step:7621/10000 train_time:354856ms step_avg:46.56ms
+[2025-09-09 14:54:07] [Rank 0] step:7641/10000 train_time:355949ms step_avg:46.58ms
+[2025-09-09 14:54:07] [Rank 0] step:7661/10000 train_time:356772ms step_avg:46.57ms
+[2025-09-09 14:54:08] [Rank 0] step:7681/10000 train_time:357596ms step_avg:46.56ms
+[2025-09-09 14:54:09] [Rank 0] step:7701/10000 train_time:358421ms step_avg:46.54ms
+[2025-09-09 14:54:10] [Rank 0] step:7721/10000 train_time:359242ms step_avg:46.53ms
+[2025-09-09 14:54:11] [Rank 0] step:7741/10000 train_time:360066ms step_avg:46.51ms
+[2025-09-09 14:54:12] [Rank 0] step:7761/10000 train_time:360888ms step_avg:46.50ms
+[2025-09-09 14:54:12] [Rank 0] step:7781/10000 train_time:361713ms step_avg:46.49ms
+[2025-09-09 14:54:13] [Rank 0] step:7801/10000 train_time:362536ms step_avg:46.47ms
+[2025-09-09 14:54:14] [Rank 0] step:7821/10000 train_time:363359ms step_avg:46.46ms
+[2025-09-09 14:54:15] [Rank 0] step:7841/10000 train_time:364185ms step_avg:46.45ms
+[2025-09-09 14:54:16] [Rank 0] step:7861/10000 train_time:365009ms step_avg:46.43ms
+[2025-09-09 14:54:17] [Rank 0] step:7881/10000 train_time:366117ms step_avg:46.46ms
+[2025-09-09 14:54:18] [Rank 0] step:7901/10000 train_time:367132ms step_avg:46.47ms
+[2025-09-09 14:54:19] [Rank 0] step:7921/10000 train_time:367958ms step_avg:46.45ms
+[2025-09-09 14:54:19] [Rank 0] step:7941/10000 train_time:368784ms step_avg:46.44ms
+[2025-09-09 14:54:20] [Rank 0] step:7961/10000 train_time:369608ms step_avg:46.43ms
+[2025-09-09 14:54:21] [Rank 0] step:7981/10000 train_time:370434ms step_avg:46.41ms
+[2025-09-09 14:54:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:54:22] [Rank 0] PRINT: step:8000/10000 train_loss:0.6134 val_loss:0.6081 train_time:371263ms step_avg:46.41ms
+[2025-09-09 14:54:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:54:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:55:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:55:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:55:43] [Rank 0] Total Loss: 5.0505
+[2025-09-09 14:55:43] [Rank 0] Total FTA (Unweighted): 0.9950
+[2025-09-09 14:55:43] [Rank 0] Total FTA (Weighted): 0.9950
+[2025-09-09 14:55:43] [Rank 0] Group 0 Loss: 4.9346
+[2025-09-09 14:55:43] [Rank 0] Group 1 Loss: 4.6948
+[2025-09-09 14:55:43] [Rank 0] Group 2 Loss: 4.4102
+[2025-09-09 14:55:43] [Rank 0] Group 3 Loss: 4.9186
+[2025-09-09 14:55:43] [Rank 0] Group 4 Loss: 4.8898
+[2025-09-09 14:55:43] [Rank 0] Group 5 Loss: 4.9832
+[2025-09-09 14:55:43] [Rank 0] Group 6 Loss: 4.8691
+[2025-09-09 14:55:43] [Rank 0] Group 7 Loss: 5.0612
+[2025-09-09 14:55:43] [Rank 0] Group 8 Loss: 5.1576
+[2025-09-09 14:55:43] [Rank 0] Group 9 Loss: 5.1096
+[2025-09-09 14:55:43] [Rank 0] Group 10 Loss: 5.2689
+[2025-09-09 14:55:43] [Rank 0] Group 11 Loss: 5.2829
+[2025-09-09 14:55:43] [Rank 0] Group 12 Loss: 5.2210
+[2025-09-09 14:55:43] [Rank 0] Group 13 Loss: 5.3452
+[2025-09-09 14:55:43] [Rank 0] Group 14 Loss: 5.3271
+[2025-09-09 14:55:43] [Rank 0] Group 15 Loss: 5.3343
+[2025-09-09 14:55:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 14:55:43] [Rank 0] Group 14 FTA: 0.9600
+[2025-09-09 14:55:43] [Rank 0] Group 15 FTA: 0.9600
+[2025-09-09 14:55:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:55:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:55:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:55:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:55:45] [Rank 0] step:8001/10000 train_time:371280ms step_avg:46.40ms
+[2025-09-09 14:55:46] [Rank 0] step:8021/10000 train_time:372817ms step_avg:46.48ms
+[2025-09-09 14:55:47] [Rank 0] step:8041/10000 train_time:373642ms step_avg:46.47ms
+[2025-09-09 14:55:48] [Rank 0] step:8061/10000 train_time:374468ms step_avg:46.45ms
+[2025-09-09 14:55:49] [Rank 0] step:8081/10000 train_time:375294ms step_avg:46.44ms
+[2025-09-09 14:55:50] [Rank 0] step:8101/10000 train_time:376120ms step_avg:46.43ms
+[2025-09-09 14:55:50] [Rank 0] step:8121/10000 train_time:376944ms step_avg:46.42ms
+[2025-09-09 14:55:51] [Rank 0] step:8141/10000 train_time:377769ms step_avg:46.40ms
+[2025-09-09 14:55:52] [Rank 0] step:8161/10000 train_time:378595ms step_avg:46.39ms
+[2025-09-09 14:55:53] [Rank 0] step:8181/10000 train_time:379420ms step_avg:46.38ms
+[2025-09-09 14:55:54] [Rank 0] step:8201/10000 train_time:380246ms step_avg:46.37ms
+[2025-09-09 14:55:55] [Rank 0] step:8221/10000 train_time:381072ms step_avg:46.35ms
+[2025-09-09 14:55:55] [Rank 0] step:8241/10000 train_time:381897ms step_avg:46.34ms
+[2025-09-09 14:55:56] [Rank 0] step:8261/10000 train_time:382722ms step_avg:46.33ms
+[2025-09-09 14:55:57] [Rank 0] step:8281/10000 train_time:383547ms step_avg:46.32ms
+[2025-09-09 14:55:58] [Rank 0] step:8301/10000 train_time:384371ms step_avg:46.30ms
+[2025-09-09 14:55:59] [Rank 0] step:8321/10000 train_time:385195ms step_avg:46.29ms
+[2025-09-09 14:56:00] [Rank 0] step:8341/10000 train_time:386019ms step_avg:46.28ms
+[2025-09-09 14:56:00] [Rank 0] step:8361/10000 train_time:386844ms step_avg:46.27ms
+[2025-09-09 14:56:01] [Rank 0] step:8381/10000 train_time:387669ms step_avg:46.26ms
+[2025-09-09 14:56:02] [Rank 0] step:8401/10000 train_time:388494ms step_avg:46.24ms
+[2025-09-09 14:56:03] [Rank 0] step:8421/10000 train_time:389319ms step_avg:46.23ms
+[2025-09-09 14:56:04] [Rank 0] step:8441/10000 train_time:390145ms step_avg:46.22ms
+[2025-09-09 14:56:05] [Rank 0] step:8461/10000 train_time:390969ms step_avg:46.21ms
+[2025-09-09 14:56:05] [Rank 0] step:8481/10000 train_time:391794ms step_avg:46.20ms
+[2025-09-09 14:56:06] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:56:07] [Rank 0] PRINT: step:8500/10000 train_loss:0.6111 val_loss:0.6068 train_time:392622ms step_avg:46.19ms
+[2025-09-09 14:56:07] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:56:07] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:57:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:57:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:57:27] [Rank 0] Total Loss: 5.0141
+[2025-09-09 14:57:27] [Rank 0] Total FTA (Unweighted): 0.9975
+[2025-09-09 14:57:27] [Rank 0] Total FTA (Weighted): 0.9975
+[2025-09-09 14:57:27] [Rank 0] Group 0 Loss: 4.9687
+[2025-09-09 14:57:27] [Rank 0] Group 1 Loss: 4.6138
+[2025-09-09 14:57:28] [Rank 0] Group 2 Loss: 4.4330
+[2025-09-09 14:57:28] [Rank 0] Group 3 Loss: 4.8721
+[2025-09-09 14:57:28] [Rank 0] Group 4 Loss: 4.8588
+[2025-09-09 14:57:28] [Rank 0] Group 5 Loss: 4.9348
+[2025-09-09 14:57:28] [Rank 0] Group 6 Loss: 4.8319
+[2025-09-09 14:57:28] [Rank 0] Group 7 Loss: 5.0121
+[2025-09-09 14:57:28] [Rank 0] Group 8 Loss: 5.1307
+[2025-09-09 14:57:28] [Rank 0] Group 9 Loss: 5.0769
+[2025-09-09 14:57:28] [Rank 0] Group 10 Loss: 5.1807
+[2025-09-09 14:57:28] [Rank 0] Group 11 Loss: 5.2348
+[2025-09-09 14:57:28] [Rank 0] Group 12 Loss: 5.1971
+[2025-09-09 14:57:28] [Rank 0] Group 13 Loss: 5.3024
+[2025-09-09 14:57:28] [Rank 0] Group 14 Loss: 5.2939
+[2025-09-09 14:57:28] [Rank 0] Group 15 Loss: 5.2838
+[2025-09-09 14:57:28] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 14:57:28] [Rank 0] Group 14 FTA: 0.9800
+[2025-09-09 14:57:28] [Rank 0] Group 15 FTA: 0.9800
+[2025-09-09 14:57:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:57:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:57:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:57:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:57:29] [Rank 0] step:8501/10000 train_time:392638ms step_avg:46.19ms
+[2025-09-09 14:57:30] [Rank 0] step:8521/10000 train_time:393472ms step_avg:46.18ms
+[2025-09-09 14:57:31] [Rank 0] step:8541/10000 train_time:394295ms step_avg:46.17ms
+[2025-09-09 14:57:32] [Rank 0] step:8561/10000 train_time:395123ms step_avg:46.15ms
+[2025-09-09 14:57:32] [Rank 0] step:8581/10000 train_time:395945ms step_avg:46.14ms
+[2025-09-09 14:57:33] [Rank 0] step:8601/10000 train_time:396770ms step_avg:46.13ms
+[2025-09-09 14:57:34] [Rank 0] step:8621/10000 train_time:397595ms step_avg:46.12ms
+[2025-09-09 14:57:35] [Rank 0] step:8641/10000 train_time:398418ms step_avg:46.11ms
+[2025-09-09 14:57:36] [Rank 0] step:8661/10000 train_time:399242ms step_avg:46.10ms
+[2025-09-09 14:57:37] [Rank 0] step:8681/10000 train_time:400066ms step_avg:46.09ms
+[2025-09-09 14:57:37] [Rank 0] step:8701/10000 train_time:400890ms step_avg:46.07ms
+[2025-09-09 14:57:38] [Rank 0] step:8721/10000 train_time:401714ms step_avg:46.06ms
+[2025-09-09 14:57:39] [Rank 0] step:8741/10000 train_time:402538ms step_avg:46.05ms
+[2025-09-09 14:57:40] [Rank 0] step:8761/10000 train_time:403370ms step_avg:46.04ms
+[2025-09-09 14:57:41] [Rank 0] step:8781/10000 train_time:404202ms step_avg:46.03ms
+[2025-09-09 14:57:41] [Rank 0] step:8801/10000 train_time:405024ms step_avg:46.02ms
+[2025-09-09 14:57:42] [Rank 0] step:8821/10000 train_time:405849ms step_avg:46.01ms
+[2025-09-09 14:57:43] [Rank 0] step:8841/10000 train_time:406744ms step_avg:46.01ms
+[2025-09-09 14:57:44] [Rank 0] step:8861/10000 train_time:407573ms step_avg:46.00ms
+[2025-09-09 14:57:45] [Rank 0] step:8881/10000 train_time:408396ms step_avg:45.99ms
+[2025-09-09 14:57:46] [Rank 0] step:8901/10000 train_time:409219ms step_avg:45.97ms
+[2025-09-09 14:57:47] [Rank 0] step:8921/10000 train_time:410042ms step_avg:45.96ms
+[2025-09-09 14:57:47] [Rank 0] step:8941/10000 train_time:410866ms step_avg:45.95ms
+[2025-09-09 14:57:48] [Rank 0] step:8961/10000 train_time:411688ms step_avg:45.94ms
+[2025-09-09 14:57:49] [Rank 0] step:8981/10000 train_time:412512ms step_avg:45.93ms
+[2025-09-09 14:57:50] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:57:50] [Rank 0] PRINT: step:9000/10000 train_loss:0.6094 val_loss:0.6059 train_time:413337ms step_avg:45.93ms
+[2025-09-09 14:57:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:57:50] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 14:59:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 14:59:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 14:59:12] [Rank 0] Total Loss: 5.0892
+[2025-09-09 14:59:12] [Rank 0] Total FTA (Unweighted): 0.9969
+[2025-09-09 14:59:12] [Rank 0] Total FTA (Weighted): 0.9969
+[2025-09-09 14:59:12] [Rank 0] Group 0 Loss: 4.9616
+[2025-09-09 14:59:12] [Rank 0] Group 1 Loss: 4.6975
+[2025-09-09 14:59:12] [Rank 0] Group 2 Loss: 4.4273
+[2025-09-09 14:59:12] [Rank 0] Group 3 Loss: 4.9559
+[2025-09-09 14:59:12] [Rank 0] Group 4 Loss: 4.9036
+[2025-09-09 14:59:12] [Rank 0] Group 5 Loss: 5.0056
+[2025-09-09 14:59:12] [Rank 0] Group 6 Loss: 4.9247
+[2025-09-09 14:59:12] [Rank 0] Group 7 Loss: 5.1079
+[2025-09-09 14:59:12] [Rank 0] Group 8 Loss: 5.2264
+[2025-09-09 14:59:12] [Rank 0] Group 9 Loss: 5.1699
+[2025-09-09 14:59:12] [Rank 0] Group 10 Loss: 5.3106
+[2025-09-09 14:59:12] [Rank 0] Group 11 Loss: 5.3446
+[2025-09-09 14:59:12] [Rank 0] Group 12 Loss: 5.2507
+[2025-09-09 14:59:12] [Rank 0] Group 13 Loss: 5.3981
+[2025-09-09 14:59:12] [Rank 0] Group 14 Loss: 5.3624
+[2025-09-09 14:59:12] [Rank 0] Group 15 Loss: 5.3798
+[2025-09-09 14:59:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 14:59:12] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 14:59:12] [Rank 0] Group 14 FTA: 0.9900
+[2025-09-09 14:59:12] [Rank 0] Group 15 FTA: 0.9700
+[2025-09-09 14:59:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 14:59:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 14:59:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 14:59:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 14:59:14] [Rank 0] step:9001/10000 train_time:413354ms step_avg:45.92ms
+[2025-09-09 14:59:15] [Rank 0] step:9021/10000 train_time:414184ms step_avg:45.91ms
+[2025-09-09 14:59:15] [Rank 0] step:9041/10000 train_time:415009ms step_avg:45.90ms
+[2025-09-09 14:59:16] [Rank 0] step:9061/10000 train_time:415834ms step_avg:45.89ms
+[2025-09-09 14:59:17] [Rank 0] step:9081/10000 train_time:416658ms step_avg:45.88ms
+[2025-09-09 14:59:18] [Rank 0] step:9101/10000 train_time:417483ms step_avg:45.87ms
+[2025-09-09 14:59:19] [Rank 0] step:9121/10000 train_time:418306ms step_avg:45.86ms
+[2025-09-09 14:59:20] [Rank 0] step:9141/10000 train_time:419130ms step_avg:45.85ms
+[2025-09-09 14:59:20] [Rank 0] step:9161/10000 train_time:419955ms step_avg:45.84ms
+[2025-09-09 14:59:21] [Rank 0] step:9181/10000 train_time:420779ms step_avg:45.83ms
+[2025-09-09 14:59:22] [Rank 0] step:9201/10000 train_time:421604ms step_avg:45.82ms
+[2025-09-09 14:59:23] [Rank 0] step:9221/10000 train_time:422428ms step_avg:45.81ms
+[2025-09-09 14:59:24] [Rank 0] step:9241/10000 train_time:423253ms step_avg:45.80ms
+[2025-09-09 14:59:24] [Rank 0] step:9261/10000 train_time:424077ms step_avg:45.79ms
+[2025-09-09 14:59:25] [Rank 0] step:9281/10000 train_time:424904ms step_avg:45.78ms
+[2025-09-09 14:59:26] [Rank 0] step:9301/10000 train_time:425727ms step_avg:45.77ms
+[2025-09-09 14:59:27] [Rank 0] step:9321/10000 train_time:426552ms step_avg:45.76ms
+[2025-09-09 14:59:28] [Rank 0] step:9341/10000 train_time:427376ms step_avg:45.75ms
+[2025-09-09 14:59:29] [Rank 0] step:9361/10000 train_time:428200ms step_avg:45.74ms
+[2025-09-09 14:59:29] [Rank 0] step:9381/10000 train_time:429025ms step_avg:45.73ms
+[2025-09-09 14:59:30] [Rank 0] step:9401/10000 train_time:429849ms step_avg:45.72ms
+[2025-09-09 14:59:31] [Rank 0] step:9421/10000 train_time:430673ms step_avg:45.71ms
+[2025-09-09 14:59:32] [Rank 0] step:9441/10000 train_time:431497ms step_avg:45.70ms
+[2025-09-09 14:59:33] [Rank 0] step:9461/10000 train_time:432535ms step_avg:45.72ms
+[2025-09-09 14:59:34] [Rank 0] step:9481/10000 train_time:433586ms step_avg:45.73ms
+[2025-09-09 14:59:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 14:59:35] [Rank 0] PRINT: step:9500/10000 train_loss:0.6081 val_loss:0.6051 train_time:434413ms step_avg:45.73ms
+[2025-09-09 14:59:35] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 14:59:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:00:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:00:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:00:58] [Rank 0] Total Loss: 5.0984
+[2025-09-09 15:00:58] [Rank 0] Total FTA (Unweighted): 0.9981
+[2025-09-09 15:00:58] [Rank 0] Total FTA (Weighted): 0.9981
+[2025-09-09 15:00:58] [Rank 0] Group 0 Loss: 5.0740
+[2025-09-09 15:00:58] [Rank 0] Group 1 Loss: 4.6523
+[2025-09-09 15:00:58] [Rank 0] Group 2 Loss: 4.5058
+[2025-09-09 15:00:58] [Rank 0] Group 3 Loss: 4.9754
+[2025-09-09 15:00:58] [Rank 0] Group 4 Loss: 4.9299
+[2025-09-09 15:00:58] [Rank 0] Group 5 Loss: 4.9917
+[2025-09-09 15:00:58] [Rank 0] Group 6 Loss: 4.9105
+[2025-09-09 15:00:58] [Rank 0] Group 7 Loss: 5.1091
+[2025-09-09 15:00:58] [Rank 0] Group 8 Loss: 5.2160
+[2025-09-09 15:00:58] [Rank 0] Group 9 Loss: 5.1581
+[2025-09-09 15:00:58] [Rank 0] Group 10 Loss: 5.2989
+[2025-09-09 15:00:58] [Rank 0] Group 11 Loss: 5.3297
+[2025-09-09 15:00:58] [Rank 0] Group 12 Loss: 5.2736
+[2025-09-09 15:00:58] [Rank 0] Group 13 Loss: 5.4043
+[2025-09-09 15:00:58] [Rank 0] Group 14 Loss: 5.3583
+[2025-09-09 15:00:58] [Rank 0] Group 15 Loss: 5.3864
+[2025-09-09 15:00:58] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:00:58] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:00:58] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:00:58] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:00:58] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:00:58] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:00:58] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:00:58] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:00:59] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:00:59] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:00:59] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 15:00:59] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 15:00:59] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 15:00:59] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 15:00:59] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 15:00:59] [Rank 0] Group 15 FTA: 0.9700
+[2025-09-09 15:00:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 15:00:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 15:01:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 15:01:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 15:01:00] [Rank 0] step:9501/10000 train_time:434431ms step_avg:45.72ms
+[2025-09-09 15:01:01] [Rank 0] step:9521/10000 train_time:435257ms step_avg:45.72ms
+[2025-09-09 15:01:02] [Rank 0] step:9541/10000 train_time:436085ms step_avg:45.71ms
+[2025-09-09 15:01:02] [Rank 0] step:9561/10000 train_time:436909ms step_avg:45.70ms
+[2025-09-09 15:01:03] [Rank 0] step:9581/10000 train_time:437734ms step_avg:45.69ms
+[2025-09-09 15:01:04] [Rank 0] step:9601/10000 train_time:438558ms step_avg:45.68ms
+[2025-09-09 15:01:05] [Rank 0] step:9621/10000 train_time:439384ms step_avg:45.67ms
+[2025-09-09 15:01:06] [Rank 0] step:9641/10000 train_time:440209ms step_avg:45.66ms
+[2025-09-09 15:01:07] [Rank 0] step:9661/10000 train_time:441311ms step_avg:45.68ms
+[2025-09-09 15:01:08] [Rank 0] step:9681/10000 train_time:442136ms step_avg:45.67ms
+[2025-09-09 15:01:08] [Rank 0] step:9701/10000 train_time:442963ms step_avg:45.66ms
+[2025-09-09 15:01:09] [Rank 0] step:9721/10000 train_time:443790ms step_avg:45.65ms
+[2025-09-09 15:01:10] [Rank 0] step:9741/10000 train_time:444617ms step_avg:45.64ms
+[2025-09-09 15:01:11] [Rank 0] step:9761/10000 train_time:445444ms step_avg:45.64ms
+[2025-09-09 15:01:12] [Rank 0] step:9781/10000 train_time:446273ms step_avg:45.63ms
+[2025-09-09 15:01:13] [Rank 0] step:9801/10000 train_time:447097ms step_avg:45.62ms
+[2025-09-09 15:01:13] [Rank 0] step:9821/10000 train_time:447924ms step_avg:45.61ms
+[2025-09-09 15:01:14] [Rank 0] step:9841/10000 train_time:448750ms step_avg:45.60ms
+[2025-09-09 15:01:15] [Rank 0] step:9861/10000 train_time:449577ms step_avg:45.59ms
+[2025-09-09 15:01:16] [Rank 0] step:9881/10000 train_time:450403ms step_avg:45.58ms
+[2025-09-09 15:01:17] [Rank 0] step:9901/10000 train_time:451229ms step_avg:45.57ms
+[2025-09-09 15:01:18] [Rank 0] step:9921/10000 train_time:452055ms step_avg:45.57ms
+[2025-09-09 15:01:18] [Rank 0] step:9941/10000 train_time:452881ms step_avg:45.56ms
+[2025-09-09 15:01:19] [Rank 0] step:9961/10000 train_time:453708ms step_avg:45.55ms
+[2025-09-09 15:01:20] [Rank 0] step:9981/10000 train_time:454535ms step_avg:45.54ms
+[2025-09-09 15:01:21] [Rank 0] step:10000/10000 train_time:455319ms step_avg:45.53ms
+[2025-09-09 15:01:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:01:21] [Rank 0] PRINT: step:10000/10000 train_loss:0.6070 val_loss:0.6044 train_time:455370ms step_avg:45.54ms
+[2025-09-09 15:01:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:01:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:02:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:02:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:02:42] [Rank 0] Total Loss: 5.1222
+[2025-09-09 15:02:42] [Rank 0] Total FTA (Unweighted): 0.9994
+[2025-09-09 15:02:42] [Rank 0] Total FTA (Weighted): 0.9994
+[2025-09-09 15:02:42] [Rank 0] Group 0 Loss: 5.1143
+[2025-09-09 15:02:42] [Rank 0] Group 1 Loss: 4.6764
+[2025-09-09 15:02:42] [Rank 0] Group 2 Loss: 4.5094
+[2025-09-09 15:02:42] [Rank 0] Group 3 Loss: 5.0008
+[2025-09-09 15:02:42] [Rank 0] Group 4 Loss: 4.9507
+[2025-09-09 15:02:42] [Rank 0] Group 5 Loss: 5.0269
+[2025-09-09 15:02:42] [Rank 0] Group 6 Loss: 4.9303
+[2025-09-09 15:02:42] [Rank 0] Group 7 Loss: 5.1392
+[2025-09-09 15:02:42] [Rank 0] Group 8 Loss: 5.2400
+[2025-09-09 15:02:42] [Rank 0] Group 9 Loss: 5.1780
+[2025-09-09 15:02:42] [Rank 0] Group 10 Loss: 5.3310
+[2025-09-09 15:02:42] [Rank 0] Group 11 Loss: 5.3523
+[2025-09-09 15:02:42] [Rank 0] Group 12 Loss: 5.2919
+[2025-09-09 15:02:42] [Rank 0] Group 13 Loss: 5.4172
+[2025-09-09 15:02:42] [Rank 0] Group 14 Loss: 5.3890
+[2025-09-09 15:02:42] [Rank 0] Group 15 Loss: 5.4080
+[2025-09-09 15:02:42] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 15:02:42] [Rank 0] Group 15 FTA: 0.9900
+[2025-09-09 15:02:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_loss_curves.png
+[2025-09-09 15:02:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/per_class_acc_curves.png
+[2025-09-09 15:02:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_loss_curve.png
+[2025-09-09 15:02:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_44/total_acc_curve.png
+[2025-09-09 15:02:44] [Rank 0] step:10001/10000 train_time:455386ms step_avg:45.53ms
+[2025-09-09 15:02:44] [Rank 0] PRINT: --- Training Finished: Tue Sep  9 15:02:44 2025 ---
+[2025-09-09 15:02:44] [Rank 0] PRINT: Peak memory allocated: 3880 MiB reserved: 4808 MiB
diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/config.json b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..251388e7b17b7f88815323fee7fd364d9baae42a --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 7, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.001, + "adam_lr": 0.002, + "base_dir": "logs_qa_muon_gated/diff_mode", + "sgd_lr": 0.01, + "m_val": 15, + "qa_jsonl_path":
"/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "b27e498f-692c-4d93-bc7c-3d62f6063a1e", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/fixed_eval_indices.json b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], 
"14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 
3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 
1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 
1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..08a2c3235f3cb7088345686cbe2ff146b7e0ed39 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb76ff167e7b390bca43df3e7e2974cdcec31751f7b51b4755be907068e3eca4 +size 335641 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..7c6e75fcd14306a91dfbbabfaccae1593af46e1b --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a86e4c73f6e62da3a139c977ce0ad78c4821d4c6ef653275264ad4d0abe0b56f +size 455829 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..1ef0e8232f123cc30bc33b0209284f83a1b4f7a5 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c8ee230453aa947a8700e00da51773b8aedb8a0394e332442b51bc7b064e22e +size 93356 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..b1de11e4d75a8fa02ca35903e99e3ea9facc6db3 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7660f6f9d667023df3547cab46bfebbe9aa8f30c68224263ebaaef8a2f4a936f +size 108827 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/training_log_b27e498f-692c-4d93-bc7c-3d62f6063a1e.txt b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/training_log_b27e498f-692c-4d93-bc7c-3d62f6063a1e.txt new file mode 100644 index 0000000000000000000000000000000000000000..09acca26d5964c8ee0ceb6762622de6d1a068736 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/training_log_b27e498f-692c-4d93-bc7c-3d62f6063a1e.txt @@ -0,0 +1,5618 @@ +[2025-09-09 15:03:09] [Rank 0] PRINT: --- Script Start: Tue Sep 9 15:03:09 2025 --- +[2025-09-09 15:03:09] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=7, model_parameterization='gated', per_group_k=100, muon_lr=0.001, adam_lr=0.002, base_dir='logs_qa_muon_gated/diff_mode', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
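The script logged below reads its .bin token shards via _load_data_shard, which expects a 256-entry int32 header (magic number 20240520, format version 1, token count) followed by the tokens themselves as uint16. A minimal sketch of producing a compatible shard, assuming the token ids fit in uint16 (write_shard is an illustrative helper, not part of the repository):

import numpy as np

def write_shard(path, token_ids):
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520      # magic number checked by _load_data_shard
    header[1] = 1             # format version
    header[2] = len(token_ids)  # token count
    with open(path, "wb") as f:
        f.write(header.tobytes())
        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())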
+[2025-09-09 15:03:09] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-09 15:03:09] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-09 15:03:09] [Rank 0] PRINT: Run directory: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45 +[2025-09-09 15:03:09] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size //
world_size + file_iter = cycle(files) # cycling the shard list enables multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn, MLP); " + "2: Muon(VO Attn)/Adam(QK Attn, MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn); " + "5: All Adam (no Muon, all applicable matrices to Adam); " + "6: Muon(W_2 MLP)/Adam(Attn, W_1 MLP); " + "7: Muon(VO Attn, MLP)/Adam(QK Attn); " + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); " + "9: pure SGD+momentum; " + "10: Muon(O Attn, MLP)/Adam(V, QK Attn); " + "13: Muon(W_O, W_2 MLP)/Adam(V, QK Attn, W_1 MLP); " + "14: Muon(W_O)/Adam(V, QK Attn, MLP); " + "15: Muon(W_V)/Adam(O, QK Attn, MLP); " + "16: Muon(QKV Attn)/Adam(O Attn, MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo", "gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" +
#val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + 
logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Write the message to the log file exactly once (the original duplicated this write, which doubled every line in the log) + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + +
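+# A worked example of the power-law counts above (a sketch, not part of the original script):
+# for m = 3, group 0 holds 1 class with 2**3 = 8 samples, group 1 holds 1 class with 4 samples,
+# group 2 holds 2 classes with 2 samples each, and group 3 holds 4 classes with 1 sample each:
+#
+#   counts, groups = generate_powerlaw_selection_counts(3)
+#   assert counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#   assert groups == [0, 1, 2, 2, 3, 3, 3, 3]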
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss =
F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def 
evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. + (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") +
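+ # Sanity check (a sketch, not part of the original script): for every Muon/Adam mode above
+ # (i.e. all modes except 9), the two matrix lists should be disjoint and together cover all
+ # hidden matrices:
+ #   muon_ids = {id(p) for p in muon_params_target_list}
+ #   adam_ids = {id(p) for p in adam_matrix_target_list}
+ #   assert muon_ids.isdisjoint(adam_ids)
+ #   assert muon_ids | adam_ids == {id(p) for p in all_attn_matrices + all_mlp_matrices}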
+ # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # note: weight_decay could be added to Adam here + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + muon_lr = exp_args.muon_lr + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK Attn, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
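+    # Readability aid (illustrative only, unused by the run): a compact summary
+    # of the Muon/Adam split selected by the elif chain above, as
+    # mode -> (Muon targets, extra Adam matrix targets). The chain itself
+    # remains the authoritative definition.
+    _MODE_SPLIT_SUMMARY = {
+        0: ("attn QKVO + all MLP", "none"),
+        1: ("attn QK", "attn VO + MLP"),
+        2: ("attn VO", "attn QK + MLP"),
+        3: ("attn QKVO", "MLP"),
+        4: ("MLP", "attn QKVO"),
+        5: ("none (all matrices to Adam)", "attn QKVO + MLP"),
+        6: ("MLP W_2", "attn QKVO + MLP W_1"),
+        7: ("attn VO + MLP", "attn QK"),
+        8: ("attn VO + MLP W_2", "attn QK + MLP W_1"),
+        9: ("n/a (pure SGD+momentum)", "n/a"),
+        10: ("attn O + MLP", "attn QK + attn V"),
+        13: ("attn O + MLP W_2", "attn QK + attn V + MLP W_1"),
+        14: ("attn O", "attn QK + attn V + MLP"),
+        15: ("attn V", "attn QK + attn O + MLP"),
+        16: ("attn QKV", "attn O + MLP"),
+    }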
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # could add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
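+# Worked example of the schedule above (illustrative): with cooldown_frac=0.8
+# and num_iterations=10000, the multiplier stays at 1.0 for the first 20% of
+# steps, then decays linearly toward 0.1:
+#   get_lr(1000)  == 1.0    (x=0.1, stable phase)
+#   get_lr(6000)  == 0.55   (x=0.6, w=0.5 -> 0.5*1.0 + 0.5*0.1)
+#   get_lr(10000) == 0.1    (x=1.0, end of cooldown)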
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile 'model'; the compiled handle is assigned to 'model_compiled' below
+
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group (now supplied via --per_group_k)
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter for building the fixed eval set: ensure a parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, keeping only samples parseable for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-09 15:03:09] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
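+# Usage sketch for print0 (illustrative comment, not part of the run): on the
+# master process every message is appended to the logfile with a timestamp/rank
+# prefix; a message is echoed to stdout only when console=True or when it
+# starts with "PRINT:", in which case that prefix is stripped for the console
+# copy, e.g.
+#   print0("PRINT: hello")   # stdout: hello (prefix stripped); logfile: full line
+#   print0("debug detail")   # logfile only, nothing on stdout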
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK Attn, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
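+    # The setup below flattens the target lists (which may mix single Parameters
+    # and lists of Parameters) and de-duplicates by object identity, so no tensor
+    # is handed to an optimizer twice. A standalone sketch of the same pattern
+    # (illustrative helper, unused by the run; the inline loops below are what
+    # actually execute):
+    def _flatten_unique_sketch(groups):
+        seen, flat = set(), []
+        for g in groups:
+            for p in (g if isinstance(g, list) else [g]):
+                if p is not None and id(p) not in seen:
+                    seen.add(id(p))
+                    flat.append(p)
+        return flat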
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # could add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = []  # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+    muon_lr = exp_args.muon_lr
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params:  # sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
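Editor's note on the grouping just defined: in this gated parameterization the MLP input-side matrices (c_fc and c_up) together form W_1 and the output projection (c_proj) forms W_2, which is what lets modes 6, 8, and 13 below treat the two halves of the gated MLP differently.

    # W_1 = {mlp.c_fc.weight, mlp.c_up.weight}  -> mlp_w1_group
    # W_2 = {mlp.c_proj.weight}                 -> mlp_w2_group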
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []  # matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0:  # original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # all matrices go to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # SGD + momentum
+        # This mode uses SGD with momentum for all parameters; no Muon or Adam.
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # learning rate from the command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP; Adam on V Attn, QK Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:  # Muon on W_2, W_O; Adam on V Attn, QK Attn, W_1
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:  # Muon on W_O; Adam on V Attn, QK Attn, MLP
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:  # Muon on W_V; Adam on O Attn, QK Attn, MLP
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:  # Muon on QKV Attn; Adam on O Attn, MLP
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04)
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # only add the group if it has params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1]  # start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
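Editor's note tying this branch to the run log below: this run uses model_parameterization="gated" with optimizer_mode=7, so Muon receives every v_w and attention c_proj plus all MLP matrices.

# Mode 7 (gated): flat_unique_muon_params = attn_vo_group + all_mlp_matrices
# -> "Muon optimizer is active with 48 parameters." in the startup log below.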
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # original assert; could fail on the last step when step == num_iterations
+    # --- MODIFICATION: clamp instead of asserting, allowing x = 1 on the last step ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
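Editor's sketch, not part of the original script: a standalone check of the schedule shape above, with this run's config values (num_iterations=10000, cooldown_frac=0.8) inlined as defaults.

def _lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    # Same stable-then-decay rule as get_lr above, independent of `args`.
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)
    return w * 1.0 + (1 - w) * 0.1

assert _lr_multiplier(0) == 1.0                  # constant phase: steps 0..1999
assert abs(_lr_multiplier(6000) - 0.55) < 1e-9   # halfway through cooldown: w = 0.5
assert abs(_lr_multiplier(10000) - 0.1) < 1e-9   # final multiplier: 10% of the initial LR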
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: clamp x here as well instead of asserting ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation; 'model_compiled' does not exist before this line
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Gradient clipping for SGD mode during warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    # PER_GROUP_K = 100  # number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter when building the fixed eval set: keep only text with a parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, keeping only samples that can be parsed for FTA
+        buckets = defaultdict(list)  # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:]  # take everything if there are fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # Already defined above: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
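Editor's note on the size of the fixed eval set (derived from the config and the logs below): with m_val=15 the QA data falls into 16 groups (IDs 0-15 in the per-group logs) and per_group_k=100.

    # 16 groups * 100 samples/group = 1600 -> matches the recurring log line
    # "Fixed-eval set loaded with 1600 samples."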
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    # QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    # M_FOR_POWERLAW = 15
+    # NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
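Editor's note working out the warning above for this run (val_seq_len=16384 from the config; val_batch_size=65536 from the logs, implying world_size = 4):

        # val_batch_size = world_size * val_seq_len = 4 * 16384 = 65536
        # val_num_steps  = 491520 // 65536 = 7   (491520 / 65536 = 7.5)
        # -> 491520 - 7 * 65536 = 32768 tokens are skipped each validation pass,
        #    which is why the divisibility warning repeats at every eval step below.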
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / step if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                # num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # simple (unweighted) average
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+        # if last_step:
+        #     if master_process and args.save_checkpoint:
+        #         if run_dir_path_str:
+        #             checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+        #             checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+        #             checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+        #             log_checkpoint = dict(
+        #                 step=step,
+        #                 code=code,
+        #                 model=model_compiled.state_dict(),
+        #                 optimizers=[opt.state_dict() for opt in optimizers]
+        #             )
+        #             torch.save(log_checkpoint, str(checkpoint_path))
+        #             print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+        #         else:
+        #             print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+        #     break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        # Muon momentum warmup: ramp linearly from 0.85 to 0.95 over the first 300 steps
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-09 15:03:09] [Rank 0] PRINT: Constructing model...
+[2025-09-09 15:03:10] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-09 15:03:10] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-09 15:03:10] [Rank 0] PRINT: Testing model forward function:
+[2025-09-09 15:03:15] [Rank 0] PRINT: Model test - Result type:
+[2025-09-09 15:03:15] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-09 15:03:15] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-09 15:03:15] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-09 15:03:15] [Rank 0] PRINT: Model returns:
+[2025-09-09 15:03:15] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-09 15:03:15] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 7
+[2025-09-09 15:03:15] [Rank 0] PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: 0.002).
+[2025-09-09 15:03:15] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-09-09 15:03:15] [Rank 0] PRINT: Muon optimizer is active with 48 parameters.
+[2025-09-09 15:03:15] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-09 15:03:19] [Rank 0] PRINT: Model compilation complete.
+[2025-09-09 15:03:19] [Rank 0] PRINT: Starting warmup...
+[2025-09-09 15:04:06] [Rank 0] PRINT: Warmup complete.
+[2025-09-09 15:04:06] [Rank 0] PRINT: Starting training...
+[2025-09-09 15:04:13] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/fixed_eval_indices.json
+[2025-09-09 15:04:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:04:16] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-09 15:04:50] [Rank 0] step:21/10000 train_time:33301ms step_avg:1585.77ms
+[2025-09-09 15:04:51] [Rank 0] step:41/10000 train_time:34113ms step_avg:832.01ms
+[2025-09-09 15:04:51] [Rank 0] step:61/10000 train_time:34923ms step_avg:572.51ms
+[2025-09-09 15:04:52] [Rank 0] step:81/10000 train_time:35731ms step_avg:441.12ms
+[2025-09-09 15:04:53] [Rank 0] step:101/10000 train_time:36540ms step_avg:361.78ms
+[2025-09-09 15:04:54] [Rank 0] step:121/10000 train_time:37348ms step_avg:308.66ms
+[2025-09-09 15:04:55] [Rank 0] step:141/10000 train_time:38157ms step_avg:270.61ms
+[2025-09-09 15:04:55] [Rank 0] step:161/10000 train_time:38965ms step_avg:242.02ms
+[2025-09-09 15:04:56] [Rank 0] step:181/10000 train_time:39773ms step_avg:219.74ms
+[2025-09-09 15:04:57] [Rank 0] step:201/10000 train_time:40582ms step_avg:201.90ms
+[2025-09-09 15:04:58] [Rank 0] step:221/10000 train_time:41391ms step_avg:187.29ms
+[2025-09-09 15:04:59] [Rank 0] step:241/10000 train_time:42199ms step_avg:175.10ms
+[2025-09-09 15:04:59] [Rank 0] step:261/10000 train_time:43008ms step_avg:164.78ms
+[2025-09-09 15:05:00] [Rank 0] step:281/10000 train_time:43823ms step_avg:155.96ms
+[2025-09-09 15:05:01] [Rank 0] step:301/10000 train_time:44714ms step_avg:148.55ms
+[2025-09-09 15:05:02] [Rank 0] step:321/10000 train_time:45523ms step_avg:141.82ms
+[2025-09-09 15:05:03] [Rank 0] step:341/10000 train_time:46333ms step_avg:135.87ms
+[2025-09-09 15:05:04] [Rank 0] step:361/10000 train_time:47142ms step_avg:130.59ms
+[2025-09-09 15:05:04] [Rank 0] step:381/10000 train_time:47953ms step_avg:125.86ms
+[2025-09-09 15:05:05] [Rank 0] step:401/10000 train_time:48761ms step_avg:121.60ms
+[2025-09-09 15:05:06] [Rank 0] step:421/10000 train_time:49572ms step_avg:117.75ms
+[2025-09-09 15:05:07] [Rank 0] step:441/10000 train_time:50382ms step_avg:114.25ms
+[2025-09-09 15:05:08] [Rank 0] step:461/10000 train_time:51193ms step_avg:111.05ms
+[2025-09-09 15:05:08] [Rank 0] step:481/10000 train_time:52002ms step_avg:108.11ms
+[2025-09-09 15:05:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:05:10] [Rank 0] PRINT: step:500/10000 train_loss:2.9285 val_loss:1.0534 train_time:52814ms step_avg:105.63ms
+[2025-09-09 15:05:10] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:05:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:06:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:06:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:06:31] [Rank 0] Total Loss: 4.0158
+[2025-09-09 15:06:31] [Rank 0] Total FTA (Unweighted): 0.5319
+[2025-09-09 15:06:31] [Rank 0] Total FTA (Weighted): 0.5319
+[2025-09-09 15:06:31] [Rank 0] Group 0 Loss: 3.5806
+[2025-09-09 15:06:31] [Rank 0] Group 1 Loss: 3.4399
+[2025-09-09 15:06:31] [Rank 0] Group 2 Loss: 3.3461
+[2025-09-09 15:06:31] [Rank 0] Group 3 Loss: 3.6681
+[2025-09-09 15:06:31] [Rank 0] Group 4 Loss: 3.5665
+[2025-09-09 15:06:31] [Rank 0] Group 5 Loss: 3.7317
+[2025-09-09 15:06:31] [Rank 0] Group 6 Loss: 3.7420
+[2025-09-09 15:06:31] [Rank 0] Group 7 Loss: 3.8296
+[2025-09-09 15:06:31] [Rank 0] Group 8 Loss: 4.0578
+[2025-09-09 15:06:31] [Rank 0] Group 9 Loss: 4.1537
+[2025-09-09 15:06:31] [Rank 0] Group 10 Loss: 4.3690
+[2025-09-09 15:06:31] [Rank 0] Group 11 Loss: 4.4315
+[2025-09-09 15:06:31] [Rank 0] Group 12 Loss: 4.4812
+[2025-09-09 15:06:31] [Rank 0] Group 13 Loss: 4.6043
+[2025-09-09 15:06:31] [Rank 0] Group 14 Loss: 4.6095
+[2025-09-09 15:06:31] [Rank 0] Group 15 Loss: 4.6419
+[2025-09-09 15:06:31] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:06:31] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:06:31] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:06:31] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:06:31] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:06:31] [Rank 0] Group 5 FTA: 0.8100
+[2025-09-09 15:06:31] [Rank 0] Group 6 FTA: 0.5900
+[2025-09-09 15:06:31] [Rank 0] Group 7 FTA: 0.5200
+[2025-09-09 15:06:31] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-09 15:06:31] [Rank 0] Group 9 FTA: 0.3000
+[2025-09-09 15:06:31] [Rank 0] Group 10 FTA: 0.2400
+[2025-09-09 15:06:31] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-09 15:06:31] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-09 15:06:31] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-09 15:06:31] [Rank 0] Group 14 FTA: 0.0800
+[2025-09-09 15:06:31] [Rank 0] Group 15 FTA: 0.0500
+[2025-09-09 15:06:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png
+[2025-09-09 15:06:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png
+[2025-09-09 15:06:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png
+[2025-09-09 15:06:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png
+[2025-09-09 15:06:33] [Rank 0] step:501/10000 train_time:52832ms step_avg:105.45ms
+[2025-09-09 15:06:34] [Rank 0] step:521/10000 train_time:53666ms step_avg:103.01ms
+[2025-09-09 15:06:35] [Rank 0] step:541/10000 train_time:54476ms step_avg:100.70ms
+[2025-09-09 15:06:35] [Rank 0] step:561/10000 train_time:55286ms step_avg:98.55ms
+[2025-09-09 15:06:36] [Rank 0] step:581/10000 train_time:56098ms step_avg:96.55ms
+[2025-09-09 15:06:37] [Rank 0] step:601/10000 train_time:56906ms step_avg:94.68ms
+[2025-09-09 15:06:38] [Rank 0] step:621/10000 train_time:57715ms step_avg:92.94ms
+[2025-09-09 15:06:39] [Rank 0] step:641/10000 train_time:58524ms step_avg:91.30ms
+[2025-09-09 15:06:39] [Rank 0] step:661/10000 train_time:59334ms step_avg:89.76ms
+[2025-09-09 15:06:40] [Rank 0] step:681/10000 train_time:60143ms step_avg:88.32ms
+[2025-09-09 15:06:41] [Rank 0] step:701/10000 train_time:60951ms step_avg:86.95ms
+[2025-09-09 15:06:42] [Rank 0] step:721/10000 train_time:61760ms step_avg:85.66ms
+[2025-09-09 15:06:43] [Rank 0] step:741/10000 train_time:62568ms step_avg:84.44ms
+[2025-09-09 15:06:43] [Rank 0] step:761/10000 train_time:63381ms step_avg:83.29ms
+[2025-09-09 15:06:44] [Rank 0] step:781/10000 train_time:64195ms step_avg:82.20ms
+[2025-09-09 15:06:45] [Rank 0] step:801/10000 train_time:65008ms step_avg:81.16ms
+[2025-09-09 15:06:46] [Rank 0] step:821/10000 train_time:66092ms step_avg:80.50ms
+[2025-09-09 15:06:47] [Rank 0] step:841/10000 train_time:66906ms step_avg:79.56ms
+[2025-09-09 15:06:48] [Rank 0] step:861/10000 train_time:67720ms step_avg:78.65ms
+[2025-09-09 15:06:49] [Rank 0] step:881/10000 train_time:68536ms step_avg:77.79ms
+[2025-09-09 15:06:49] [Rank 0] step:901/10000 train_time:69350ms step_avg:76.97ms
+[2025-09-09 15:06:50] [Rank 0] step:921/10000 train_time:70162ms step_avg:76.18ms
+[2025-09-09 15:06:51] [Rank 0] step:941/10000 train_time:70976ms step_avg:75.43ms
+[2025-09-09 15:06:52] [Rank 0] step:961/10000 train_time:71792ms step_avg:74.71ms
+[2025-09-09 15:06:53] [Rank 0] step:981/10000 train_time:72607ms step_avg:74.01ms
+[2025-09-09 15:06:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:06:54] [Rank 0] PRINT: step:1000/10000 train_loss:0.9228 val_loss:0.8236 train_time:73425ms step_avg:73.42ms
+[2025-09-09 15:06:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:06:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:08:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:08:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:08:15] [Rank 0] Total Loss: 4.2022
+[2025-09-09 15:08:15] [Rank 0] Total FTA (Unweighted): 0.7188
+[2025-09-09 15:08:15] [Rank 0] Total FTA (Weighted): 0.7188
+[2025-09-09 15:08:15] [Rank 0] Group 0 Loss: 4.1308
+[2025-09-09 15:08:15] [Rank 0] Group 1 Loss: 3.9240
+[2025-09-09 15:08:15] [Rank 0] Group 2 Loss: 3.6017
+[2025-09-09 15:08:15] [Rank 0] Group 3 Loss: 4.0041
+[2025-09-09 15:08:15] [Rank 0] Group 4 Loss: 3.9782
+[2025-09-09 15:08:15] [Rank 0] Group 5 Loss: 3.9545
+[2025-09-09 15:08:15] [Rank 0] Group 6 Loss: 3.9482
+[2025-09-09 15:08:15] [Rank 0] Group 7 Loss: 4.0115
+[2025-09-09 15:08:15] [Rank 0] Group 8 Loss: 4.1300
+[2025-09-09 15:08:15] [Rank 0] Group 9 Loss: 4.1375
+[2025-09-09 15:08:15] [Rank 0] Group 10 Loss: 4.3453
+[2025-09-09 15:08:15] [Rank 0] Group 11 Loss: 4.4834
+[2025-09-09 15:08:15] [Rank 0] Group 12 Loss: 4.5021
+[2025-09-09 15:08:15] [Rank 0] Group 13 Loss: 4.6523
+[2025-09-09 15:08:15] [Rank 0] Group 14 Loss: 4.6548
+[2025-09-09 15:08:15] [Rank 0] Group 15 Loss: 4.7765
+[2025-09-09 15:08:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:08:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:08:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:08:15] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:08:15] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:08:15] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:08:15] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:08:15] [Rank 0] Group 7 FTA: 0.9800
+[2025-09-09 15:08:15] [Rank 0] Group 8 FTA: 0.8400
+[2025-09-09 15:08:15] [Rank 0] Group 9 FTA: 0.7100
+[2025-09-09 15:08:15] [Rank 0] Group 10 FTA: 0.7000
+[2025-09-09 15:08:15] [Rank 0] Group 11 FTA: 0.4900
+[2025-09-09 15:08:15] [Rank 0] Group 12 FTA: 0.3400
+[2025-09-09 15:08:15] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-09 15:08:15] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-09 15:08:16] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-09 15:08:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png
+[2025-09-09 15:08:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png
+[2025-09-09 15:08:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png
+[2025-09-09 15:08:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png
+[2025-09-09 15:08:17] [Rank 0] step:1001/10000 train_time:73442ms step_avg:73.37ms
+[2025-09-09 15:08:18] [Rank 0] step:1021/10000 train_time:74262ms step_avg:72.73ms
+[2025-09-09 15:08:19] [Rank 0] step:1041/10000 train_time:75079ms step_avg:72.12ms
+[2025-09-09 15:08:20] [Rank 0] step:1061/10000 train_time:75894ms step_avg:71.53ms
+[2025-09-09 15:08:21] [Rank 0] step:1081/10000 train_time:76709ms step_avg:70.96ms
+[2025-09-09 15:08:21] [Rank 0] step:1101/10000 train_time:77524ms step_avg:70.41ms
+[2025-09-09 15:08:22] [Rank 0] step:1121/10000 train_time:78340ms step_avg:69.88ms
+[2025-09-09 15:08:23] [Rank 0] step:1141/10000 train_time:79156ms step_avg:69.37ms
+[2025-09-09 15:08:24] [Rank 0] step:1161/10000 train_time:79971ms step_avg:68.88ms
+[2025-09-09 15:08:25] [Rank 0] step:1181/10000 train_time:80787ms step_avg:68.41ms
+[2025-09-09 15:08:26] [Rank 0] step:1201/10000 train_time:81602ms step_avg:67.94ms
+[2025-09-09 15:08:26] [Rank 0] step:1221/10000 train_time:82416ms step_avg:67.50ms
+[2025-09-09 15:08:27] [Rank 0] step:1241/10000 train_time:83232ms step_avg:67.07ms
+[2025-09-09 15:08:28] [Rank 0] step:1261/10000 train_time:84046ms step_avg:66.65ms
+[2025-09-09 15:08:29] [Rank 0] step:1281/10000 train_time:84860ms step_avg:66.25ms
+[2025-09-09 15:08:30] [Rank 0] step:1301/10000 train_time:85675ms step_avg:65.85ms
+[2025-09-09 15:08:30] [Rank 0] step:1321/10000 train_time:86490ms step_avg:65.47ms
+[2025-09-09 15:08:31] [Rank 0] step:1341/10000 train_time:87304ms step_avg:65.10ms
+[2025-09-09 15:08:32] [Rank 0] step:1361/10000 train_time:88118ms step_avg:64.75ms
+[2025-09-09 15:08:33] [Rank 0] step:1381/10000 train_time:88932ms step_avg:64.40ms
+[2025-09-09 15:08:34] [Rank 0] step:1401/10000 train_time:89746ms step_avg:64.06ms
+[2025-09-09 15:08:34] [Rank 0] step:1421/10000 train_time:90561ms step_avg:63.73ms
+[2025-09-09 15:08:35] [Rank 0] step:1441/10000 train_time:91375ms step_avg:63.41ms
+[2025-09-09 15:08:36] [Rank 0] step:1461/10000 train_time:92189ms step_avg:63.10ms
+[2025-09-09 15:08:37] [Rank 0] step:1481/10000 train_time:93003ms step_avg:62.80ms
+[2025-09-09 15:08:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:08:38] [Rank 0] PRINT: step:1500/10000 train_loss:0.7925 val_loss:0.7467 train_time:93820ms step_avg:62.55ms
+[2025-09-09 15:08:38] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:08:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:10:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:10:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:10:00] [Rank 0] Total Loss: 4.3794
+[2025-09-09 15:10:00] [Rank 0] Total FTA (Unweighted): 0.7975
+[2025-09-09 15:10:00] [Rank 0] Total FTA (Weighted): 0.7975
+[2025-09-09 15:10:00] [Rank 0] Group 0 Loss: 4.2422
+[2025-09-09 15:10:00] [Rank 0] Group 1 Loss: 4.0001
+[2025-09-09 15:10:00] [Rank 0] Group 2 Loss: 3.8525
+[2025-09-09 15:10:00] [Rank 0] Group 3 Loss: 4.2974
+[2025-09-09 15:10:00] [Rank 0] Group 4 Loss: 4.2144
+[2025-09-09 15:10:00] [Rank 0] Group 5 Loss: 4.2287
+[2025-09-09 15:10:00] [Rank 0] Group 6 Loss: 4.1686
+[2025-09-09 15:10:00] [Rank 0] Group 7 Loss: 4.2214
+[2025-09-09 15:10:00] [Rank 0] Group 8 Loss: 4.3653
+[2025-09-09 15:10:00] [Rank 0] Group 9 Loss: 4.3078
+[2025-09-09 15:10:00] [Rank 0] Group 10 Loss: 4.5001
+[2025-09-09 15:10:00] [Rank 0] Group 11 Loss: 4.5691
+[2025-09-09 15:10:00] [Rank 0] Group 12 Loss: 4.6234
+[2025-09-09 15:10:00] [Rank 0] Group 13 Loss: 4.7476
+[2025-09-09 15:10:00] [Rank 0] Group 14 Loss: 4.8238
+[2025-09-09 15:10:00] [Rank 0] Group 15 Loss: 4.9084
+[2025-09-09 15:10:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:10:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:10:00] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:10:00] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:10:00] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:10:00] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:10:00] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:10:00] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:10:00] [Rank 0] Group 8 FTA: 0.9700
+[2025-09-09 15:10:00] [Rank 0] Group 9 FTA: 0.8600
+[2025-09-09 15:10:00] [Rank 0] Group 10 FTA: 0.9200
+[2025-09-09 15:10:00] [Rank 0] Group 11 FTA: 0.7500
+[2025-09-09 15:10:00] [Rank 0] Group 12 FTA: 0.6300
+[2025-09-09 15:10:00] [Rank 0] Group 13 FTA: 0.3500
+[2025-09-09 15:10:00] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-09 15:10:00] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-09 15:10:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png
+[2025-09-09 15:10:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png
+[2025-09-09 15:10:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png
+[2025-09-09 15:10:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png
+[2025-09-09 15:10:01] [Rank 0] step:1501/10000 train_time:93838ms step_avg:62.52ms
+[2025-09-09 15:10:02] [Rank 0] step:1521/10000 train_time:94667ms step_avg:62.24ms
+[2025-09-09 15:10:03] [Rank 0] step:1541/10000 train_time:95481ms step_avg:61.96ms
+[2025-09-09 15:10:04] [Rank 0] step:1561/10000 train_time:96296ms step_avg:61.69ms
+[2025-09-09 15:10:05] [Rank 0] step:1581/10000 train_time:97111ms step_avg:61.42ms
+[2025-09-09 15:10:06] [Rank 0] step:1601/10000 train_time:98142ms step_avg:61.30ms
+[2025-09-09 15:10:07] [Rank 0] step:1621/10000 train_time:99174ms step_avg:61.18ms
+[2025-09-09 15:10:08] [Rank 0] step:1641/10000 train_time:100314ms step_avg:61.13ms
+[2025-09-09 15:10:09] [Rank 0] step:1661/10000 train_time:101129ms step_avg:60.88ms
+[2025-09-09 15:10:10] [Rank 0] step:1681/10000 train_time:101944ms step_avg:60.64ms
+[2025-09-09 15:10:10] [Rank 0] step:1701/10000 train_time:102760ms step_avg:60.41ms
+[2025-09-09 15:10:11] [Rank 0] step:1721/10000 train_time:103574ms step_avg:60.18ms
+[2025-09-09 15:10:12] [Rank 0] step:1741/10000 train_time:104389ms step_avg:59.96ms
+[2025-09-09 15:10:13] [Rank 0] step:1761/10000 train_time:105205ms step_avg:59.74ms
+[2025-09-09 15:10:14] [Rank 0] step:1781/10000 train_time:106020ms step_avg:59.53ms
+[2025-09-09 15:10:14] [Rank 0] step:1801/10000 train_time:106836ms step_avg:59.32ms
+[2025-09-09 15:10:15] [Rank 0] step:1821/10000 train_time:107651ms step_avg:59.12ms
+[2025-09-09 15:10:16] [Rank 0] step:1841/10000 train_time:108467ms step_avg:58.92ms
+[2025-09-09 15:10:17] [Rank 0] step:1861/10000 train_time:109281ms step_avg:58.72ms
+[2025-09-09 15:10:18] [Rank 0] step:1881/10000 train_time:110095ms step_avg:58.53ms
+[2025-09-09 15:10:19] [Rank 0] step:1901/10000 train_time:110909ms step_avg:58.34ms
+[2025-09-09 15:10:19] [Rank 0] step:1921/10000 train_time:111724ms step_avg:58.16ms
+[2025-09-09 15:10:20] [Rank 0] step:1941/10000 train_time:112539ms step_avg:57.98ms
+[2025-09-09 15:10:21] [Rank 0] step:1961/10000 train_time:113353ms step_avg:57.80ms
+[2025-09-09 15:10:22] [Rank 0] step:1981/10000 train_time:114170ms step_avg:57.63ms
+[2025-09-09 15:10:23] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:10:23] [Rank 0] PRINT: step:2000/10000 train_loss:0.7397 val_loss:0.7071 train_time:114986ms step_avg:57.49ms
+[2025-09-09 15:10:23] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:10:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:11:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:11:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:11:45] [Rank 0] Total Loss: 4.5404
+[2025-09-09 15:11:45] [Rank 0] Total FTA (Unweighted): 0.8444
+[2025-09-09 15:11:45] [Rank 0] Total FTA (Weighted): 0.8444
+[2025-09-09 15:11:45] [Rank 0] Group 0 Loss: 4.4626
+[2025-09-09 15:11:45] [Rank 0] Group 1 Loss: 4.4528
+[2025-09-09 15:11:45] [Rank 0] Group 2 Loss: 4.0173
+[2025-09-09 15:11:45] [Rank 0] Group 3 Loss: 4.4258
+[2025-09-09 15:11:45] [Rank 0] Group 4 Loss: 4.4081
+[2025-09-09 15:11:45] [Rank 0] Group 5 Loss: 4.3697
+[2025-09-09 15:11:45] [Rank 0] Group 6 Loss: 4.3822
+[2025-09-09 15:11:45] [Rank 0] Group 7 Loss: 4.4138
+[2025-09-09 15:11:45] [Rank 0] Group 8 Loss: 4.4970
+[2025-09-09 15:11:45] [Rank 0] Group 9 Loss: 4.4671
+[2025-09-09 15:11:45] [Rank 0] Group 10 Loss: 4.6261
+[2025-09-09 15:11:45] [Rank 0] Group 11 Loss: 4.6979
+[2025-09-09 15:11:45] [Rank 0] Group 12 Loss: 4.7051
+[2025-09-09 15:11:45] [Rank 0] Group 13 Loss: 4.8536
+[2025-09-09 15:11:45] [Rank 0] Group 14 Loss: 4.8708
+[2025-09-09 15:11:45] [Rank 0] Group 15 Loss: 4.9973
+[2025-09-09 15:11:45] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:11:45] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:11:45] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:11:45] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:11:45] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:11:45] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:11:45] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 15:11:45] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 15:11:45] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:11:45] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:11:45] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:11:45] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:11:45] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:11:45] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:11:45] [Rank 0] Group 9 FTA: 0.9800 +[2025-09-09 15:11:45] [Rank 0] Group 9 FTA: 0.9800 +[2025-09-09 15:11:45] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-09 15:11:45] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-09 15:11:45] [Rank 0] Group 11 FTA: 0.8900 +[2025-09-09 15:11:45] [Rank 0] Group 11 FTA: 0.8900 +[2025-09-09 15:11:45] [Rank 0] Group 12 FTA: 0.7400 +[2025-09-09 15:11:45] [Rank 0] Group 12 FTA: 0.7400 +[2025-09-09 15:11:45] [Rank 0] Group 13 FTA: 0.4700 +[2025-09-09 15:11:45] [Rank 0] Group 13 FTA: 0.4700 +[2025-09-09 15:11:45] [Rank 0] Group 14 FTA: 0.2700 +[2025-09-09 15:11:45] [Rank 0] Group 14 FTA: 0.2700 +[2025-09-09 15:11:45] [Rank 0] Group 15 FTA: 0.1900 +[2025-09-09 15:11:45] [Rank 0] Group 15 FTA: 0.1900 +[2025-09-09 15:11:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:11:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:11:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:11:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:11:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:11:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:11:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:11:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:11:47] [Rank 0] step:2001/10000 train_time:115003ms step_avg:57.47ms +[2025-09-09 15:11:47] [Rank 0] step:2001/10000 train_time:115003ms step_avg:57.47ms +[2025-09-09 15:11:48] [Rank 0] step:2021/10000 train_time:116515ms step_avg:57.65ms +[2025-09-09 15:11:48] [Rank 0] step:2021/10000 train_time:116515ms step_avg:57.65ms +[2025-09-09 15:11:49] [Rank 0] step:2041/10000 train_time:117329ms step_avg:57.49ms +[2025-09-09 15:11:49] [Rank 0] step:2041/10000 train_time:117329ms step_avg:57.49ms +[2025-09-09 15:11:50] [Rank 0] step:2061/10000 train_time:118143ms step_avg:57.32ms +[2025-09-09 15:11:50] [Rank 0] step:2061/10000 train_time:118143ms step_avg:57.32ms +[2025-09-09 15:11:51] [Rank 0] step:2081/10000 train_time:118957ms step_avg:57.16ms +[2025-09-09 15:11:51] [Rank 0] step:2081/10000 train_time:118957ms step_avg:57.16ms +[2025-09-09 15:11:51] [Rank 0] step:2101/10000 train_time:119812ms step_avg:57.03ms +[2025-09-09 15:11:51] [Rank 0] step:2101/10000 train_time:119812ms step_avg:57.03ms +[2025-09-09 15:11:52] [Rank 0] 
step:2121/10000 train_time:120626ms step_avg:56.87ms +[2025-09-09 15:11:52] [Rank 0] step:2121/10000 train_time:120626ms step_avg:56.87ms +[2025-09-09 15:11:53] [Rank 0] step:2141/10000 train_time:121440ms step_avg:56.72ms +[2025-09-09 15:11:53] [Rank 0] step:2141/10000 train_time:121440ms step_avg:56.72ms +[2025-09-09 15:11:54] [Rank 0] step:2161/10000 train_time:122255ms step_avg:56.57ms +[2025-09-09 15:11:54] [Rank 0] step:2161/10000 train_time:122255ms step_avg:56.57ms +[2025-09-09 15:11:55] [Rank 0] step:2181/10000 train_time:123070ms step_avg:56.43ms +[2025-09-09 15:11:55] [Rank 0] step:2181/10000 train_time:123070ms step_avg:56.43ms +[2025-09-09 15:11:55] [Rank 0] step:2201/10000 train_time:123886ms step_avg:56.29ms +[2025-09-09 15:11:55] [Rank 0] step:2201/10000 train_time:123886ms step_avg:56.29ms +[2025-09-09 15:11:56] [Rank 0] step:2221/10000 train_time:124701ms step_avg:56.15ms +[2025-09-09 15:11:56] [Rank 0] step:2221/10000 train_time:124701ms step_avg:56.15ms +[2025-09-09 15:11:57] [Rank 0] step:2241/10000 train_time:125521ms step_avg:56.01ms +[2025-09-09 15:11:57] [Rank 0] step:2241/10000 train_time:125521ms step_avg:56.01ms +[2025-09-09 15:11:58] [Rank 0] step:2261/10000 train_time:126345ms step_avg:55.88ms +[2025-09-09 15:11:58] [Rank 0] step:2261/10000 train_time:126345ms step_avg:55.88ms +[2025-09-09 15:11:59] [Rank 0] step:2281/10000 train_time:127164ms step_avg:55.75ms +[2025-09-09 15:11:59] [Rank 0] step:2281/10000 train_time:127164ms step_avg:55.75ms +[2025-09-09 15:12:00] [Rank 0] step:2301/10000 train_time:127986ms step_avg:55.62ms +[2025-09-09 15:12:00] [Rank 0] step:2301/10000 train_time:127986ms step_avg:55.62ms +[2025-09-09 15:12:00] [Rank 0] step:2321/10000 train_time:128807ms step_avg:55.50ms +[2025-09-09 15:12:00] [Rank 0] step:2321/10000 train_time:128807ms step_avg:55.50ms +[2025-09-09 15:12:01] [Rank 0] step:2341/10000 train_time:129627ms step_avg:55.37ms +[2025-09-09 15:12:01] [Rank 0] step:2341/10000 train_time:129627ms step_avg:55.37ms +[2025-09-09 15:12:02] [Rank 0] step:2361/10000 train_time:130447ms step_avg:55.25ms +[2025-09-09 15:12:02] [Rank 0] step:2361/10000 train_time:130447ms step_avg:55.25ms +[2025-09-09 15:12:03] [Rank 0] step:2381/10000 train_time:131267ms step_avg:55.13ms +[2025-09-09 15:12:03] [Rank 0] step:2381/10000 train_time:131267ms step_avg:55.13ms +[2025-09-09 15:12:04] [Rank 0] step:2401/10000 train_time:132088ms step_avg:55.01ms +[2025-09-09 15:12:04] [Rank 0] step:2401/10000 train_time:132088ms step_avg:55.01ms +[2025-09-09 15:12:04] [Rank 0] step:2421/10000 train_time:132908ms step_avg:54.90ms +[2025-09-09 15:12:04] [Rank 0] step:2421/10000 train_time:132908ms step_avg:54.90ms +[2025-09-09 15:12:05] [Rank 0] step:2441/10000 train_time:133728ms step_avg:54.78ms +[2025-09-09 15:12:05] [Rank 0] step:2441/10000 train_time:133728ms step_avg:54.78ms +[2025-09-09 15:12:06] [Rank 0] step:2461/10000 train_time:134548ms step_avg:54.67ms +[2025-09-09 15:12:06] [Rank 0] step:2461/10000 train_time:134548ms step_avg:54.67ms +[2025-09-09 15:12:07] [Rank 0] step:2481/10000 train_time:135369ms step_avg:54.56ms +[2025-09-09 15:12:07] [Rank 0] step:2481/10000 train_time:135369ms step_avg:54.56ms +[2025-09-09 15:12:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 15:12:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-09 15:12:08] [Rank 0] PRINT: step:2500/10000 train_loss:0.7089 val_loss:0.6814 train_time:136192ms step_avg:54.48ms +[2025-09-09 15:12:08] [Rank 0] PRINT: step:2500/10000 train_loss:0.7089 val_loss:0.6814 train_time:136192ms step_avg:54.48ms +[2025-09-09 15:12:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:12:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:12:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:12:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:13:30] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:13:30] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:13:30] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:13:30] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:13:30] [Rank 0] Total Loss: 4.6739 +[2025-09-09 15:13:30] [Rank 0] Total Loss: 4.6739 +[2025-09-09 15:13:30] [Rank 0] Total FTA (Unweighted): 0.8738 +[2025-09-09 15:13:30] [Rank 0] Total FTA (Unweighted): 0.8738 +[2025-09-09 15:13:30] [Rank 0] Total FTA (Weighted): 0.8738 +[2025-09-09 15:13:30] [Rank 0] Total FTA (Weighted): 0.8738 +[2025-09-09 15:13:30] [Rank 0] Group 0 Loss: 4.5288 +[2025-09-09 15:13:30] [Rank 0] Group 0 Loss: 4.5288 +[2025-09-09 15:13:30] [Rank 0] Group 1 Loss: 4.3740 +[2025-09-09 15:13:30] [Rank 0] Group 1 Loss: 4.3740 +[2025-09-09 15:13:30] [Rank 0] Group 2 Loss: 4.1767 +[2025-09-09 15:13:30] [Rank 0] Group 2 Loss: 4.1767 +[2025-09-09 15:13:30] [Rank 0] Group 3 Loss: 4.6086 +[2025-09-09 15:13:30] [Rank 0] Group 3 Loss: 4.6086 +[2025-09-09 15:13:30] [Rank 0] Group 4 Loss: 4.5560 +[2025-09-09 15:13:30] [Rank 0] Group 4 Loss: 4.5560 +[2025-09-09 15:13:30] [Rank 0] Group 5 Loss: 4.5836 +[2025-09-09 15:13:30] [Rank 0] Group 5 Loss: 4.5836 +[2025-09-09 15:13:30] [Rank 0] Group 6 Loss: 4.5144 +[2025-09-09 15:13:30] [Rank 0] Group 6 Loss: 4.5144 +[2025-09-09 15:13:30] [Rank 0] Group 7 Loss: 4.5812 +[2025-09-09 15:13:30] [Rank 0] Group 7 Loss: 4.5812 +[2025-09-09 15:13:30] [Rank 0] Group 8 Loss: 4.7119 +[2025-09-09 15:13:30] [Rank 0] Group 8 Loss: 4.7119 +[2025-09-09 15:13:30] [Rank 0] Group 9 Loss: 4.6336 +[2025-09-09 15:13:30] [Rank 0] Group 9 Loss: 4.6336 +[2025-09-09 15:13:30] [Rank 0] Group 10 Loss: 4.8191 +[2025-09-09 15:13:30] [Rank 0] Group 10 Loss: 4.8191 +[2025-09-09 15:13:30] [Rank 0] Group 11 Loss: 4.8370 +[2025-09-09 15:13:30] [Rank 0] Group 11 Loss: 4.8370 +[2025-09-09 15:13:30] [Rank 0] Group 12 Loss: 4.8323 +[2025-09-09 15:13:30] [Rank 0] Group 12 Loss: 4.8323 +[2025-09-09 15:13:30] [Rank 0] Group 13 Loss: 4.9655 +[2025-09-09 15:13:30] [Rank 0] Group 13 Loss: 4.9655 +[2025-09-09 15:13:30] [Rank 0] Group 14 Loss: 4.9929 +[2025-09-09 15:13:30] [Rank 0] Group 14 Loss: 4.9929 +[2025-09-09 15:13:30] [Rank 0] Group 15 Loss: 5.0672 +[2025-09-09 15:13:30] [Rank 0] Group 15 Loss: 5.0672 +[2025-09-09 15:13:30] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 5 FTA: 
1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 9 FTA: 0.9900 +[2025-09-09 15:13:30] [Rank 0] Group 9 FTA: 0.9900 +[2025-09-09 15:13:30] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 15:13:30] [Rank 0] Group 11 FTA: 0.9500 +[2025-09-09 15:13:30] [Rank 0] Group 11 FTA: 0.9500 +[2025-09-09 15:13:30] [Rank 0] Group 12 FTA: 0.8800 +[2025-09-09 15:13:30] [Rank 0] Group 12 FTA: 0.8800 +[2025-09-09 15:13:30] [Rank 0] Group 13 FTA: 0.7200 +[2025-09-09 15:13:30] [Rank 0] Group 13 FTA: 0.7200 +[2025-09-09 15:13:30] [Rank 0] Group 14 FTA: 0.2900 +[2025-09-09 15:13:30] [Rank 0] Group 14 FTA: 0.2900 +[2025-09-09 15:13:30] [Rank 0] Group 15 FTA: 0.1500 +[2025-09-09 15:13:30] [Rank 0] Group 15 FTA: 0.1500 +[2025-09-09 15:13:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:13:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:13:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:13:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:13:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:13:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:13:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:13:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:13:31] [Rank 0] step:2501/10000 train_time:136210ms step_avg:54.46ms +[2025-09-09 15:13:31] [Rank 0] step:2501/10000 train_time:136210ms step_avg:54.46ms +[2025-09-09 15:13:32] [Rank 0] step:2521/10000 train_time:137046ms step_avg:54.36ms +[2025-09-09 15:13:32] [Rank 0] step:2521/10000 train_time:137046ms step_avg:54.36ms +[2025-09-09 15:13:33] [Rank 0] step:2541/10000 train_time:137868ms step_avg:54.26ms +[2025-09-09 15:13:33] [Rank 0] step:2541/10000 train_time:137868ms step_avg:54.26ms +[2025-09-09 15:13:34] [Rank 0] step:2561/10000 train_time:138687ms step_avg:54.15ms +[2025-09-09 15:13:34] [Rank 0] step:2561/10000 train_time:138687ms step_avg:54.15ms +[2025-09-09 15:13:35] [Rank 0] step:2581/10000 train_time:139508ms step_avg:54.05ms +[2025-09-09 15:13:35] [Rank 0] step:2581/10000 train_time:139508ms step_avg:54.05ms +[2025-09-09 15:13:36] [Rank 0] step:2601/10000 train_time:140329ms step_avg:53.95ms +[2025-09-09 15:13:36] [Rank 0] step:2601/10000 train_time:140329ms step_avg:53.95ms +[2025-09-09 15:13:36] [Rank 0] step:2621/10000 train_time:141149ms step_avg:53.85ms +[2025-09-09 15:13:36] [Rank 0] step:2621/10000 train_time:141149ms step_avg:53.85ms +[2025-09-09 15:13:37] 
[Rank 0] step:2641/10000 train_time:141971ms step_avg:53.76ms +[2025-09-09 15:13:37] [Rank 0] step:2641/10000 train_time:141971ms step_avg:53.76ms +[2025-09-09 15:13:38] [Rank 0] step:2661/10000 train_time:142791ms step_avg:53.66ms +[2025-09-09 15:13:38] [Rank 0] step:2661/10000 train_time:142791ms step_avg:53.66ms +[2025-09-09 15:13:39] [Rank 0] step:2681/10000 train_time:143612ms step_avg:53.57ms +[2025-09-09 15:13:39] [Rank 0] step:2681/10000 train_time:143612ms step_avg:53.57ms +[2025-09-09 15:13:40] [Rank 0] step:2701/10000 train_time:144433ms step_avg:53.47ms +[2025-09-09 15:13:40] [Rank 0] step:2701/10000 train_time:144433ms step_avg:53.47ms +[2025-09-09 15:13:41] [Rank 0] step:2721/10000 train_time:145253ms step_avg:53.38ms +[2025-09-09 15:13:41] [Rank 0] step:2721/10000 train_time:145253ms step_avg:53.38ms +[2025-09-09 15:13:41] [Rank 0] step:2741/10000 train_time:146074ms step_avg:53.29ms +[2025-09-09 15:13:41] [Rank 0] step:2741/10000 train_time:146074ms step_avg:53.29ms +[2025-09-09 15:13:42] [Rank 0] step:2761/10000 train_time:146896ms step_avg:53.20ms +[2025-09-09 15:13:42] [Rank 0] step:2761/10000 train_time:146896ms step_avg:53.20ms +[2025-09-09 15:13:43] [Rank 0] step:2781/10000 train_time:147716ms step_avg:53.12ms +[2025-09-09 15:13:43] [Rank 0] step:2781/10000 train_time:147716ms step_avg:53.12ms +[2025-09-09 15:13:44] [Rank 0] step:2801/10000 train_time:148537ms step_avg:53.03ms +[2025-09-09 15:13:44] [Rank 0] step:2801/10000 train_time:148537ms step_avg:53.03ms +[2025-09-09 15:13:45] [Rank 0] step:2821/10000 train_time:150062ms step_avg:53.19ms +[2025-09-09 15:13:45] [Rank 0] step:2821/10000 train_time:150062ms step_avg:53.19ms +[2025-09-09 15:13:46] [Rank 0] step:2841/10000 train_time:150882ms step_avg:53.11ms +[2025-09-09 15:13:46] [Rank 0] step:2841/10000 train_time:150882ms step_avg:53.11ms +[2025-09-09 15:13:47] [Rank 0] step:2861/10000 train_time:151703ms step_avg:53.02ms +[2025-09-09 15:13:47] [Rank 0] step:2861/10000 train_time:151703ms step_avg:53.02ms +[2025-09-09 15:13:48] [Rank 0] step:2881/10000 train_time:152523ms step_avg:52.94ms +[2025-09-09 15:13:48] [Rank 0] step:2881/10000 train_time:152523ms step_avg:52.94ms +[2025-09-09 15:13:49] [Rank 0] step:2901/10000 train_time:153343ms step_avg:52.86ms +[2025-09-09 15:13:49] [Rank 0] step:2901/10000 train_time:153343ms step_avg:52.86ms +[2025-09-09 15:13:49] [Rank 0] step:2921/10000 train_time:154164ms step_avg:52.78ms +[2025-09-09 15:13:49] [Rank 0] step:2921/10000 train_time:154164ms step_avg:52.78ms +[2025-09-09 15:13:50] [Rank 0] step:2941/10000 train_time:154984ms step_avg:52.70ms +[2025-09-09 15:13:50] [Rank 0] step:2941/10000 train_time:154984ms step_avg:52.70ms +[2025-09-09 15:13:51] [Rank 0] step:2961/10000 train_time:155806ms step_avg:52.62ms +[2025-09-09 15:13:51] [Rank 0] step:2961/10000 train_time:155806ms step_avg:52.62ms +[2025-09-09 15:13:52] [Rank 0] step:2981/10000 train_time:156626ms step_avg:52.54ms +[2025-09-09 15:13:52] [Rank 0] step:2981/10000 train_time:156626ms step_avg:52.54ms +[2025-09-09 15:13:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 15:13:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-09 15:13:53] [Rank 0] PRINT: step:3000/10000 train_loss:0.6873 val_loss:0.6652 train_time:157449ms step_avg:52.48ms +[2025-09-09 15:13:53] [Rank 0] PRINT: step:3000/10000 train_loss:0.6873 val_loss:0.6652 train_time:157449ms step_avg:52.48ms +[2025-09-09 15:13:53] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:13:53] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:13:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:13:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:15:15] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:15:15] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:15:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:15:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:15:15] [Rank 0] Total Loss: 4.7327 +[2025-09-09 15:15:15] [Rank 0] Total Loss: 4.7327 +[2025-09-09 15:15:15] [Rank 0] Total FTA (Unweighted): 0.8906 +[2025-09-09 15:15:15] [Rank 0] Total FTA (Unweighted): 0.8906 +[2025-09-09 15:15:15] [Rank 0] Total FTA (Weighted): 0.8906 +[2025-09-09 15:15:15] [Rank 0] Total FTA (Weighted): 0.8906 +[2025-09-09 15:15:15] [Rank 0] Group 0 Loss: 4.6914 +[2025-09-09 15:15:15] [Rank 0] Group 0 Loss: 4.6914 +[2025-09-09 15:15:15] [Rank 0] Group 1 Loss: 4.5080 +[2025-09-09 15:15:15] [Rank 0] Group 1 Loss: 4.5080 +[2025-09-09 15:15:15] [Rank 0] Group 2 Loss: 4.1768 +[2025-09-09 15:15:15] [Rank 0] Group 2 Loss: 4.1768 +[2025-09-09 15:15:15] [Rank 0] Group 3 Loss: 4.6830 +[2025-09-09 15:15:15] [Rank 0] Group 3 Loss: 4.6830 +[2025-09-09 15:15:15] [Rank 0] Group 4 Loss: 4.5829 +[2025-09-09 15:15:15] [Rank 0] Group 4 Loss: 4.5829 +[2025-09-09 15:15:15] [Rank 0] Group 5 Loss: 4.6105 +[2025-09-09 15:15:15] [Rank 0] Group 5 Loss: 4.6105 +[2025-09-09 15:15:15] [Rank 0] Group 6 Loss: 4.5718 +[2025-09-09 15:15:15] [Rank 0] Group 6 Loss: 4.5718 +[2025-09-09 15:15:15] [Rank 0] Group 7 Loss: 4.6699 +[2025-09-09 15:15:15] [Rank 0] Group 7 Loss: 4.6699 +[2025-09-09 15:15:15] [Rank 0] Group 8 Loss: 4.7726 +[2025-09-09 15:15:15] [Rank 0] Group 8 Loss: 4.7726 +[2025-09-09 15:15:15] [Rank 0] Group 9 Loss: 4.7071 +[2025-09-09 15:15:15] [Rank 0] Group 9 Loss: 4.7071 +[2025-09-09 15:15:15] [Rank 0] Group 10 Loss: 4.8588 +[2025-09-09 15:15:15] [Rank 0] Group 10 Loss: 4.8588 +[2025-09-09 15:15:15] [Rank 0] Group 11 Loss: 4.9035 +[2025-09-09 15:15:15] [Rank 0] Group 11 Loss: 4.9035 +[2025-09-09 15:15:15] [Rank 0] Group 12 Loss: 4.8688 +[2025-09-09 15:15:15] [Rank 0] Group 12 Loss: 4.8688 +[2025-09-09 15:15:15] [Rank 0] Group 13 Loss: 5.0417 +[2025-09-09 15:15:15] [Rank 0] Group 13 Loss: 5.0417 +[2025-09-09 15:15:15] [Rank 0] Group 14 Loss: 5.0000 +[2025-09-09 15:15:15] [Rank 0] Group 14 Loss: 5.0000 +[2025-09-09 15:15:15] [Rank 0] Group 15 Loss: 5.0769 +[2025-09-09 15:15:15] [Rank 0] Group 15 Loss: 5.0769 +[2025-09-09 15:15:15] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 5 FTA: 
1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 10 FTA: 0.9900 +[2025-09-09 15:15:15] [Rank 0] Group 10 FTA: 0.9900 +[2025-09-09 15:15:15] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-09 15:15:15] [Rank 0] Group 12 FTA: 0.9300 +[2025-09-09 15:15:15] [Rank 0] Group 12 FTA: 0.9300 +[2025-09-09 15:15:15] [Rank 0] Group 13 FTA: 0.7600 +[2025-09-09 15:15:15] [Rank 0] Group 13 FTA: 0.7600 +[2025-09-09 15:15:15] [Rank 0] Group 14 FTA: 0.3900 +[2025-09-09 15:15:15] [Rank 0] Group 14 FTA: 0.3900 +[2025-09-09 15:15:15] [Rank 0] Group 15 FTA: 0.1800 +[2025-09-09 15:15:15] [Rank 0] Group 15 FTA: 0.1800 +[2025-09-09 15:15:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:15:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:15:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:15:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:15:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:15:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:15:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:15:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:15:17] [Rank 0] step:3001/10000 train_time:157466ms step_avg:52.47ms +[2025-09-09 15:15:17] [Rank 0] step:3001/10000 train_time:157466ms step_avg:52.47ms +[2025-09-09 15:15:18] [Rank 0] step:3021/10000 train_time:158287ms step_avg:52.40ms +[2025-09-09 15:15:18] [Rank 0] step:3021/10000 train_time:158287ms step_avg:52.40ms +[2025-09-09 15:15:18] [Rank 0] step:3041/10000 train_time:159108ms step_avg:52.32ms +[2025-09-09 15:15:18] [Rank 0] step:3041/10000 train_time:159108ms step_avg:52.32ms +[2025-09-09 15:15:19] [Rank 0] step:3061/10000 train_time:159940ms step_avg:52.25ms +[2025-09-09 15:15:19] [Rank 0] step:3061/10000 train_time:159940ms step_avg:52.25ms +[2025-09-09 15:15:20] [Rank 0] step:3081/10000 train_time:160761ms step_avg:52.18ms +[2025-09-09 15:15:20] [Rank 0] step:3081/10000 train_time:160761ms step_avg:52.18ms +[2025-09-09 15:15:21] [Rank 0] step:3101/10000 train_time:161581ms step_avg:52.11ms +[2025-09-09 15:15:21] [Rank 0] step:3101/10000 train_time:161581ms step_avg:52.11ms +[2025-09-09 15:15:22] [Rank 0] step:3121/10000 train_time:162789ms step_avg:52.16ms +[2025-09-09 15:15:22] [Rank 0] step:3121/10000 train_time:162789ms step_avg:52.16ms +[2025-09-09 15:15:23] 
[Rank 0] step:3141/10000 train_time:163670ms step_avg:52.11ms +[2025-09-09 15:15:23] [Rank 0] step:3141/10000 train_time:163670ms step_avg:52.11ms +[2025-09-09 15:15:24] [Rank 0] step:3161/10000 train_time:164491ms step_avg:52.04ms +[2025-09-09 15:15:24] [Rank 0] step:3161/10000 train_time:164491ms step_avg:52.04ms +[2025-09-09 15:15:25] [Rank 0] step:3181/10000 train_time:165313ms step_avg:51.97ms +[2025-09-09 15:15:25] [Rank 0] step:3181/10000 train_time:165313ms step_avg:51.97ms +[2025-09-09 15:15:25] [Rank 0] step:3201/10000 train_time:166134ms step_avg:51.90ms +[2025-09-09 15:15:25] [Rank 0] step:3201/10000 train_time:166134ms step_avg:51.90ms +[2025-09-09 15:15:26] [Rank 0] step:3221/10000 train_time:166956ms step_avg:51.83ms +[2025-09-09 15:15:26] [Rank 0] step:3221/10000 train_time:166956ms step_avg:51.83ms +[2025-09-09 15:15:27] [Rank 0] step:3241/10000 train_time:167777ms step_avg:51.77ms +[2025-09-09 15:15:27] [Rank 0] step:3241/10000 train_time:167777ms step_avg:51.77ms +[2025-09-09 15:15:28] [Rank 0] step:3261/10000 train_time:168598ms step_avg:51.70ms +[2025-09-09 15:15:28] [Rank 0] step:3261/10000 train_time:168598ms step_avg:51.70ms +[2025-09-09 15:15:29] [Rank 0] step:3281/10000 train_time:169419ms step_avg:51.64ms +[2025-09-09 15:15:29] [Rank 0] step:3281/10000 train_time:169419ms step_avg:51.64ms +[2025-09-09 15:15:30] [Rank 0] step:3301/10000 train_time:170241ms step_avg:51.57ms +[2025-09-09 15:15:30] [Rank 0] step:3301/10000 train_time:170241ms step_avg:51.57ms +[2025-09-09 15:15:30] [Rank 0] step:3321/10000 train_time:171063ms step_avg:51.51ms +[2025-09-09 15:15:30] [Rank 0] step:3321/10000 train_time:171063ms step_avg:51.51ms +[2025-09-09 15:15:31] [Rank 0] step:3341/10000 train_time:171884ms step_avg:51.45ms +[2025-09-09 15:15:31] [Rank 0] step:3341/10000 train_time:171884ms step_avg:51.45ms +[2025-09-09 15:15:32] [Rank 0] step:3361/10000 train_time:172705ms step_avg:51.39ms +[2025-09-09 15:15:32] [Rank 0] step:3361/10000 train_time:172705ms step_avg:51.39ms +[2025-09-09 15:15:33] [Rank 0] step:3381/10000 train_time:173527ms step_avg:51.32ms +[2025-09-09 15:15:33] [Rank 0] step:3381/10000 train_time:173527ms step_avg:51.32ms +[2025-09-09 15:15:34] [Rank 0] step:3401/10000 train_time:174348ms step_avg:51.26ms +[2025-09-09 15:15:34] [Rank 0] step:3401/10000 train_time:174348ms step_avg:51.26ms +[2025-09-09 15:15:34] [Rank 0] step:3421/10000 train_time:175169ms step_avg:51.20ms +[2025-09-09 15:15:34] [Rank 0] step:3421/10000 train_time:175169ms step_avg:51.20ms +[2025-09-09 15:15:35] [Rank 0] step:3441/10000 train_time:175993ms step_avg:51.15ms +[2025-09-09 15:15:35] [Rank 0] step:3441/10000 train_time:175993ms step_avg:51.15ms +[2025-09-09 15:15:36] [Rank 0] step:3461/10000 train_time:176814ms step_avg:51.09ms +[2025-09-09 15:15:36] [Rank 0] step:3461/10000 train_time:176814ms step_avg:51.09ms +[2025-09-09 15:15:37] [Rank 0] step:3481/10000 train_time:177636ms step_avg:51.03ms +[2025-09-09 15:15:37] [Rank 0] step:3481/10000 train_time:177636ms step_avg:51.03ms +[2025-09-09 15:15:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 15:15:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-09 15:15:38] [Rank 0] PRINT: step:3500/10000 train_loss:0.6731 val_loss:0.6537 train_time:178461ms step_avg:50.99ms +[2025-09-09 15:15:38] [Rank 0] PRINT: step:3500/10000 train_loss:0.6731 val_loss:0.6537 train_time:178461ms step_avg:50.99ms +[2025-09-09 15:15:38] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:15:38] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:15:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:15:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:17:00] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:17:00] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:17:00] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:17:00] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:17:00] [Rank 0] Total Loss: 4.8433 +[2025-09-09 15:17:00] [Rank 0] Total Loss: 4.8433 +[2025-09-09 15:17:00] [Rank 0] Total FTA (Unweighted): 0.9081 +[2025-09-09 15:17:00] [Rank 0] Total FTA (Unweighted): 0.9081 +[2025-09-09 15:17:00] [Rank 0] Total FTA (Weighted): 0.9081 +[2025-09-09 15:17:00] [Rank 0] Total FTA (Weighted): 0.9081 +[2025-09-09 15:17:00] [Rank 0] Group 0 Loss: 4.8817 +[2025-09-09 15:17:00] [Rank 0] Group 0 Loss: 4.8817 +[2025-09-09 15:17:00] [Rank 0] Group 1 Loss: 4.5846 +[2025-09-09 15:17:00] [Rank 0] Group 1 Loss: 4.5846 +[2025-09-09 15:17:00] [Rank 0] Group 2 Loss: 4.1833 +[2025-09-09 15:17:00] [Rank 0] Group 2 Loss: 4.1833 +[2025-09-09 15:17:00] [Rank 0] Group 3 Loss: 4.7444 +[2025-09-09 15:17:00] [Rank 0] Group 3 Loss: 4.7444 +[2025-09-09 15:17:00] [Rank 0] Group 4 Loss: 4.7329 +[2025-09-09 15:17:00] [Rank 0] Group 4 Loss: 4.7329 +[2025-09-09 15:17:00] [Rank 0] Group 5 Loss: 4.7747 +[2025-09-09 15:17:00] [Rank 0] Group 5 Loss: 4.7747 +[2025-09-09 15:17:00] [Rank 0] Group 6 Loss: 4.6790 +[2025-09-09 15:17:00] [Rank 0] Group 6 Loss: 4.6790 +[2025-09-09 15:17:00] [Rank 0] Group 7 Loss: 4.7911 +[2025-09-09 15:17:00] [Rank 0] Group 7 Loss: 4.7911 +[2025-09-09 15:17:00] [Rank 0] Group 8 Loss: 4.8833 +[2025-09-09 15:17:00] [Rank 0] Group 8 Loss: 4.8833 +[2025-09-09 15:17:00] [Rank 0] Group 9 Loss: 4.8255 +[2025-09-09 15:17:00] [Rank 0] Group 9 Loss: 4.8255 +[2025-09-09 15:17:00] [Rank 0] Group 10 Loss: 4.9865 +[2025-09-09 15:17:00] [Rank 0] Group 10 Loss: 4.9865 +[2025-09-09 15:17:00] [Rank 0] Group 11 Loss: 5.0176 +[2025-09-09 15:17:00] [Rank 0] Group 11 Loss: 5.0176 +[2025-09-09 15:17:00] [Rank 0] Group 12 Loss: 5.0052 +[2025-09-09 15:17:00] [Rank 0] Group 12 Loss: 5.0052 +[2025-09-09 15:17:00] [Rank 0] Group 13 Loss: 5.1208 +[2025-09-09 15:17:00] [Rank 0] Group 13 Loss: 5.1208 +[2025-09-09 15:17:00] [Rank 0] Group 14 Loss: 5.1087 +[2025-09-09 15:17:00] [Rank 0] Group 14 Loss: 5.1087 +[2025-09-09 15:17:00] [Rank 0] Group 15 Loss: 5.1743 +[2025-09-09 15:17:00] [Rank 0] Group 15 Loss: 5.1743 +[2025-09-09 15:17:00] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 5 FTA: 
1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 15:17:00] [Rank 0] Group 11 FTA: 0.9900 +[2025-09-09 15:17:00] [Rank 0] Group 11 FTA: 0.9900 +[2025-09-09 15:17:00] [Rank 0] Group 12 FTA: 0.9600 +[2025-09-09 15:17:00] [Rank 0] Group 12 FTA: 0.9600 +[2025-09-09 15:17:00] [Rank 0] Group 13 FTA: 0.8400 +[2025-09-09 15:17:00] [Rank 0] Group 13 FTA: 0.8400 +[2025-09-09 15:17:00] [Rank 0] Group 14 FTA: 0.5100 +[2025-09-09 15:17:00] [Rank 0] Group 14 FTA: 0.5100 +[2025-09-09 15:17:00] [Rank 0] Group 15 FTA: 0.2300 +[2025-09-09 15:17:00] [Rank 0] Group 15 FTA: 0.2300 +[2025-09-09 15:17:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:17:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:17:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:17:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:17:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:17:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:17:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:17:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:17:01] [Rank 0] step:3501/10000 train_time:178477ms step_avg:50.98ms +[2025-09-09 15:17:01] [Rank 0] step:3501/10000 train_time:178477ms step_avg:50.98ms +[2025-09-09 15:17:02] [Rank 0] step:3521/10000 train_time:179297ms step_avg:50.92ms +[2025-09-09 15:17:02] [Rank 0] step:3521/10000 train_time:179297ms step_avg:50.92ms +[2025-09-09 15:17:03] [Rank 0] step:3541/10000 train_time:180118ms step_avg:50.87ms +[2025-09-09 15:17:03] [Rank 0] step:3541/10000 train_time:180118ms step_avg:50.87ms +[2025-09-09 15:17:04] [Rank 0] step:3561/10000 train_time:180941ms step_avg:50.81ms +[2025-09-09 15:17:04] [Rank 0] step:3561/10000 train_time:180941ms step_avg:50.81ms +[2025-09-09 15:17:04] [Rank 0] step:3581/10000 train_time:181762ms step_avg:50.76ms +[2025-09-09 15:17:04] [Rank 0] step:3581/10000 train_time:181762ms step_avg:50.76ms +[2025-09-09 15:17:05] [Rank 0] step:3601/10000 train_time:182583ms step_avg:50.70ms +[2025-09-09 15:17:05] [Rank 0] step:3601/10000 train_time:182583ms step_avg:50.70ms +[2025-09-09 15:17:06] [Rank 0] step:3621/10000 train_time:183404ms step_avg:50.65ms +[2025-09-09 15:17:06] [Rank 0] step:3621/10000 train_time:183404ms step_avg:50.65ms +[2025-09-09 15:17:08] 
[Rank 0] step:3641/10000 train_time:184912ms step_avg:50.79ms +[2025-09-09 15:17:08] [Rank 0] step:3641/10000 train_time:184912ms step_avg:50.79ms +[2025-09-09 15:17:08] [Rank 0] step:3661/10000 train_time:185732ms step_avg:50.73ms +[2025-09-09 15:17:08] [Rank 0] step:3661/10000 train_time:185732ms step_avg:50.73ms +[2025-09-09 15:17:09] [Rank 0] step:3681/10000 train_time:186553ms step_avg:50.68ms +[2025-09-09 15:17:09] [Rank 0] step:3681/10000 train_time:186553ms step_avg:50.68ms +[2025-09-09 15:17:10] [Rank 0] step:3701/10000 train_time:187375ms step_avg:50.63ms +[2025-09-09 15:17:10] [Rank 0] step:3701/10000 train_time:187375ms step_avg:50.63ms +[2025-09-09 15:17:11] [Rank 0] step:3721/10000 train_time:188196ms step_avg:50.58ms +[2025-09-09 15:17:11] [Rank 0] step:3721/10000 train_time:188196ms step_avg:50.58ms +[2025-09-09 15:17:12] [Rank 0] step:3741/10000 train_time:189017ms step_avg:50.53ms +[2025-09-09 15:17:12] [Rank 0] step:3741/10000 train_time:189017ms step_avg:50.53ms +[2025-09-09 15:17:12] [Rank 0] step:3761/10000 train_time:189838ms step_avg:50.48ms +[2025-09-09 15:17:12] [Rank 0] step:3761/10000 train_time:189838ms step_avg:50.48ms +[2025-09-09 15:17:13] [Rank 0] step:3781/10000 train_time:190660ms step_avg:50.43ms +[2025-09-09 15:17:13] [Rank 0] step:3781/10000 train_time:190660ms step_avg:50.43ms +[2025-09-09 15:17:14] [Rank 0] step:3801/10000 train_time:191482ms step_avg:50.38ms +[2025-09-09 15:17:14] [Rank 0] step:3801/10000 train_time:191482ms step_avg:50.38ms +[2025-09-09 15:17:15] [Rank 0] step:3821/10000 train_time:192302ms step_avg:50.33ms +[2025-09-09 15:17:15] [Rank 0] step:3821/10000 train_time:192302ms step_avg:50.33ms +[2025-09-09 15:17:16] [Rank 0] step:3841/10000 train_time:193124ms step_avg:50.28ms +[2025-09-09 15:17:16] [Rank 0] step:3841/10000 train_time:193124ms step_avg:50.28ms +[2025-09-09 15:17:17] [Rank 0] step:3861/10000 train_time:193945ms step_avg:50.23ms +[2025-09-09 15:17:17] [Rank 0] step:3861/10000 train_time:193945ms step_avg:50.23ms +[2025-09-09 15:17:17] [Rank 0] step:3881/10000 train_time:194767ms step_avg:50.18ms +[2025-09-09 15:17:17] [Rank 0] step:3881/10000 train_time:194767ms step_avg:50.18ms +[2025-09-09 15:17:18] [Rank 0] step:3901/10000 train_time:195587ms step_avg:50.14ms +[2025-09-09 15:17:18] [Rank 0] step:3901/10000 train_time:195587ms step_avg:50.14ms +[2025-09-09 15:17:19] [Rank 0] step:3921/10000 train_time:196408ms step_avg:50.09ms +[2025-09-09 15:17:19] [Rank 0] step:3921/10000 train_time:196408ms step_avg:50.09ms +[2025-09-09 15:17:20] [Rank 0] step:3941/10000 train_time:197228ms step_avg:50.05ms +[2025-09-09 15:17:20] [Rank 0] step:3941/10000 train_time:197228ms step_avg:50.05ms +[2025-09-09 15:17:21] [Rank 0] step:3961/10000 train_time:198049ms step_avg:50.00ms +[2025-09-09 15:17:21] [Rank 0] step:3961/10000 train_time:198049ms step_avg:50.00ms +[2025-09-09 15:17:21] [Rank 0] step:3981/10000 train_time:198870ms step_avg:49.95ms +[2025-09-09 15:17:21] [Rank 0] step:3981/10000 train_time:198870ms step_avg:49.95ms +[2025-09-09 15:17:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 15:17:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-09 15:17:23] [Rank 0] PRINT: step:4000/10000 train_loss:0.6622 val_loss:0.6436 train_time:199692ms step_avg:49.92ms +[2025-09-09 15:17:23] [Rank 0] PRINT: step:4000/10000 train_loss:0.6622 val_loss:0.6436 train_time:199692ms step_avg:49.92ms +[2025-09-09 15:17:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:17:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:17:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:17:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:18:45] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:18:45] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:18:45] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:18:45] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:18:45] [Rank 0] Total Loss: 4.8529 +[2025-09-09 15:18:45] [Rank 0] Total Loss: 4.8529 +[2025-09-09 15:18:45] [Rank 0] Total FTA (Unweighted): 0.9294 +[2025-09-09 15:18:45] [Rank 0] Total FTA (Unweighted): 0.9294 +[2025-09-09 15:18:45] [Rank 0] Total FTA (Weighted): 0.9294 +[2025-09-09 15:18:45] [Rank 0] Total FTA (Weighted): 0.9294 +[2025-09-09 15:18:45] [Rank 0] Group 0 Loss: 4.7987 +[2025-09-09 15:18:45] [Rank 0] Group 0 Loss: 4.7987 +[2025-09-09 15:18:45] [Rank 0] Group 1 Loss: 4.4989 +[2025-09-09 15:18:45] [Rank 0] Group 1 Loss: 4.4989 +[2025-09-09 15:18:45] [Rank 0] Group 2 Loss: 4.3556 +[2025-09-09 15:18:45] [Rank 0] Group 2 Loss: 4.3556 +[2025-09-09 15:18:45] [Rank 0] Group 3 Loss: 4.7842 +[2025-09-09 15:18:45] [Rank 0] Group 3 Loss: 4.7842 +[2025-09-09 15:18:45] [Rank 0] Group 4 Loss: 4.7280 +[2025-09-09 15:18:45] [Rank 0] Group 4 Loss: 4.7280 +[2025-09-09 15:18:45] [Rank 0] Group 5 Loss: 4.7740 +[2025-09-09 15:18:45] [Rank 0] Group 5 Loss: 4.7740 +[2025-09-09 15:18:45] [Rank 0] Group 6 Loss: 4.7084 +[2025-09-09 15:18:45] [Rank 0] Group 6 Loss: 4.7084 +[2025-09-09 15:18:45] [Rank 0] Group 7 Loss: 4.8002 +[2025-09-09 15:18:45] [Rank 0] Group 7 Loss: 4.8002 +[2025-09-09 15:18:45] [Rank 0] Group 8 Loss: 4.8829 +[2025-09-09 15:18:45] [Rank 0] Group 8 Loss: 4.8829 +[2025-09-09 15:18:45] [Rank 0] Group 9 Loss: 4.8682 +[2025-09-09 15:18:45] [Rank 0] Group 9 Loss: 4.8682 +[2025-09-09 15:18:45] [Rank 0] Group 10 Loss: 5.0236 +[2025-09-09 15:18:45] [Rank 0] Group 10 Loss: 5.0236 +[2025-09-09 15:18:45] [Rank 0] Group 11 Loss: 5.0598 +[2025-09-09 15:18:45] [Rank 0] Group 11 Loss: 5.0598 +[2025-09-09 15:18:45] [Rank 0] Group 12 Loss: 4.9655 +[2025-09-09 15:18:45] [Rank 0] Group 12 Loss: 4.9655 +[2025-09-09 15:18:45] [Rank 0] Group 13 Loss: 5.1433 +[2025-09-09 15:18:45] [Rank 0] Group 13 Loss: 5.1433 +[2025-09-09 15:18:45] [Rank 0] Group 14 Loss: 5.0935 +[2025-09-09 15:18:45] [Rank 0] Group 14 Loss: 5.0935 +[2025-09-09 15:18:45] [Rank 0] Group 15 Loss: 5.1611 +[2025-09-09 15:18:45] [Rank 0] Group 15 Loss: 5.1611 +[2025-09-09 15:18:45] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 5 FTA: 
1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-09 15:18:45] [Rank 0] Group 12 FTA: 0.9800 +[2025-09-09 15:18:45] [Rank 0] Group 12 FTA: 0.9800 +[2025-09-09 15:18:45] [Rank 0] Group 13 FTA: 0.9100 +[2025-09-09 15:18:45] [Rank 0] Group 13 FTA: 0.9100 +[2025-09-09 15:18:45] [Rank 0] Group 14 FTA: 0.6400 +[2025-09-09 15:18:45] [Rank 0] Group 14 FTA: 0.6400 +[2025-09-09 15:18:45] [Rank 0] Group 15 FTA: 0.3400 +[2025-09-09 15:18:45] [Rank 0] Group 15 FTA: 0.3400 +[2025-09-09 15:18:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:18:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:18:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:18:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:18:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:18:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:18:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:18:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:18:46] [Rank 0] step:4001/10000 train_time:199709ms step_avg:49.91ms +[2025-09-09 15:18:46] [Rank 0] step:4001/10000 train_time:199709ms step_avg:49.91ms +[2025-09-09 15:18:48] [Rank 0] step:4021/10000 train_time:201220ms step_avg:50.04ms +[2025-09-09 15:18:48] [Rank 0] step:4021/10000 train_time:201220ms step_avg:50.04ms +[2025-09-09 15:18:49] [Rank 0] step:4041/10000 train_time:202041ms step_avg:50.00ms +[2025-09-09 15:18:49] [Rank 0] step:4041/10000 train_time:202041ms step_avg:50.00ms +[2025-09-09 15:18:50] [Rank 0] step:4061/10000 train_time:202862ms step_avg:49.95ms +[2025-09-09 15:18:50] [Rank 0] step:4061/10000 train_time:202862ms step_avg:49.95ms +[2025-09-09 15:18:50] [Rank 0] step:4081/10000 train_time:203684ms step_avg:49.91ms +[2025-09-09 15:18:50] [Rank 0] step:4081/10000 train_time:203684ms step_avg:49.91ms +[2025-09-09 15:18:51] [Rank 0] step:4101/10000 train_time:204505ms step_avg:49.87ms +[2025-09-09 15:18:51] [Rank 0] step:4101/10000 train_time:204505ms step_avg:49.87ms +[2025-09-09 15:18:52] [Rank 0] step:4121/10000 train_time:205327ms step_avg:49.82ms +[2025-09-09 15:18:52] [Rank 0] step:4121/10000 train_time:205327ms step_avg:49.82ms +[2025-09-09 15:18:53] 
[Rank 0] step:4141/10000 train_time:206148ms step_avg:49.78ms +[2025-09-09 15:18:53] [Rank 0] step:4141/10000 train_time:206148ms step_avg:49.78ms +[2025-09-09 15:18:54] [Rank 0] step:4161/10000 train_time:206969ms step_avg:49.74ms +[2025-09-09 15:18:54] [Rank 0] step:4161/10000 train_time:206969ms step_avg:49.74ms +[2025-09-09 15:18:54] [Rank 0] step:4181/10000 train_time:207790ms step_avg:49.70ms +[2025-09-09 15:18:54] [Rank 0] step:4181/10000 train_time:207790ms step_avg:49.70ms +[2025-09-09 15:18:55] [Rank 0] step:4201/10000 train_time:208611ms step_avg:49.66ms +[2025-09-09 15:18:55] [Rank 0] step:4201/10000 train_time:208611ms step_avg:49.66ms +[2025-09-09 15:18:56] [Rank 0] step:4221/10000 train_time:209433ms step_avg:49.62ms +[2025-09-09 15:18:56] [Rank 0] step:4221/10000 train_time:209433ms step_avg:49.62ms +[2025-09-09 15:18:57] [Rank 0] step:4241/10000 train_time:210255ms step_avg:49.58ms +[2025-09-09 15:18:57] [Rank 0] step:4241/10000 train_time:210255ms step_avg:49.58ms +[2025-09-09 15:18:58] [Rank 0] step:4261/10000 train_time:211077ms step_avg:49.54ms +[2025-09-09 15:18:58] [Rank 0] step:4261/10000 train_time:211077ms step_avg:49.54ms +[2025-09-09 15:18:59] [Rank 0] step:4281/10000 train_time:211898ms step_avg:49.50ms +[2025-09-09 15:18:59] [Rank 0] step:4281/10000 train_time:211898ms step_avg:49.50ms +[2025-09-09 15:18:59] [Rank 0] step:4301/10000 train_time:212718ms step_avg:49.46ms +[2025-09-09 15:18:59] [Rank 0] step:4301/10000 train_time:212718ms step_avg:49.46ms +[2025-09-09 15:19:00] [Rank 0] step:4321/10000 train_time:213539ms step_avg:49.42ms +[2025-09-09 15:19:00] [Rank 0] step:4321/10000 train_time:213539ms step_avg:49.42ms +[2025-09-09 15:19:01] [Rank 0] step:4341/10000 train_time:214363ms step_avg:49.38ms +[2025-09-09 15:19:01] [Rank 0] step:4341/10000 train_time:214363ms step_avg:49.38ms +[2025-09-09 15:19:02] [Rank 0] step:4361/10000 train_time:215182ms step_avg:49.34ms +[2025-09-09 15:19:02] [Rank 0] step:4361/10000 train_time:215182ms step_avg:49.34ms +[2025-09-09 15:19:03] [Rank 0] step:4381/10000 train_time:216003ms step_avg:49.30ms +[2025-09-09 15:19:03] [Rank 0] step:4381/10000 train_time:216003ms step_avg:49.30ms +[2025-09-09 15:19:04] [Rank 0] step:4401/10000 train_time:216825ms step_avg:49.27ms +[2025-09-09 15:19:04] [Rank 0] step:4401/10000 train_time:216825ms step_avg:49.27ms +[2025-09-09 15:19:04] [Rank 0] step:4421/10000 train_time:217646ms step_avg:49.23ms +[2025-09-09 15:19:04] [Rank 0] step:4421/10000 train_time:217646ms step_avg:49.23ms +[2025-09-09 15:19:05] [Rank 0] step:4441/10000 train_time:218467ms step_avg:49.19ms +[2025-09-09 15:19:05] [Rank 0] step:4441/10000 train_time:218467ms step_avg:49.19ms +[2025-09-09 15:19:06] [Rank 0] step:4461/10000 train_time:219287ms step_avg:49.16ms +[2025-09-09 15:19:06] [Rank 0] step:4461/10000 train_time:219287ms step_avg:49.16ms +[2025-09-09 15:19:07] [Rank 0] step:4481/10000 train_time:220108ms step_avg:49.12ms +[2025-09-09 15:19:07] [Rank 0] step:4481/10000 train_time:220108ms step_avg:49.12ms +[2025-09-09 15:19:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 15:19:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-09 15:19:08] [Rank 0] PRINT: step:4500/10000 train_loss:0.6530 val_loss:0.6359 train_time:220931ms step_avg:49.10ms +[2025-09-09 15:19:08] [Rank 0] PRINT: step:4500/10000 train_loss:0.6530 val_loss:0.6359 train_time:220931ms step_avg:49.10ms +[2025-09-09 15:19:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:19:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:19:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:19:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:20:30] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:20:30] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:20:30] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:20:30] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:20:30] [Rank 0] Total Loss: 4.8815 +[2025-09-09 15:20:30] [Rank 0] Total Loss: 4.8815 +[2025-09-09 15:20:30] [Rank 0] Total FTA (Unweighted): 0.9406 +[2025-09-09 15:20:30] [Rank 0] Total FTA (Unweighted): 0.9406 +[2025-09-09 15:20:30] [Rank 0] Total FTA (Weighted): 0.9406 +[2025-09-09 15:20:30] [Rank 0] Total FTA (Weighted): 0.9406 +[2025-09-09 15:20:30] [Rank 0] Group 0 Loss: 4.9754 +[2025-09-09 15:20:30] [Rank 0] Group 0 Loss: 4.9754 +[2025-09-09 15:20:30] [Rank 0] Group 1 Loss: 4.5486 +[2025-09-09 15:20:30] [Rank 0] Group 1 Loss: 4.5486 +[2025-09-09 15:20:30] [Rank 0] Group 2 Loss: 4.3157 +[2025-09-09 15:20:30] [Rank 0] Group 2 Loss: 4.3157 +[2025-09-09 15:20:30] [Rank 0] Group 3 Loss: 4.8126 +[2025-09-09 15:20:30] [Rank 0] Group 3 Loss: 4.8126 +[2025-09-09 15:20:30] [Rank 0] Group 4 Loss: 4.7197 +[2025-09-09 15:20:30] [Rank 0] Group 4 Loss: 4.7197 +[2025-09-09 15:20:30] [Rank 0] Group 5 Loss: 4.7964 +[2025-09-09 15:20:30] [Rank 0] Group 5 Loss: 4.7964 +[2025-09-09 15:20:30] [Rank 0] Group 6 Loss: 4.7248 +[2025-09-09 15:20:30] [Rank 0] Group 6 Loss: 4.7248 +[2025-09-09 15:20:30] [Rank 0] Group 7 Loss: 4.8517 +[2025-09-09 15:20:30] [Rank 0] Group 7 Loss: 4.8517 +[2025-09-09 15:20:30] [Rank 0] Group 8 Loss: 4.9241 +[2025-09-09 15:20:30] [Rank 0] Group 8 Loss: 4.9241 +[2025-09-09 15:20:30] [Rank 0] Group 9 Loss: 4.8880 +[2025-09-09 15:20:30] [Rank 0] Group 9 Loss: 4.8880 +[2025-09-09 15:20:30] [Rank 0] Group 10 Loss: 5.0143 +[2025-09-09 15:20:30] [Rank 0] Group 10 Loss: 5.0143 +[2025-09-09 15:20:30] [Rank 0] Group 11 Loss: 5.0922 +[2025-09-09 15:20:30] [Rank 0] Group 11 Loss: 5.0922 +[2025-09-09 15:20:30] [Rank 0] Group 12 Loss: 5.0102 +[2025-09-09 15:20:30] [Rank 0] Group 12 Loss: 5.0102 +[2025-09-09 15:20:30] [Rank 0] Group 13 Loss: 5.1616 +[2025-09-09 15:20:30] [Rank 0] Group 13 Loss: 5.1616 +[2025-09-09 15:20:30] [Rank 0] Group 14 Loss: 5.1220 +[2025-09-09 15:20:30] [Rank 0] Group 14 Loss: 5.1220 +[2025-09-09 15:20:30] [Rank 0] Group 15 Loss: 5.1475 +[2025-09-09 15:20:30] [Rank 0] Group 15 Loss: 5.1475 +[2025-09-09 15:20:30] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:20:30] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:20:30] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:20:30] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:20:30] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:20:30] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:20:30] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:20:30] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:20:30] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:20:30] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:20:30] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 15:20:30] [Rank 0] Group 5 FTA: 
+[2025-09-09 15:20:30] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:20:30] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:20:30] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:20:30] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:20:30] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 15:20:30] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-09 15:20:30] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 15:20:30] [Rank 0] Group 13 FTA: 0.9400
+[2025-09-09 15:20:30] [Rank 0] Group 14 FTA: 0.7100
+[2025-09-09 15:20:30] [Rank 0] Group 15 FTA: 0.4200
+[2025-09-09 15:20:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png
+[2025-09-09 15:20:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png
+[2025-09-09 15:20:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png
+[2025-09-09 15:20:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png
+[2025-09-09 15:20:32] [Rank 0] step:4501/10000 train_time:220947ms step_avg:49.09ms
+[2025-09-09 15:20:33] [Rank 0] step:4521/10000 train_time:221780ms step_avg:49.06ms
+[2025-09-09 15:20:33] [Rank 0] step:4541/10000 train_time:222601ms step_avg:49.02ms
+[2025-09-09 15:20:34] [Rank 0] step:4561/10000 train_time:223422ms step_avg:48.99ms
+[2025-09-09 15:20:35] [Rank 0] step:4581/10000 train_time:224243ms step_avg:48.95ms
+[2025-09-09 15:20:36] [Rank 0] step:4601/10000 train_time:225064ms step_avg:48.92ms
+[2025-09-09 15:20:37] [Rank 0] step:4621/10000 train_time:225885ms step_avg:48.88ms
+[2025-09-09 15:20:38] [Rank 0] step:4641/10000 train_time:226858ms step_avg:48.88ms
+[2025-09-09 15:20:39] [Rank 0] step:4661/10000 train_time:228005ms step_avg:48.92ms
+[2025-09-09 15:20:40] [Rank 0] step:4681/10000 train_time:228826ms step_avg:48.88ms
+[2025-09-09 15:20:40] [Rank 0] step:4701/10000 train_time:229647ms step_avg:48.85ms
+[2025-09-09 15:20:41] [Rank 0] step:4721/10000 train_time:230470ms step_avg:48.82ms
+[2025-09-09 15:20:42] [Rank 0] step:4741/10000 train_time:231289ms step_avg:48.78ms
+[2025-09-09 15:20:43] [Rank 0] step:4761/10000 train_time:232110ms step_avg:48.75ms
+[2025-09-09 15:20:44] [Rank 0] step:4781/10000 train_time:232931ms step_avg:48.72ms
+[2025-09-09 15:20:44] [Rank 0] step:4801/10000 train_time:233751ms step_avg:48.69ms
+[2025-09-09 15:20:45] [Rank 0] step:4821/10000 train_time:234571ms step_avg:48.66ms
+[2025-09-09 15:20:46] [Rank 0] step:4841/10000 train_time:235704ms step_avg:48.69ms
+[2025-09-09 15:20:47] [Rank 0] step:4861/10000 train_time:236526ms step_avg:48.66ms
+[2025-09-09 15:20:48] [Rank 0] step:4881/10000 train_time:237347ms step_avg:48.63ms
+[2025-09-09 15:20:49] [Rank 0] step:4901/10000 train_time:238167ms step_avg:48.60ms
+[2025-09-09 15:20:50] [Rank 0] step:4921/10000 train_time:238987ms step_avg:48.56ms
+[2025-09-09 15:20:51] [Rank 0] step:4941/10000 train_time:239807ms step_avg:48.53ms
+[2025-09-09 15:20:51] [Rank 0] step:4961/10000 train_time:240628ms step_avg:48.50ms
+[2025-09-09 15:20:52] [Rank 0] step:4981/10000 train_time:241449ms step_avg:48.47ms
+[2025-09-09 15:20:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:20:53] [Rank 0] PRINT: step:5000/10000 train_loss:0.6445 val_loss:0.6292 train_time:242272ms step_avg:48.45ms
+[2025-09-09 15:20:53] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:20:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:22:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:22:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:22:15] [Rank 0] Total Loss: 4.9003
+[2025-09-09 15:22:15] [Rank 0] Total FTA (Unweighted): 0.9556
+[2025-09-09 15:22:15] [Rank 0] Total FTA (Weighted): 0.9556
+[2025-09-09 15:22:15] [Rank 0] Group 0 Loss: 4.9391
+[2025-09-09 15:22:15] [Rank 0] Group 1 Loss: 4.6261
+[2025-09-09 15:22:15] [Rank 0] Group 2 Loss: 4.3369
+[2025-09-09 15:22:15] [Rank 0] Group 3 Loss: 4.7882
+[2025-09-09 15:22:15] [Rank 0] Group 4 Loss: 4.7383
+[2025-09-09 15:22:15] [Rank 0] Group 5 Loss: 4.8047
+[2025-09-09 15:22:15] [Rank 0] Group 6 Loss: 4.7285
+[2025-09-09 15:22:15] [Rank 0] Group 7 Loss: 4.8797
+[2025-09-09 15:22:15] [Rank 0] Group 8 Loss: 4.9548
+[2025-09-09 15:22:15] [Rank 0] Group 9 Loss: 4.9244
+[2025-09-09 15:22:15] [Rank 0] Group 10 Loss: 5.0304
+[2025-09-09 15:22:15] [Rank 0] Group 11 Loss: 5.0911
+[2025-09-09 15:22:15] [Rank 0] Group 12 Loss: 5.0507
+[2025-09-09 15:22:15] [Rank 0] Group 13 Loss: 5.2015
+[2025-09-09 15:22:15] [Rank 0] Group 14 Loss: 5.1520
+[2025-09-09 15:22:15] [Rank 0] Group 15 Loss: 5.1583
+[2025-09-09 15:22:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 15:22:15] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 15:22:15] [Rank 0] Group 14 FTA: 0.8400
+[2025-09-09 15:22:15] [Rank 0] Group 15 FTA: 0.4600
+[2025-09-09 15:22:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png
+[2025-09-09 15:22:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png
+[2025-09-09 15:22:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png
+[2025-09-09 15:22:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png
+[2025-09-09 15:22:17] [Rank 0] step:5001/10000 train_time:242288ms step_avg:48.45ms
+[2025-09-09 15:22:18] [Rank 0] step:5021/10000 train_time:243129ms step_avg:48.42ms
+[2025-09-09 15:22:18] [Rank 0] step:5041/10000 train_time:243950ms step_avg:48.39ms
+[2025-09-09 15:22:19] [Rank 0] step:5061/10000 train_time:244771ms step_avg:48.36ms
+[2025-09-09 15:22:20] [Rank 0] step:5081/10000 train_time:245592ms step_avg:48.34ms
+[2025-09-09 15:22:21] [Rank 0] step:5101/10000 train_time:246515ms step_avg:48.33ms
+[2025-09-09 15:22:22] [Rank 0] step:5121/10000 train_time:247338ms step_avg:48.30ms
+[2025-09-09 15:22:23] [Rank 0] step:5141/10000 train_time:248159ms step_avg:48.27ms
+[2025-09-09 15:22:23] [Rank 0] step:5161/10000 train_time:248980ms step_avg:48.24ms
+[2025-09-09 15:22:24] [Rank 0] step:5181/10000 train_time:249802ms step_avg:48.21ms
+[2025-09-09 15:22:25] [Rank 0] step:5201/10000 train_time:250623ms step_avg:48.19ms
+[2025-09-09 15:22:26] [Rank 0] step:5221/10000 train_time:251445ms step_avg:48.16ms
+[2025-09-09 15:22:27] [Rank 0] step:5241/10000 train_time:252265ms step_avg:48.13ms
+[2025-09-09 15:22:28] [Rank 0] step:5261/10000 train_time:253086ms step_avg:48.11ms
+[2025-09-09 15:22:28] [Rank 0] step:5281/10000 train_time:253908ms step_avg:48.08ms
+[2025-09-09 15:22:29] [Rank 0] step:5301/10000 train_time:254730ms step_avg:48.05ms
+[2025-09-09 15:22:30] [Rank 0] step:5321/10000 train_time:255550ms step_avg:48.03ms
+[2025-09-09 15:22:31] [Rank 0] step:5341/10000 train_time:256375ms step_avg:48.00ms
+[2025-09-09 15:22:32] [Rank 0] step:5361/10000 train_time:257192ms step_avg:47.97ms
+[2025-09-09 15:22:33] [Rank 0] step:5381/10000 train_time:258014ms step_avg:47.95ms
+[2025-09-09 15:22:33] [Rank 0] step:5401/10000 train_time:258834ms step_avg:47.92ms
+[2025-09-09 15:22:34] [Rank 0] step:5421/10000 train_time:259656ms step_avg:47.90ms
+[2025-09-09 15:22:35] [Rank 0] step:5441/10000 train_time:260477ms step_avg:47.87ms
+[2025-09-09 15:22:36] [Rank 0] step:5461/10000 train_time:261298ms step_avg:47.85ms
+[2025-09-09 15:22:37] [Rank 0] step:5481/10000 train_time:262119ms step_avg:47.82ms
+[2025-09-09 15:22:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:22:38] [Rank 0] PRINT: step:5500/10000 train_loss:0.6373 val_loss:0.6236 train_time:262943ms step_avg:47.81ms
+[2025-09-09 15:22:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:22:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:23:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:23:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:23:59] [Rank 0] Total Loss: 4.8730
+[2025-09-09 15:23:59] [Rank 0] Total FTA (Unweighted): 0.9575
+[2025-09-09 15:23:59] [Rank 0] Total FTA (Weighted): 0.9575
+[2025-09-09 15:23:59] [Rank 0] Group 0 Loss: 4.9442
+[2025-09-09 15:23:59] [Rank 0] Group 1 Loss: 4.5320
+[2025-09-09 15:23:59] [Rank 0] Group 2 Loss: 4.3449
+[2025-09-09 15:23:59] [Rank 0] Group 3 Loss: 4.7741
+[2025-09-09 15:23:59] [Rank 0] Group 4 Loss: 4.7353
+[2025-09-09 15:23:59] [Rank 0] Group 5 Loss: 4.8212
+[2025-09-09 15:23:59] [Rank 0] Group 6 Loss: 4.6985
+[2025-09-09 15:23:59] [Rank 0] Group 7 Loss: 4.8572
+[2025-09-09 15:23:59] [Rank 0] Group 8 Loss: 4.9223
+[2025-09-09 15:23:59] [Rank 0] Group 9 Loss: 4.8893
+[2025-09-09 15:23:59] [Rank 0] Group 10 Loss: 5.0178
+[2025-09-09 15:23:59] [Rank 0] Group 11 Loss: 5.0686
+[2025-09-09 15:23:59] [Rank 0] Group 12 Loss: 5.0366
+[2025-09-09 15:23:59] [Rank 0] Group 13 Loss: 5.1210
+[2025-09-09 15:23:59] [Rank 0] Group 14 Loss: 5.0891
+[2025-09-09 15:23:59] [Rank 0] Group 15 Loss: 5.1158
+[2025-09-09 15:23:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:23:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:23:59] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:23:59] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:23:59] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:23:59] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:23:59] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:23:59] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:23:59] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:23:59] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:23:59] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 15:23:59] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-09 15:23:59] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 15:23:59] [Rank 0] Group 13 FTA: 0.9700
+[2025-09-09 15:23:59] [Rank 0] Group 14 FTA: 0.8300
+[2025-09-09 15:23:59] [Rank 0] Group 15 FTA: 0.5300
+[2025-09-09 15:24:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png
+[2025-09-09 15:24:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png
+[2025-09-09 15:24:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png
+[2025-09-09 15:24:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png
+[2025-09-09 15:24:01] [Rank 0] step:5501/10000 train_time:262958ms step_avg:47.80ms
+[2025-09-09 15:24:02] [Rank 0] step:5521/10000 train_time:263798ms step_avg:47.78ms
+[2025-09-09 15:24:02] [Rank 0] step:5541/10000 train_time:264618ms step_avg:47.76ms
+[2025-09-09 15:24:03] [Rank 0] step:5561/10000 train_time:265438ms step_avg:47.73ms
+[2025-09-09 15:24:04] [Rank 0] step:5581/10000 train_time:266257ms step_avg:47.71ms
+[2025-09-09 15:24:05] [Rank 0] step:5601/10000 train_time:267077ms step_avg:47.68ms
+[2025-09-09 15:24:06] [Rank 0] step:5621/10000 train_time:267897ms step_avg:47.66ms
+[2025-09-09 15:24:07] [Rank 0] step:5641/10000 train_time:269394ms step_avg:47.76ms
+[2025-09-09 15:24:08] [Rank 0] step:5661/10000 train_time:270214ms step_avg:47.73ms
+[2025-09-09 15:24:09] [Rank 0] step:5681/10000 train_time:271034ms step_avg:47.71ms
+[2025-09-09 15:24:10] [Rank 0] step:5701/10000 train_time:271853ms step_avg:47.69ms
+[2025-09-09 15:24:10] [Rank 0] step:5721/10000 train_time:272673ms step_avg:47.66ms
+[2025-09-09 15:24:11] [Rank 0] step:5741/10000 train_time:273492ms step_avg:47.64ms
+[2025-09-09 15:24:12] [Rank 0] step:5761/10000 train_time:274314ms step_avg:47.62ms
+[2025-09-09 15:24:13] [Rank 0] step:5781/10000 train_time:275135ms step_avg:47.59ms
+[2025-09-09 15:24:14] [Rank 0] step:5801/10000 train_time:275955ms step_avg:47.57ms
+[2025-09-09 15:24:15] [Rank 0] step:5821/10000 train_time:276776ms step_avg:47.55ms
+[2025-09-09 15:24:15] [Rank 0] step:5841/10000 train_time:277598ms step_avg:47.53ms
+[2025-09-09 15:24:16] [Rank 0] step:5861/10000 train_time:278418ms step_avg:47.50ms
+[2025-09-09 15:24:17] [Rank 0] step:5881/10000 train_time:279240ms step_avg:47.48ms
+[2025-09-09 15:24:18] [Rank 0] step:5901/10000 train_time:280061ms step_avg:47.46ms
+[2025-09-09 15:24:19] [Rank 0] step:5921/10000 train_time:280883ms step_avg:47.44ms
+[2025-09-09 15:24:19] [Rank 0] step:5941/10000 train_time:281705ms step_avg:47.42ms
+[2025-09-09 15:24:20] [Rank 0] step:5961/10000 train_time:282527ms step_avg:47.40ms
+[2025-09-09 15:24:21] [Rank 0] step:5981/10000 train_time:283349ms step_avg:47.37ms
+[2025-09-09 15:24:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:24:22] [Rank 0] PRINT: step:6000/10000 train_loss:0.6309 val_loss:0.6190 train_time:284172ms step_avg:47.36ms
+[2025-09-09 15:24:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:24:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:25:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:25:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:25:44] [Rank 0] Total Loss: 4.9333
+[2025-09-09 15:25:44] [Rank 0] Total FTA (Unweighted): 0.9681
+[2025-09-09 15:25:44] [Rank 0] Total FTA (Weighted): 0.9681
+[2025-09-09 15:25:44] [Rank 0] Group 0 Loss: 4.8886
+[2025-09-09 15:25:44] [Rank 0] Group 1 Loss: 4.6339
+[2025-09-09 15:25:44] [Rank 0] Group 2 Loss: 4.3832
+[2025-09-09 15:25:44] [Rank 0] Group 3 Loss: 4.8030
+[2025-09-09 15:25:44] [Rank 0] Group 4 Loss: 4.7703
+[2025-09-09 15:25:44] [Rank 0] Group 5 Loss: 4.8526
+[2025-09-09 15:25:44] [Rank 0] Group 6 Loss: 4.8073
+[2025-09-09 15:25:44] [Rank 0] Group 7 Loss: 4.9215
+[2025-09-09 15:25:44] [Rank 0] Group 8 Loss: 4.9848
+[2025-09-09 15:25:44] [Rank 0] Group 9 Loss: 4.9520
+[2025-09-09 15:25:44] [Rank 0] Group 10 Loss: 5.1087
+[2025-09-09 15:25:44] [Rank 0] Group 11 Loss: 5.1722
+[2025-09-09 15:25:44] [Rank 0] Group 12 Loss: 5.0898
+[2025-09-09 15:25:44] [Rank 0] Group 13 Loss: 5.2196
+[2025-09-09 15:25:44] [Rank 0] Group 14 Loss: 5.1761
+[2025-09-09 15:25:44] [Rank 0] Group 15 Loss: 5.1697
+[2025-09-09 15:25:44] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:25:44] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:25:44] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:25:44] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:25:44] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:25:44] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:25:44] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:25:44] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:25:44] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:25:44] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:25:44] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-09 15:25:44] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 15:25:44] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-09 15:25:44] [Rank 0] Group 13 FTA: 0.9900
+[2025-09-09 15:25:44] [Rank 0] Group 14 FTA: 0.9100
+[2025-09-09 15:25:44] [Rank 0] Group 15 FTA: 0.6200
+[2025-09-09 15:25:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png
+[2025-09-09 15:25:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png
+[2025-09-09 15:25:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png
+[2025-09-09 15:25:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png
+[2025-09-09 15:25:45] [Rank 0] step:6001/10000 train_time:284188ms step_avg:47.36ms
+[2025-09-09 15:25:46] [Rank 0] step:6021/10000 train_time:285278ms step_avg:47.38ms
+[2025-09-09 15:25:47] [Rank 0] step:6041/10000 train_time:286096ms step_avg:47.36ms
+[2025-09-09 15:25:48] [Rank 0] step:6061/10000 train_time:286917ms step_avg:47.34ms
+[2025-09-09 15:25:49] [Rank 0] step:6081/10000 train_time:287738ms step_avg:47.32ms
+[2025-09-09 15:25:50] [Rank 0] step:6101/10000 train_time:288558ms step_avg:47.30ms
+[2025-09-09 15:25:51] [Rank 0] step:6121/10000 train_time:289378ms step_avg:47.28ms
+[2025-09-09 15:25:51] [Rank 0] step:6141/10000 train_time:290199ms step_avg:47.26ms
+[2025-09-09 15:25:52] [Rank 0] step:6161/10000 train_time:291121ms step_avg:47.25ms
+[2025-09-09 15:25:53] [Rank 0] step:6181/10000 train_time:291941ms step_avg:47.23ms
+[2025-09-09 15:25:54] [Rank 0] step:6201/10000 train_time:293235ms step_avg:47.29ms
+[2025-09-09 15:25:55] [Rank 0] step:6221/10000 train_time:294055ms step_avg:47.27ms
+[2025-09-09 15:25:56] [Rank 0] step:6241/10000 train_time:294878ms step_avg:47.25ms
+[2025-09-09 15:25:57] [Rank 0] step:6261/10000 train_time:295697ms step_avg:47.23ms
+[2025-09-09 15:25:58] [Rank 0] step:6281/10000 train_time:296518ms step_avg:47.21ms
+[2025-09-09 15:25:59] [Rank 0] step:6301/10000 train_time:297339ms step_avg:47.19ms
+[2025-09-09 15:25:59] [Rank 0] step:6321/10000 train_time:298160ms step_avg:47.17ms
+[2025-09-09 15:26:00] [Rank 0] step:6341/10000 train_time:298981ms step_avg:47.15ms
+[2025-09-09 15:26:01] [Rank 0] step:6361/10000 train_time:299804ms step_avg:47.13ms
+[2025-09-09 15:26:02] [Rank 0] step:6381/10000 train_time:300623ms step_avg:47.11ms
+[2025-09-09 15:26:03] [Rank 0] step:6401/10000 train_time:301445ms step_avg:47.09ms
+[2025-09-09 15:26:03] [Rank 0] step:6421/10000 train_time:302265ms step_avg:47.07ms
+[2025-09-09 15:26:04] [Rank 0] step:6441/10000 train_time:303085ms step_avg:47.06ms
+[2025-09-09 15:26:05] [Rank 0] step:6461/10000 train_time:303906ms step_avg:47.04ms
+[2025-09-09 15:26:06] [Rank 0] step:6481/10000 train_time:304726ms step_avg:47.02ms
+[2025-09-09 15:26:07] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:26:07] [Rank 0] PRINT: step:6500/10000 train_loss:0.6256 val_loss:0.6150 train_time:305549ms step_avg:47.01ms
+[2025-09-09 15:26:07] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:26:07] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:27:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:27:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:27:29] [Rank 0] Total Loss: 4.9318
+[2025-09-09 15:27:29] [Rank 0] Total FTA (Unweighted): 0.9819
+[2025-09-09 15:27:29] [Rank 0] Total FTA (Weighted): 0.9819
+[2025-09-09 15:27:29] [Rank 0] Group 0 Loss: 4.8658
+[2025-09-09 15:27:29] [Rank 0] Group 1 Loss: 4.6153
+[2025-09-09 15:27:29] [Rank 0] Group 2 Loss: 4.3822
+[2025-09-09 15:27:29] [Rank 0] Group 3 Loss: 4.8175
+[2025-09-09 15:27:29] [Rank 0] Group 4 Loss: 4.7622
+[2025-09-09 15:27:29] [Rank 0] Group 5 Loss: 4.8456
+[2025-09-09 15:27:29] [Rank 0] Group 6 Loss: 4.7835
+[2025-09-09 15:27:29] [Rank 0] Group 7 Loss: 4.9107
+[2025-09-09 15:27:29] [Rank 0] Group 8 Loss: 4.9964
+[2025-09-09 15:27:29] [Rank 0] Group 9 Loss: 4.9603
+[2025-09-09 15:27:29] [Rank 0] Group 10 Loss: 5.1223
+[2025-09-09 15:27:29] [Rank 0] Group 11 Loss: 5.1760
+[2025-09-09 15:27:29] [Rank 0] Group 12 Loss: 5.0949
+[2025-09-09 15:27:29] [Rank 0] Group 13 Loss: 5.1909
+[2025-09-09 15:27:29] [Rank 0] Group 14 Loss: 5.2020
+[2025-09-09 15:27:29] [Rank 0] Group 15 Loss: 5.1825
+[2025-09-09 15:27:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 15:27:29] [Rank 0] Group 14 FTA: 0.9600
+[2025-09-09 15:27:29] [Rank 0] Group 15 FTA: 0.7500
+[2025-09-09 15:27:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png
+[2025-09-09 15:27:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png
+[2025-09-09 15:27:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png
+[2025-09-09 15:27:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png
+[2025-09-09 15:27:31] [Rank 0] step:6501/10000 train_time:305567ms step_avg:47.00ms
+[2025-09-09 15:27:32] [Rank 0] step:6521/10000 train_time:306393ms step_avg:46.99ms
+[2025-09-09 15:27:33] [Rank 0] step:6541/10000 train_time:307213ms step_avg:46.97ms
+[2025-09-09 15:27:33] [Rank 0] step:6561/10000 train_time:308034ms step_avg:46.95ms
+[2025-09-09 15:27:34] [Rank 0] step:6581/10000 train_time:308854ms step_avg:46.93ms
+[2025-09-09 15:27:35] [Rank 0] step:6601/10000 train_time:309674ms step_avg:46.91ms
+[2025-09-09 15:27:36] [Rank 0] step:6621/10000 train_time:310494ms step_avg:46.90ms
+[2025-09-09 15:27:37] [Rank 0] step:6641/10000 train_time:311315ms step_avg:46.88ms
+[2025-09-09 15:27:37] [Rank 0] step:6661/10000 train_time:312135ms step_avg:46.86ms
+[2025-09-09 15:27:38] [Rank 0] step:6681/10000 train_time:312956ms step_avg:46.84ms
+[2025-09-09 15:27:39] [Rank 0] step:6701/10000 train_time:313779ms step_avg:46.83ms
+[2025-09-09 15:27:40] [Rank 0] step:6721/10000 train_time:314598ms step_avg:46.81ms
+[2025-09-09 15:27:41] [Rank 0] step:6741/10000 train_time:315418ms step_avg:46.79ms
+[2025-09-09 15:27:42] [Rank 0] step:6761/10000 train_time:316238ms step_avg:46.77ms
+[2025-09-09 15:27:42] [Rank 0] step:6781/10000 train_time:317059ms step_avg:46.76ms
+[2025-09-09 15:27:43] [Rank 0] step:6801/10000 train_time:317879ms step_avg:46.74ms
+[2025-09-09 15:27:44] [Rank 0] step:6821/10000 train_time:318700ms step_avg:46.72ms
+[2025-09-09 15:27:45] [Rank 0] step:6841/10000 train_time:319588ms step_avg:46.72ms
+[2025-09-09 15:27:46] [Rank 0] step:6861/10000 train_time:320408ms step_avg:46.70ms
+[2025-09-09 15:27:47] [Rank 0] step:6881/10000 train_time:321228ms step_avg:46.68ms
+[2025-09-09 15:27:47] [Rank 0] step:6901/10000 train_time:322049ms step_avg:46.67ms
+[2025-09-09 15:27:48] [Rank 0] step:6921/10000 train_time:322869ms step_avg:46.65ms
+[2025-09-09 15:27:49] [Rank 0] step:6941/10000 train_time:323690ms step_avg:46.63ms
+[2025-09-09 15:27:50] [Rank 0] step:6961/10000 train_time:324511ms step_avg:46.62ms
+[2025-09-09 15:27:51] [Rank 0] step:6981/10000 train_time:325332ms step_avg:46.60ms
+[2025-09-09 15:27:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:27:52] [Rank 0] PRINT: step:7000/10000 train_loss:0.6204 val_loss:0.6121 train_time:326156ms step_avg:46.59ms
+[2025-09-09 15:27:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:27:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:29:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:29:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:29:14] [Rank 0] Total Loss: 4.8804
+[2025-09-09 15:29:14] [Rank 0] Total FTA (Unweighted): 0.9863
+[2025-09-09 15:29:14] [Rank 0] Total FTA (Weighted): 0.9862
+[2025-09-09 15:29:14] [Rank 0] Group 0 Loss: 4.7975
+[2025-09-09 15:29:14] [Rank 0] Group 1 Loss: 4.5298
+[2025-09-09 15:29:14] [Rank 0] Group 2 Loss: 4.3825
+[2025-09-09 15:29:14] [Rank 0] Group 3 Loss: 4.7111
+[2025-09-09 15:29:14] [Rank 0] Group 4 Loss: 4.7607
+[2025-09-09 15:29:14] [Rank 0] Group 5 Loss: 4.8047
+[2025-09-09 15:29:14] [Rank 0] Group 6 Loss: 4.7630
+[2025-09-09 15:29:14] [Rank 0] Group 7 Loss: 4.8564
+[2025-09-09 15:29:14] [Rank 0] Group 8 Loss: 4.9474
+[2025-09-09 15:29:14] [Rank 0] Group 9 Loss: 4.9201
+[2025-09-09 15:29:14] [Rank 0] Group 10 Loss: 5.0450
+[2025-09-09 15:29:14] [Rank 0] Group 11 Loss: 5.1044
+[2025-09-09 15:29:14] [Rank 0] Group 12 Loss: 5.0393
+[2025-09-09 15:29:14] [Rank 0] Group 13 Loss: 5.1720
+[2025-09-09 15:29:14] [Rank 0] Group 14 Loss: 5.1591
+[2025-09-09 15:29:14] [Rank 0] Group 15 Loss: 5.0936
+[2025-09-09 15:29:14] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 15:29:14] [Rank 0] Group 13 FTA: 0.9900
+[2025-09-09 15:29:14] [Rank 0] Group 14 FTA: 0.9800
+[2025-09-09 15:29:14] [Rank 0] Group 15 FTA: 0.8100
+[2025-09-09 15:29:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png
+[2025-09-09 15:29:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png
+[2025-09-09 15:29:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png
+[2025-09-09 15:29:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png
+[2025-09-09 15:29:15] [Rank 0] step:7001/10000 train_time:326172ms step_avg:46.59ms
+[2025-09-09 15:29:16] [Rank 0] step:7021/10000 train_time:326990ms step_avg:46.57ms
+[2025-09-09 15:29:17] [Rank 0] step:7041/10000 train_time:327810ms step_avg:46.56ms
+[2025-09-09 15:29:18] [Rank 0] step:7061/10000 train_time:328630ms step_avg:46.54ms
+[2025-09-09 15:29:18] [Rank 0] step:7081/10000 train_time:329452ms step_avg:46.53ms
+[2025-09-09 15:29:19] [Rank 0] step:7101/10000 train_time:330272ms step_avg:46.51ms
+[2025-09-09 15:29:20] [Rank 0] step:7121/10000 train_time:331093ms step_avg:46.50ms
+[2025-09-09 15:29:21] [Rank 0] step:7141/10000 train_time:331915ms step_avg:46.48ms
+[2025-09-09 15:29:22] [Rank 0] step:7161/10000 train_time:332735ms step_avg:46.46ms
+[2025-09-09 15:29:22] [Rank 0] step:7181/10000 train_time:333556ms step_avg:46.45ms
+[2025-09-09 15:29:23] [Rank 0] step:7201/10000 train_time:334378ms step_avg:46.43ms
+[2025-09-09 15:29:24] [Rank 0] step:7221/10000 train_time:335200ms step_avg:46.42ms
+[2025-09-09 15:29:25] [Rank 0] step:7241/10000 train_time:336020ms step_avg:46.41ms
+[2025-09-09 15:29:26] [Rank 0] step:7261/10000 train_time:336844ms step_avg:46.39ms
+[2025-09-09 15:29:27] [Rank 0] step:7281/10000 train_time:337662ms step_avg:46.38ms
+[2025-09-09 15:29:27] [Rank 0] step:7301/10000 train_time:338483ms step_avg:46.36ms
+[2025-09-09 15:29:28] [Rank 0] step:7321/10000 train_time:339303ms step_avg:46.35ms
+[2025-09-09 15:29:29] [Rank 0] step:7341/10000 train_time:340125ms step_avg:46.33ms
+[2025-09-09 15:29:30] [Rank 0] step:7361/10000 train_time:340948ms step_avg:46.32ms
+[2025-09-09 15:29:31] [Rank 0] step:7381/10000 train_time:341769ms step_avg:46.30ms
+[2025-09-09 15:29:32] [Rank 0] step:7401/10000 train_time:342590ms step_avg:46.29ms
+[2025-09-09 15:29:32] [Rank 0] step:7421/10000 train_time:343411ms step_avg:46.28ms
+[2025-09-09 15:29:33] [Rank 0] step:7441/10000 train_time:344232ms step_avg:46.26ms
+[2025-09-09 15:29:34] [Rank 0] step:7461/10000 train_time:345052ms step_avg:46.25ms
+[2025-09-09 15:29:35] [Rank 0] step:7481/10000 train_time:345872ms step_avg:46.23ms
+[2025-09-09 15:29:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:29:36] [Rank 0] PRINT: step:7500/10000 train_loss:0.6163 val_loss:0.6099 train_time:346696ms step_avg:46.23ms +[2025-09-09 15:29:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:29:36] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:30:58] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:30:58] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:30:58] [Rank 0] Total Loss: 4.9329 +[2025-09-09 15:30:58] [Rank 0] Total FTA (Unweighted): 0.9906 +[2025-09-09 15:30:58] [Rank 0] Total FTA (Weighted): 0.9906 +[2025-09-09 15:30:58] [Rank 0] Group 0 Loss: 4.8786 +[2025-09-09 15:30:58] [Rank 0] Group 1 Loss: 4.5758 +[2025-09-09 15:30:58] [Rank 0] Group 2 Loss: 4.4290 +[2025-09-09 15:30:58] [Rank 0] Group 3 Loss: 4.7640 +[2025-09-09 15:30:58] [Rank 0] Group 4 Loss: 4.7722 +[2025-09-09 15:30:58] [Rank 0] Group 5 Loss: 4.8747 +[2025-09-09 15:30:58] [Rank 0] Group 6 Loss: 4.7985 +[2025-09-09 15:30:58] [Rank 0] Group 7 Loss: 4.8837 +[2025-09-09 15:30:58] [Rank 0] Group 8 Loss: 5.0002 +[2025-09-09 15:30:58] [Rank 0] Group 9 Loss: 4.9632 +[2025-09-09 15:30:58] [Rank 0] Group 10 Loss: 5.1171 +[2025-09-09 15:30:58] [Rank 0] Group 11 Loss: 5.1669 +[2025-09-09 15:30:58] [Rank 0] Group 12 Loss: 5.0937 +[2025-09-09 15:30:58] [Rank 0] Group 13 Loss: 5.2010 +[2025-09-09 15:30:58] [Rank 0] Group 14 Loss: 5.2202 +[2025-09-09 15:30:58] [Rank 0] Group 15 Loss: 5.1872 +[2025-09-09 15:30:58] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:30:58] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:30:58] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:30:58] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:30:58] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:30:58] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:30:58] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:30:58] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:30:58] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:30:58] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 15:30:58] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 15:30:58] [Rank 0] Group 11 FTA: 0.9900 +[2025-09-09 15:30:58] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-09 15:30:58] [Rank 0] Group 13 FTA: 0.9900 +[2025-09-09 15:30:58] [Rank 0] Group 14 FTA: 1.0000 +[2025-09-09 15:30:58] [Rank 0] Group 15 FTA: 0.8700 +[2025-09-09 15:30:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:30:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:30:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:30:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:30:59] [Rank 0] step:7501/10000 train_time:346712ms step_avg:46.22ms +[2025-09-09 15:31:00] [Rank 0] step:7521/10000 train_time:347533ms step_avg:46.21ms +[2025-09-09 15:31:01] [Rank 0] step:7541/10000 train_time:348355ms step_avg:46.19ms +[2025-09-09 15:31:02] [Rank 0] step:7561/10000 train_time:349176ms step_avg:46.18ms +[2025-09-09 15:31:03] [Rank 0] step:7581/10000 train_time:349998ms step_avg:46.17ms +[2025-09-09 15:31:04] [Rank 0] step:7601/10000 train_time:350819ms step_avg:46.15ms +[2025-09-09 15:31:04] [Rank 0] step:7621/10000 train_time:351640ms step_avg:46.14ms +[2025-09-09 15:31:06]
[Rank 0] step:7641/10000 train_time:353151ms step_avg:46.22ms +[2025-09-09 15:31:07] [Rank 0] step:7661/10000 train_time:353973ms step_avg:46.20ms +[2025-09-09 15:31:07] [Rank 0] step:7681/10000 train_time:354794ms step_avg:46.19ms +[2025-09-09 15:31:08] [Rank 0] step:7701/10000 train_time:355616ms step_avg:46.18ms +[2025-09-09 15:31:09] [Rank 0] step:7721/10000 train_time:356436ms step_avg:46.16ms +[2025-09-09 15:31:10] [Rank 0] step:7741/10000 train_time:357257ms step_avg:46.15ms +[2025-09-09 15:31:11] [Rank 0] step:7761/10000 train_time:358513ms step_avg:46.19ms +[2025-09-09 15:31:12] [Rank 0] step:7781/10000 train_time:359335ms step_avg:46.18ms +[2025-09-09 15:31:13] [Rank 0] step:7801/10000 train_time:360160ms step_avg:46.17ms +[2025-09-09 15:31:14] [Rank 0] step:7821/10000 train_time:360985ms step_avg:46.16ms +[2025-09-09 15:31:14] [Rank 0] step:7841/10000 train_time:361805ms step_avg:46.14ms +[2025-09-09 15:31:15] [Rank 0] step:7861/10000 train_time:362625ms step_avg:46.13ms +[2025-09-09 15:31:16] [Rank 0] step:7881/10000 train_time:363446ms step_avg:46.12ms +[2025-09-09 15:31:17] [Rank 0] step:7901/10000 train_time:364266ms step_avg:46.10ms +[2025-09-09 15:31:18] [Rank 0] step:7921/10000 train_time:365087ms step_avg:46.09ms +[2025-09-09 15:31:19] [Rank 0] step:7941/10000 train_time:365908ms step_avg:46.08ms +[2025-09-09 15:31:19] [Rank 0] step:7961/10000 train_time:366730ms step_avg:46.07ms +[2025-09-09 15:31:20] [Rank 0] step:7981/10000 train_time:367550ms step_avg:46.05ms +[2025-09-09 15:31:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:31:22] [Rank 0] PRINT: step:8000/10000 train_loss:0.6131 val_loss:0.6084 train_time:368373ms step_avg:46.05ms +[2025-09-09 15:31:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:31:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:32:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:32:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:32:43] [Rank 0] Total Loss: 4.9010 +[2025-09-09 15:32:43] [Rank 0] Total FTA (Unweighted): 0.9944 +[2025-09-09 15:32:43] [Rank 0] Total FTA (Weighted): 0.9944 +[2025-09-09 15:32:43] [Rank 0] Group 0 Loss: 4.7886 +[2025-09-09 15:32:43] [Rank 0] Group 1 Loss: 4.5273 +[2025-09-09 15:32:43] [Rank 0] Group 2 Loss: 4.4009 +[2025-09-09 15:32:43] [Rank 0] Group 3 Loss: 4.7473 +[2025-09-09 15:32:43] [Rank 0] Group 4 Loss: 4.7425 +[2025-09-09 15:32:43] [Rank 0] Group 5 Loss: 4.8369 +[2025-09-09 15:32:43] [Rank 0] Group 6 Loss: 4.7885 +[2025-09-09 15:32:43] [Rank 0] Group 7 Loss: 4.8553 +[2025-09-09 15:32:43] [Rank 0] Group 8 Loss: 4.9793 +[2025-09-09 15:32:43] [Rank 0] Group 9 Loss: 4.9562 +[2025-09-09 15:32:43] [Rank 0] Group 10 Loss: 5.0958 +[2025-09-09 15:32:43] [Rank 0] Group 11 Loss: 5.1416 +[2025-09-09 15:32:43] [Rank 0] Group 12 Loss: 5.0650 +[2025-09-09 15:32:43] [Rank 0] Group 13 Loss: 5.1824 +[2025-09-09 15:32:43] [Rank 0] Group 14 Loss: 5.1623 +[2025-09-09 15:32:43] [Rank 0] Group 15 Loss: 5.1457 +[2025-09-09 15:32:43] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:32:43] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 10 FTA: 0.9900 +[2025-09-09 15:32:43] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 13 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 14 FTA: 1.0000 +[2025-09-09 15:32:43] [Rank 0] Group 15 FTA: 0.9200 +[2025-09-09 15:32:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:32:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:32:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:32:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:32:45] [Rank 0] step:8001/10000 train_time:368391ms step_avg:46.04ms +[2025-09-09 15:32:46] [Rank 0] step:8021/10000 train_time:369495ms step_avg:46.07ms +[2025-09-09 15:32:47] [Rank 0] step:8041/10000 train_time:370316ms step_avg:46.05ms +[2025-09-09 15:32:48] [Rank 0] step:8061/10000 train_time:371137ms step_avg:46.04ms +[2025-09-09 15:32:48] [Rank 0] step:8081/10000 train_time:371958ms step_avg:46.03ms +[2025-09-09 15:32:49] [Rank 0] step:8101/10000 train_time:372779ms step_avg:46.02ms +[2025-09-09 15:32:50] [Rank 0] step:8121/10000 train_time:373602ms step_avg:46.00ms +[2025-09-09 15:32:51]
[Rank 0] step:8141/10000 train_time:374421ms step_avg:45.99ms +[2025-09-09 15:32:52] [Rank 0] step:8161/10000 train_time:375242ms step_avg:45.98ms +[2025-09-09 15:32:53] [Rank 0] step:8181/10000 train_time:376063ms step_avg:45.97ms +[2025-09-09 15:32:53] [Rank 0] step:8201/10000 train_time:376884ms step_avg:45.96ms +[2025-09-09 15:32:54] [Rank 0] step:8221/10000 train_time:377705ms step_avg:45.94ms +[2025-09-09 15:32:55] [Rank 0] step:8241/10000 train_time:378526ms step_avg:45.93ms +[2025-09-09 15:32:56] [Rank 0] step:8261/10000 train_time:379348ms step_avg:45.92ms +[2025-09-09 15:32:57] [Rank 0] step:8281/10000 train_time:380169ms step_avg:45.91ms +[2025-09-09 15:32:57] [Rank 0] step:8301/10000 train_time:380990ms step_avg:45.90ms +[2025-09-09 15:32:58] [Rank 0] step:8321/10000 train_time:381810ms step_avg:45.89ms +[2025-09-09 15:32:59] [Rank 0] step:8341/10000 train_time:382630ms step_avg:45.87ms +[2025-09-09 15:33:00] [Rank 0] step:8361/10000 train_time:383451ms step_avg:45.86ms +[2025-09-09 15:33:01] [Rank 0] step:8381/10000 train_time:384273ms step_avg:45.85ms +[2025-09-09 15:33:02] [Rank 0] step:8401/10000 train_time:385093ms step_avg:45.84ms +[2025-09-09 15:33:02] [Rank 0] step:8421/10000 train_time:385915ms step_avg:45.83ms +[2025-09-09 15:33:03] [Rank 0] step:8441/10000 train_time:386736ms step_avg:45.82ms +[2025-09-09 15:33:04] [Rank 0] step:8461/10000 train_time:387559ms step_avg:45.81ms +[2025-09-09 15:33:05] [Rank 0] step:8481/10000 train_time:388378ms step_avg:45.79ms +[2025-09-09 15:33:06] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:33:06] [Rank 0] PRINT: step:8500/10000 train_loss:0.6110 val_loss:0.6069 train_time:389201ms step_avg:45.79ms +[2025-09-09 15:33:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:33:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:34:28] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:34:28] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:34:28] [Rank 0] Total Loss: 4.9810 +[2025-09-09 15:34:28] [Rank 0] Total FTA (Unweighted): 0.9956 +[2025-09-09 15:34:28] [Rank 0] Total FTA (Weighted): 0.9956 +[2025-09-09 15:34:28] [Rank 0] Group 0 Loss: 4.9557 +[2025-09-09 15:34:28] [Rank 0] Group 1 Loss: 4.5734 +[2025-09-09 15:34:28] [Rank 0] Group 2 Loss: 4.4197 +[2025-09-09 15:34:28] [Rank 0] Group 3 Loss: 4.8444 +[2025-09-09 15:34:28] [Rank 0] Group 4 Loss: 4.8777 +[2025-09-09 15:34:28] [Rank 0] Group 5 Loss: 4.9268 +[2025-09-09 15:34:28] [Rank 0] Group 6 Loss: 4.8550 +[2025-09-09 15:34:28] [Rank 0] Group 7 Loss: 4.9749 +[2025-09-09 15:34:28] [Rank 0] Group 8 Loss: 5.0607 +[2025-09-09 15:34:28] [Rank 0] Group 9 Loss: 5.0110 +[2025-09-09 15:34:28] [Rank 0] Group 10 Loss: 5.1344 +[2025-09-09 15:34:28] [Rank 0] Group 11 Loss: 5.1956 +[2025-09-09 15:34:28] [Rank 0] Group 12 Loss: 5.1276 +[2025-09-09 15:34:28] [Rank 0] Group 13 Loss: 5.2398 +[2025-09-09 15:34:28] [Rank 0] Group 14 Loss: 5.2441 +[2025-09-09 15:34:28] [Rank 0] Group 15 Loss: 5.2546 +[2025-09-09 15:34:28] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:34:28] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:34:28] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:34:28] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:34:28] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:34:28] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:34:28] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:34:28] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:34:28] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:34:28] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 15:34:28] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 15:34:28] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-09 15:34:28] [Rank 0] Group 12 FTA: 0.9900 +[2025-09-09 15:34:28] [Rank 0] Group 13 FTA: 0.9900 +[2025-09-09 15:34:28] [Rank 0] Group 14 FTA: 0.9900 +[2025-09-09 15:34:28] [Rank 0] Group 15 FTA: 0.9600 +[2025-09-09 15:34:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:34:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:34:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:34:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:34:29] [Rank 0] step:8501/10000 train_time:389220ms step_avg:45.79ms +[2025-09-09 15:34:30] [Rank 0] step:8521/10000 train_time:390042ms step_avg:45.77ms +[2025-09-09 15:34:31] [Rank 0] step:8541/10000 train_time:390863ms step_avg:45.76ms +[2025-09-09 15:34:32] [Rank 0] step:8561/10000 train_time:391684ms step_avg:45.75ms +[2025-09-09 15:34:33] [Rank 0] step:8581/10000 train_time:392505ms step_avg:45.74ms +[2025-09-09 15:34:34] [Rank 0] step:8601/10000 train_time:393328ms step_avg:45.73ms +[2025-09-09 15:34:34] [Rank 0] step:8621/10000 train_time:394149ms step_avg:45.72ms +[2025-09-09 15:34:35]
[Rank 0] step:8641/10000 train_time:394969ms step_avg:45.71ms +[2025-09-09 15:34:36] [Rank 0] step:8661/10000 train_time:395790ms step_avg:45.70ms +[2025-09-09 15:34:37] [Rank 0] step:8681/10000 train_time:396611ms step_avg:45.69ms +[2025-09-09 15:34:38] [Rank 0] step:8701/10000 train_time:397432ms step_avg:45.68ms +[2025-09-09 15:34:38] [Rank 0] step:8721/10000 train_time:398253ms step_avg:45.67ms +[2025-09-09 15:34:39] [Rank 0] step:8741/10000 train_time:399074ms step_avg:45.66ms +[2025-09-09 15:34:40] [Rank 0] step:8761/10000 train_time:399896ms step_avg:45.64ms +[2025-09-09 15:34:41] [Rank 0] step:8781/10000 train_time:400716ms step_avg:45.63ms +[2025-09-09 15:34:42] [Rank 0] step:8801/10000 train_time:401537ms step_avg:45.62ms +[2025-09-09 15:34:43] [Rank 0] step:8821/10000 train_time:402359ms step_avg:45.61ms +[2025-09-09 15:34:44] [Rank 0] step:8841/10000 train_time:403450ms step_avg:45.63ms +[2025-09-09 15:34:45] [Rank 0] step:8861/10000 train_time:404272ms step_avg:45.62ms +[2025-09-09 15:34:45] [Rank 0] step:8881/10000 train_time:405093ms step_avg:45.61ms +[2025-09-09 15:34:46] [Rank 0] step:8901/10000 train_time:405914ms step_avg:45.60ms +[2025-09-09 15:34:47] [Rank 0] step:8921/10000 train_time:406735ms step_avg:45.59ms +[2025-09-09 15:34:48] [Rank 0] step:8941/10000 train_time:407555ms step_avg:45.58ms +[2025-09-09 15:34:49] [Rank 0] step:8961/10000 train_time:408376ms step_avg:45.57ms +[2025-09-09 15:34:49] [Rank 0] step:8981/10000 train_time:409198ms step_avg:45.56ms +[2025-09-09 15:34:50] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:34:51] [Rank 0] PRINT: step:9000/10000 train_loss:0.6094 val_loss:0.6059 train_time:410021ms step_avg:45.56ms +[2025-09-09 15:34:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:34:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:36:12] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:36:12] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:36:12] [Rank 0] Total Loss: 4.9480 +[2025-09-09 15:36:12] [Rank 0] Total FTA (Unweighted): 0.9969 +[2025-09-09 15:36:12] [Rank 0] Total FTA (Weighted): 0.9969 +[2025-09-09 15:36:12] [Rank 0] Group 0 Loss: 4.8595 +[2025-09-09 15:36:12] [Rank 0] Group 1 Loss: 4.5671 +[2025-09-09 15:36:12] [Rank 0] Group 2 Loss: 4.4178 +[2025-09-09 15:36:12] [Rank 0] Group 3 Loss: 4.8099 +[2025-09-09 15:36:12] [Rank 0] Group 4 Loss: 4.8512 +[2025-09-09 15:36:12] [Rank 0] Group 5 Loss: 4.8857 +[2025-09-09 15:36:12] [Rank 0] Group 6 Loss: 4.8228 +[2025-09-09 15:36:12] [Rank 0] Group 7 Loss: 4.9230 +[2025-09-09 15:36:12] [Rank 0] Group 8 Loss: 5.0300 +[2025-09-09 15:36:12] [Rank 0] Group 9 Loss: 4.9751 +[2025-09-09 15:36:12] [Rank 0] Group 10 Loss: 5.1048 +[2025-09-09 15:36:12] [Rank 0] Group 11 Loss: 5.1712 +[2025-09-09 15:36:12] [Rank 0] Group 12 Loss: 5.1162 +[2025-09-09 15:36:12] [Rank 0] Group 13 Loss: 5.1964 +[2025-09-09 15:36:12] [Rank 0] Group 14 Loss: 5.2152 +[2025-09-09 15:36:12] [Rank 0] Group 15 Loss: 5.2224 +[2025-09-09 15:36:12] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:36:12] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:36:12] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:36:12] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:36:12] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:36:12] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:36:13] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:36:13] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:36:13] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:36:13] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 15:36:13] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 15:36:13] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-09 15:36:13] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-09 15:36:13] [Rank 0] Group 13 FTA: 1.0000 +[2025-09-09 15:36:13] [Rank 0] Group 14 FTA: 1.0000 +[2025-09-09 15:36:13] [Rank 0] Group 15 FTA: 0.9500 +[2025-09-09 15:36:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:36:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:36:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:36:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:36:14] [Rank 0] step:9001/10000 train_time:410040ms step_avg:45.55ms +[2025-09-09 15:36:15] [Rank 0] step:9021/10000 train_time:410872ms step_avg:45.55ms +[2025-09-09 15:36:16] [Rank 0] step:9041/10000 train_time:411694ms step_avg:45.54ms +[2025-09-09 15:36:16] [Rank 0] step:9061/10000 train_time:412515ms step_avg:45.53ms +[2025-09-09 15:36:17] [Rank 0] step:9081/10000 train_time:413334ms step_avg:45.52ms +[2025-09-09 15:36:18] [Rank 0] step:9101/10000 train_time:414154ms step_avg:45.51ms +[2025-09-09 15:36:19] [Rank 0] step:9121/10000 train_time:414975ms step_avg:45.50ms +[2025-09-09 15:36:20]
[Rank 0] step:9141/10000 train_time:415796ms step_avg:45.49ms +[2025-09-09 15:36:20] [Rank 0] step:9161/10000 train_time:416617ms step_avg:45.48ms +[2025-09-09 15:36:21] [Rank 0] step:9181/10000 train_time:417438ms step_avg:45.47ms +[2025-09-09 15:36:22] [Rank 0] step:9201/10000 train_time:418259ms step_avg:45.46ms +[2025-09-09 15:36:23] [Rank 0] step:9221/10000 train_time:419080ms step_avg:45.45ms +[2025-09-09 15:36:24] [Rank 0] step:9241/10000 train_time:419901ms step_avg:45.44ms +[2025-09-09 15:36:25] [Rank 0] step:9261/10000 train_time:420722ms step_avg:45.43ms +[2025-09-09 15:36:25] [Rank 0] step:9281/10000 train_time:421543ms step_avg:45.42ms +[2025-09-09 15:36:27] [Rank 0] step:9301/10000 train_time:422692ms step_avg:45.45ms +[2025-09-09 15:36:28] [Rank 0] step:9321/10000 train_time:423720ms step_avg:45.46ms +[2025-09-09 15:36:28] [Rank 0] step:9341/10000 train_time:424543ms step_avg:45.45ms +[2025-09-09 15:36:29] [Rank 0] step:9361/10000 train_time:425365ms step_avg:45.44ms +[2025-09-09 15:36:30] [Rank 0] step:9381/10000 train_time:426186ms step_avg:45.43ms +[2025-09-09 15:36:31] [Rank 0] step:9401/10000 train_time:427006ms step_avg:45.42ms +[2025-09-09 15:36:32] [Rank 0] step:9421/10000 train_time:427826ms step_avg:45.41ms +[2025-09-09 15:36:33] [Rank 0] step:9441/10000 train_time:428646ms step_avg:45.40ms +[2025-09-09 15:36:33] [Rank 0] step:9461/10000 train_time:429466ms step_avg:45.39ms +[2025-09-09 15:36:34] [Rank 0] step:9481/10000 train_time:430286ms step_avg:45.38ms +[2025-09-09 15:36:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
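The divisibility warning repeated before each validation pass is plain integer arithmetic: 491520 val_tokens split into 65536-token validation batches gives 491520 / 65536 = 7.5, so only 7 full batches (458752 tokens) are evaluated and 32768 tokens go unused each time. A minimal sketch of that check, assuming only the two values named in the log (the helper itself is illustrative, not the training script's actual code):

    # Sketch of the arithmetic behind the repeated warning; the function
    # name and structure are assumptions, only the two values come from the log.
    def val_batch_plan(val_tokens: int = 491520, val_batch_size: int = 65536):
        full_batches = val_tokens // val_batch_size            # 7
        leftover = val_tokens - full_batches * val_batch_size  # 32768 tokens skipped
        if leftover:
            print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
                  f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
        return full_batches, leftover

Choosing val_tokens as a multiple of val_batch_size (for example 524288 = 8 * 65536) would silence the warning without changing anything else in the config.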
+[2025-09-09 15:36:35] [Rank 0] PRINT: step:9500/10000 train_loss:0.6080 val_loss:0.6051 train_time:431109ms step_avg:45.38ms +[2025-09-09 15:36:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:36:36] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:37:57] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:37:57] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:37:57] [Rank 0] Total Loss: 5.0123 +[2025-09-09 15:37:57] [Rank 0] Total FTA (Unweighted): 0.9981 +[2025-09-09 15:37:57] [Rank 0] Total FTA (Weighted): 0.9981 +[2025-09-09 15:37:57] [Rank 0] Group 0 Loss: 4.9648 +[2025-09-09 15:37:57] [Rank 0] Group 1 Loss: 4.6271 +[2025-09-09 15:37:57] [Rank 0] Group 2 Loss: 4.4850 +[2025-09-09 15:37:57] [Rank 0] Group 3 Loss: 4.8560 +[2025-09-09 15:37:57] [Rank 0] Group 4 Loss: 4.8846 +[2025-09-09 15:37:57] [Rank 0] Group 5 Loss: 4.9526 +[2025-09-09 15:37:57] [Rank 0] Group 6 Loss: 4.8883 +[2025-09-09 15:37:57] [Rank 0] Group 7 Loss: 4.9959 +[2025-09-09 15:37:57] [Rank 0] Group 8 Loss: 5.0854 +[2025-09-09 15:37:57] [Rank 0] Group 9 Loss: 5.0270 +[2025-09-09 15:37:57] [Rank 0] Group 10 Loss: 5.1733 +[2025-09-09 15:37:57] [Rank 0] Group 11 Loss: 5.2317 +[2025-09-09 15:37:57] [Rank 0] Group 12 Loss: 5.1705 +[2025-09-09 15:37:57] [Rank 0] Group 13 Loss: 5.2724 +[2025-09-09 15:37:57] [Rank 0] Group 14 Loss: 5.2906 +[2025-09-09 15:37:57] [Rank 0] Group 15 Loss: 5.2918 +[2025-09-09 15:37:57] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:37:57] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 13 FTA: 0.9900 +[2025-09-09 15:37:57] [Rank 0] Group 14 FTA: 1.0000 +[2025-09-09 15:37:57] [Rank 0] Group 15 FTA: 0.9800 +[2025-09-09 15:37:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:37:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:37:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:37:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:37:58] [Rank 0] step:9501/10000 train_time:431127ms step_avg:45.38ms +[2025-09-09 15:37:59] [Rank 0] step:9521/10000 train_time:431964ms step_avg:45.37ms +[2025-09-09 15:38:00] [Rank 0] step:9541/10000 train_time:432784ms step_avg:45.36ms +[2025-09-09 15:38:01] [Rank 0] step:9561/10000 train_time:433605ms step_avg:45.35ms +[2025-09-09 15:38:02] [Rank 0] step:9581/10000 train_time:434428ms step_avg:45.34ms +[2025-09-09 15:38:02] [Rank 0] step:9601/10000 train_time:435248ms step_avg:45.33ms +[2025-09-09 15:38:03] [Rank 0] step:9621/10000 train_time:436072ms step_avg:45.32ms +[2025-09-09 15:38:04]
[Rank 0] step:9641/10000 train_time:436893ms step_avg:45.32ms +[2025-09-09 15:38:05] [Rank 0] step:9661/10000 train_time:437991ms step_avg:45.34ms +[2025-09-09 15:38:06] [Rank 0] step:9681/10000 train_time:438812ms step_avg:45.33ms +[2025-09-09 15:38:07] [Rank 0] step:9701/10000 train_time:439634ms step_avg:45.32ms +[2025-09-09 15:38:08] [Rank 0] step:9721/10000 train_time:440454ms step_avg:45.31ms +[2025-09-09 15:38:08] [Rank 0] step:9741/10000 train_time:441273ms step_avg:45.30ms +[2025-09-09 15:38:09] [Rank 0] step:9761/10000 train_time:442093ms step_avg:45.29ms +[2025-09-09 15:38:10] [Rank 0] step:9781/10000 train_time:442914ms step_avg:45.28ms +[2025-09-09 15:38:11] [Rank 0] step:9801/10000 train_time:443735ms step_avg:45.27ms +[2025-09-09 15:38:12] [Rank 0] step:9821/10000 train_time:444556ms step_avg:45.27ms +[2025-09-09 15:38:12] [Rank 0] step:9841/10000 train_time:445377ms step_avg:45.26ms +[2025-09-09 15:38:13] [Rank 0] step:9861/10000 train_time:446197ms step_avg:45.25ms +[2025-09-09 15:38:14] [Rank 0] step:9881/10000 train_time:447018ms step_avg:45.24ms +[2025-09-09 15:38:15] [Rank 0] step:9901/10000 train_time:447839ms step_avg:45.23ms +[2025-09-09 15:38:16] [Rank 0] step:9921/10000 train_time:448660ms step_avg:45.22ms +[2025-09-09 15:38:17] [Rank 0] step:9941/10000 train_time:449481ms step_avg:45.21ms +[2025-09-09 15:38:17] [Rank 0] step:9961/10000 train_time:450302ms step_avg:45.21ms +[2025-09-09 15:38:18] [Rank 0] step:9981/10000 train_time:451123ms step_avg:45.20ms +[2025-09-09 15:38:19] [Rank 0] step:10000/10000 train_time:451902ms step_avg:45.19ms +[2025-09-09 15:38:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
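The step_avg field in these lines is simply cumulative wall-clock train_time divided by the current step index, e.g. 451902 ms / 10000 steps = 45.19 ms at the final step. A one-line check under that reading (the function name is assumed for illustration):

    # step_avg as it appears in the log: cumulative train_time over step count.
    def step_avg_ms(train_time_ms: int, step: int) -> float:
        return round(train_time_ms / step, 2)

    assert step_avg_ms(451902, 10000) == 45.19  # step 10000 above
    assert step_avg_ms(436893, 9641) == 45.32   # step 9641 above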
+[2025-09-09 15:38:19] [Rank 0] PRINT: step:10000/10000 train_loss:0.6070 val_loss:0.6045 train_time:451951ms step_avg:45.20ms +[2025-09-09 15:38:19] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:38:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:39:41] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:39:41] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:39:41] [Rank 0] Total Loss: 5.0234 +[2025-09-09 15:39:41] [Rank 0] Total FTA (Unweighted): 0.9994 +[2025-09-09 15:39:41] [Rank 0] Total FTA (Weighted): 0.9994 +[2025-09-09 15:39:41] [Rank 0] Group 0 Loss: 4.9660 +[2025-09-09 15:39:41] [Rank 0] Group 1 Loss: 4.5831 +[2025-09-09 15:39:41] [Rank 0] Group 2 Loss: 4.4759 +[2025-09-09 15:39:41] [Rank 0] Group 3 Loss: 4.8740 +[2025-09-09 15:39:41] [Rank 0] Group 4 Loss: 4.9015 +[2025-09-09 15:39:41] [Rank 0] Group 5 Loss: 4.9711 +[2025-09-09 15:39:41] [Rank 0] Group 6 Loss: 4.9124 +[2025-09-09 15:39:41] [Rank 0] Group 7 Loss: 5.0087 +[2025-09-09 15:39:41] [Rank 0] Group 8 Loss: 5.1018 +[2025-09-09 15:39:41] [Rank 0] Group 9 Loss: 5.0543 +[2025-09-09 15:39:41] [Rank 0] Group 10 Loss: 5.2008 +[2025-09-09 15:39:41] [Rank 0] Group 11 Loss: 5.2413 +[2025-09-09 15:39:41] [Rank 0] Group 12 Loss: 5.1791 +[2025-09-09 15:39:41] [Rank 0] Group 13 Loss: 5.2965 +[2025-09-09 15:39:41] [Rank 0] Group 14 Loss: 5.3227 +[2025-09-09 15:39:41] [Rank 0] Group 15 Loss: 5.2845 +[2025-09-09 15:39:41] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:39:41] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 13 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 14 FTA: 1.0000 +[2025-09-09 15:39:41] [Rank 0] Group 15 FTA: 0.9900 +[2025-09-09 15:39:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_loss_curves.png +[2025-09-09 15:39:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/per_class_acc_curves.png +[2025-09-09 15:39:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_loss_curve.png +[2025-09-09 15:39:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_45/total_acc_curve.png +[2025-09-09 15:39:42] [Rank 0] step:10001/10000 train_time:451969ms step_avg:45.19ms +[2025-09-09 15:39:42] [Rank 0] PRINT: --- Training Finished: Tue Sep 9 15:39:42 2025 --- +[2025-09-09 15:39:42] [Rank 0] PRINT: Peak memory allocated: 3880 MiB reserved: 4808 MiB diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/config.json b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..040827ad684e78487ea6d4a7acbb6e56bce4e1f2 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 7, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.001, + "adam_lr": 0.002, + "base_dir": "logs_qa_muon_gated/diff_mode", + "sgd_lr": 0.01, + "m_val": 15, + "qa_jsonl_path": 
"/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "1a2917f9-61c5-433e-b419-6b769e2de023", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/fixed_eval_indices.json b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], 
"14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 
3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 
1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 
1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..cd551896cc097a84db1fc402dee91b099a1779dc --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4166d6dde291aecc58d1c8fc23ef30bbef981873c3788a2ae752aded76bc433d +size 328372 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..94f18e20b48dc4fc28081dffa206b55be0485722 --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43dc683e8b5b15bdfe393e1fe3e294b77a49979ea067cf9fa0caab5a9e4c3a63 +size 470856 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..c88145bf6359c5b5edb6ffabf834c109177dd1ff --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ab2f7b51ee0413bc8c4e988816b8fc7b5331c3eda032fbe5f9846b13a46bec9 +size 94615 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..d012a12b6c22e7cfd4f2449b4cdde0e613b3307d --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e8825a55a26de41369f32c6dcd108daa274e4ccecda47f72caa4032504c769c +size 113938 diff --git a/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/training_log_1a2917f9-61c5-433e-b419-6b769e2de023.txt b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/training_log_1a2917f9-61c5-433e-b419-6b769e2de023.txt new file mode 100644 index 0000000000000000000000000000000000000000..cd2c7fa306f753ad351d8a016828073ca8dffe5a --- /dev/null +++ b/logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/training_log_1a2917f9-61c5-433e-b419-6b769e2de023.txt @@ -0,0 +1,5618 @@ +[2025-09-09 15:40:08] [Rank 0] PRINT: --- Script Start: Tue Sep 9 15:40:08 2025 --- +[2025-09-09 15:40:08] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=7, model_parameterization='gated', per_group_k=100, muon_lr=0.001, adam_lr=0.002, base_dir='logs_qa_muon_gated/diff_mode', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
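Each run directory pairs a config.json and a fixed_eval_indices.json with the LFS-tracked curve PNGs and this training log; a minimal sketch (paths assumed from the layout visible in this diff, not from any code shown here) of reading a run's recorded configuration back:

import json
from pathlib import Path

run_dir = Path("logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46")
cfg = json.loads((run_dir / "config.json").read_text())
print(cfg["cli_args"]["optimizer_mode"], cfg["cli_args"]["seed"])  # 7 46
print(cfg["hyperparameters"]["num_iterations"])                    # 10000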
+[2025-09-09 15:40:08] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-09 15:40:08] [Rank 0] PRINT: Using fixed seed: 46 +[2025-09-09 15:40:08] [Rank 0] PRINT: Run directory: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46 +[2025-09-09 15:40:08] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // 
world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + 
#val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + 
logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = 
F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def 
evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. + (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") +
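    # Sanity sketch (a hypothetical check, not part of the original script): in every
    # non-SGD mode, each attention/MLP weight matrix should be routed to exactly one
    # of Muon or Adam by the dispatch above.
    if current_optimizer_mode != 9:
        _muon_ids = {id(p) for p in muon_params_target_list}
        _adam_ids = {id(p) for p in adam_matrix_target_list}
        assert _muon_ids.isdisjoint(_adam_ids), "matrix assigned to both optimizers"
        for _p in all_attn_matrices + all_mlp_matrices:
            assert id(_p) in (_muon_ids | _adam_ids), "matrix assigned to no optimizer"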
+ # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
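    # For reference, a minimal sketch of the orthogonalization step behind the Muon
    # update constructed above. The quintic Newton-Schulz iteration and its
    # coefficients follow Keller Jordan's public Muon reference implementation;
    # optimizers/MUON.py itself is not shown in this diff, so treat this as an
    # assumed illustration rather than the repo's exact code.
    def _newton_schulz5_sketch(G, steps=5, eps=1e-7):
        a, b, c = 3.4445, -4.7750, 2.0315
        X = G.bfloat16()
        if X.size(-2) > X.size(-1):
            X = X.mT  # iterate on the wide orientation
        X = X / (X.norm(dim=(-2, -1), keepdim=True) + eps)  # Frobenius bound => spectral norm <= 1
        for _ in range(steps):
            A = X @ X.mT
            X = a * X + (b * A + c * A @ A) @ X  # drives singular values toward 1
        if G.size(-2) > G.size(-1):
            X = X.mT
        return X.to(G.dtype)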
+ # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + muon_lr = exp_args.muon_lr + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params +
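    # The gated parameterization adds a third MLP matrix, c_up, and the grouping above
    # folds c_fc and c_up into the W_1 side while c_proj alone forms W_2. A minimal
    # sketch of the gated block these parameter names imply (models/nano_GPT_gated.py
    # is not part of this diff, so the SwiGLU-style activation here is an assumption):
    class _GatedMLPSketch(nn.Module):
        def __init__(self, dim, hidden):
            super().__init__()
            self.c_fc = nn.Linear(dim, hidden, bias=False)    # W_1 (gate branch)
            self.c_up = nn.Linear(dim, hidden, bias=False)    # W_1 (up branch)
            self.c_proj = nn.Linear(hidden, dim, bias=False)  # W_2
        def forward(self, x):
            return self.c_proj(F.silu(self.c_fc(x)) * self.c_up(x))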
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O.
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured.
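+ # Illustrative sanity check, not in the original script: after the split
+ # above, every trainable parameter should be owned by exactly one optimizer
+ # (embeds/head/scalars by Adam, matrices by Muon or Adam depending on mode).
+ # defaultdict is already imported at the top of this script.
+ _owners = defaultdict(int)
+ for _opt in optimizers:
+     for _group in _opt.param_groups:
+         for _p in _group["params"]:
+             _owners[id(_p)] += 1
+ for _name, _p in model.named_parameters():
+     if _p.requires_grad:
+         assert _owners[id(_p)] == 1, f"{_name} owned by {_owners[id(_p)]} optimizers"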
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
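+ # For reference, fixed_idx maps str(group_id) -> list of line indices into the
+ # QA jsonl, with at most PER_GROUP_K entries per group (all of them if a group
+ # is smaller). Illustrative inspection snippet, not part of the original script:
+ for _gid, _arr in sorted(fixed_idx.items(), key=lambda kv: int(kv[0])):
+     print0(f"PRINT: fixed eval group {_gid}: {len(_arr)} samples", console=True)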
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
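+ # Worked example of the validation arithmetic above, assuming a single-GPU run
+ # (world_size == 1) with the logged hyperparameters: val_batch_size = 1 * 16384
+ # = 16384 tokens, val_num_steps = 491520 // 16384 = 30, and 491520 % 16384 == 0,
+ # so the divisibility warning should not fire for this configuration.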
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-09 15:40:08] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n")
+ + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3.
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
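+ # Worked example of the padding above: a 150-token QA item is rounded up to
+ # padded_len = ((150 + 127) // 128) * 128 = 256 (capped at max_eval_len = 4096),
+ # giving window_blocks = 256 // 128 = 2.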
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
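+ # Worked example of the two accuracy totals returned above, with illustrative
+ # counts group_correct = {0: 90, 1: 2} and group_total_fta = {0: 100, 1: 20}:
+ #   total_acc_weighted   = (90 + 2) / (100 + 20) ~ 0.767  (large groups dominate)
+ #   total_acc_unweighted = (0.90 + 0.10) / 2     = 0.50   (each group counts once)
+ # which is why both views are reported.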
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
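+ # For reference, plot_loss_curves below expects the same nested history layout
+ # that plot_curves uses for per-class data, keyed by strings at both levels
+ # (illustrative values):
+ #   {"0": {"500": 3.2, "1000": 2.9}, "1": {"500": 3.5, "1000": 3.1}}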
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O.
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured.
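+ # Worked example, for reference: each group's lr is later rescaled every step
+ # as initial_lr * get_lr(step). With the logged settings num_iterations = 10000
+ # and cooldown_frac = 0.8, the multiplier is flat at 1.0 through step 2000 and
+ # then decays linearly to 0.1: get_lr(2000) = 1.0, get_lr(6000) = 0.55,
+ # get_lr(10000) = 0.1.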
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+    muon_lr = exp_args.muon_lr
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
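+    # Naming note for the gated parameterization: each MLP carries three weight
+    # matrices, c_fc and c_up (grouped above as W_1, the input-side projections)
+    # and c_proj (W_2, the output projection). A gated MLP of this shape
+    # typically computes something like c_proj(act(c_fc(x)) * c_up(x)); the
+    # exact activation is defined in the model code earlier in this file.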
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # SGD + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O Attn, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O Attn only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V Attn only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
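+        # Note: in this setup every Adam group shares the same base LR,
+        # exp_args.adam_lr (0.002 for this run), and adam_matrix_lr equals it as
+        # well -- unlike the per-group speedrun values left commented out above
+        # (0.22 / 0.6 / 0.04).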
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
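+# Worked example of the two schedules for this run (num_iterations = 10000,
+# cooldown_frac = 0.8; values computed by hand from the functions above):
+#   get_lr(0)     -> 1.0    (x = 0.0, constant phase since x < 1 - 0.8 = 0.2)
+#   get_lr(2000)  -> 1.0    (x = 0.2, boundary of the constant phase: w = 1.0)
+#   get_lr(6000)  -> 0.55   (x = 0.6, w = 0.5 -> 0.5 * 1.0 + 0.5 * 0.1)
+#   get_lr(10000) -> 0.1    (x = 1.0, fully decayed)
+# The attention window grows linearly with progress, rounded up to a multiple of
+# 128 tokens: 128 tokens (1 block) at step 0, up to 1792 tokens (14 blocks) at
+# step 10000, since next_multiple_of_n(1728, n=128) = 1792.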
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile 'model' here; 'model_compiled' does not exist until this assignment
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter when building the fixed eval set: keep only samples with a parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket JSONL line indices by group_id, collecting only samples that can be parsed for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # Already defined above: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
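+    # Shape of the result (this is what fixed_eval_indices.json in the run
+    # directory holds): a dict mapping group-id strings to up to PER_GROUP_K
+    # JSONL line indices, e.g. {"0": [i_0, ..., i_99], ..., "15": [...]}.
+    # With m_val = 15 there are 16 groups, so PER_GROUP_K = 100 yields the
+    # 1600-sample fixed eval set reported in the training log below.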
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
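+        # Worked example from this run's log: val_tokens = 491520 and the logged
+        # val_batch_size = 65536 (val_seq_len = 16384, implying world_size = 4);
+        # 491520 / 65536 = 7.5, so val_num_steps = 7 and the trailing
+        # 491520 - 7 * 65536 = 32768 tokens are skipped each evaluation -- hence
+        # the divisibility warning repeated in the log below.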
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+        #if last_step:
+        #    if master_process and args.save_checkpoint:
+        #        if run_dir_path_str:
+        #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+        #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+        #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+        #            log_checkpoint = dict(
+        #                step=step,
+        #                code=code,
+        #                model=model_compiled.state_dict(),
+        #                optimizers=[opt.state_dict() for opt in optimizers]
+        #            )
+        #            torch.save(log_checkpoint, str(checkpoint_path))
+        #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+        #        else:
+        #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+        #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
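+    # The Muon momentum warmup below ramps linearly over the first 300 steps:
+    # step 0 -> 0.85, step 150 -> 0.90, step >= 300 -> 0.95 (held thereafter).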
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-09 15:40:08] [Rank 0] PRINT: Constructing model...
+[2025-09-09 15:40:10] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-09 15:40:10] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-09 15:40:10] [Rank 0] PRINT: Testing model forward function: +[2025-09-09 15:40:10] [Rank 0] PRINT: Testing model forward function: +[2025-09-09 15:40:14] [Rank 0] PRINT: Model test - Result type: +[2025-09-09 15:40:14] [Rank 0] PRINT: Model test - Result type: +[2025-09-09 15:40:14] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-09-09 15:40:14] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-09-09 15:40:14] [Rank 0] PRINT: Saved original model reference for inference. +[2025-09-09 15:40:14] [Rank 0] PRINT: Saved original model reference for inference. +[2025-09-09 15:40:14] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-09-09 15:40:14] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-09-09 15:40:14] [Rank 0] PRINT: Model returns: +[2025-09-09 15:40:14] [Rank 0] PRINT: Model returns: +[2025-09-09 15:40:14] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-09 15:40:14] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-09 15:40:14] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 7 +[2025-09-09 15:40:14] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 7 +[2025-09-09 15:40:14] [Rank 0] PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: 0.002). +[2025-09-09 15:40:14] [Rank 0] PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: 0.002). +[2025-09-09 15:40:14] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-09 15:40:14] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-09 15:40:14] [Rank 0] PRINT: Muon optimizer is active with 48 parameters. +[2025-09-09 15:40:14] [Rank 0] PRINT: Muon optimizer is active with 48 parameters. +[2025-09-09 15:40:14] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-09 15:40:14] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-09 15:40:18] [Rank 0] PRINT: Model compilation complete. +[2025-09-09 15:40:18] [Rank 0] PRINT: Model compilation complete. +[2025-09-09 15:40:18] [Rank 0] PRINT: Starting warmup... +[2025-09-09 15:40:18] [Rank 0] PRINT: Starting warmup... +[2025-09-09 15:40:59] [Rank 0] PRINT: Warmup complete. +[2025-09-09 15:40:59] [Rank 0] PRINT: Warmup complete. +[2025-09-09 15:40:59] [Rank 0] PRINT: Starting training... +[2025-09-09 15:40:59] [Rank 0] PRINT: Starting training... +[2025-09-09 15:41:06] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/fixed_eval_indices.json +[2025-09-09 15:41:06] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/fixed_eval_indices.json +[2025-09-09 15:41:06] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 15:41:06] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-09 15:41:10] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-09-09 15:41:10] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-09-09 15:41:43] [Rank 0] step:21/10000 train_time:33298ms step_avg:1585.62ms +[2025-09-09 15:41:43] [Rank 0] step:21/10000 train_time:33298ms step_avg:1585.62ms +[2025-09-09 15:41:44] [Rank 0] step:41/10000 train_time:34111ms step_avg:831.98ms +[2025-09-09 15:41:44] [Rank 0] step:41/10000 train_time:34111ms step_avg:831.98ms +[2025-09-09 15:41:45] [Rank 0] step:61/10000 train_time:34922ms step_avg:572.49ms +[2025-09-09 15:41:45] [Rank 0] step:61/10000 train_time:34922ms step_avg:572.49ms +[2025-09-09 15:41:46] [Rank 0] step:81/10000 train_time:35732ms step_avg:441.14ms +[2025-09-09 15:41:46] [Rank 0] step:81/10000 train_time:35732ms step_avg:441.14ms +[2025-09-09 15:41:47] [Rank 0] step:101/10000 train_time:36543ms step_avg:361.81ms +[2025-09-09 15:41:47] [Rank 0] step:101/10000 train_time:36543ms step_avg:361.81ms +[2025-09-09 15:41:47] [Rank 0] step:121/10000 train_time:37354ms step_avg:308.71ms +[2025-09-09 15:41:47] [Rank 0] step:121/10000 train_time:37354ms step_avg:308.71ms +[2025-09-09 15:41:48] [Rank 0] step:141/10000 train_time:38165ms step_avg:270.68ms +[2025-09-09 15:41:48] [Rank 0] step:141/10000 train_time:38165ms step_avg:270.68ms +[2025-09-09 15:41:49] [Rank 0] step:161/10000 train_time:38977ms step_avg:242.09ms +[2025-09-09 15:41:49] [Rank 0] step:161/10000 train_time:38977ms step_avg:242.09ms +[2025-09-09 15:41:50] [Rank 0] step:181/10000 train_time:39789ms step_avg:219.83ms +[2025-09-09 15:41:50] [Rank 0] step:181/10000 train_time:39789ms step_avg:219.83ms +[2025-09-09 15:41:51] [Rank 0] step:201/10000 train_time:40600ms step_avg:201.99ms +[2025-09-09 15:41:51] [Rank 0] step:201/10000 train_time:40600ms step_avg:201.99ms +[2025-09-09 15:41:51] [Rank 0] step:221/10000 train_time:41411ms step_avg:187.38ms +[2025-09-09 15:41:51] [Rank 0] step:221/10000 train_time:41411ms step_avg:187.38ms +[2025-09-09 15:41:52] [Rank 0] step:241/10000 train_time:42221ms step_avg:175.19ms +[2025-09-09 15:41:52] [Rank 0] step:241/10000 train_time:42221ms step_avg:175.19ms +[2025-09-09 15:41:53] [Rank 0] step:261/10000 train_time:43031ms step_avg:164.87ms +[2025-09-09 15:41:53] [Rank 0] step:261/10000 train_time:43031ms step_avg:164.87ms +[2025-09-09 15:41:54] [Rank 0] step:281/10000 train_time:43842ms step_avg:156.02ms +[2025-09-09 15:41:54] [Rank 0] step:281/10000 train_time:43842ms step_avg:156.02ms +[2025-09-09 15:41:55] [Rank 0] step:301/10000 train_time:44652ms step_avg:148.35ms +[2025-09-09 15:41:55] [Rank 0] step:301/10000 train_time:44652ms step_avg:148.35ms +[2025-09-09 15:41:55] [Rank 0] step:321/10000 train_time:45462ms step_avg:141.63ms +[2025-09-09 15:41:55] [Rank 0] step:321/10000 train_time:45462ms step_avg:141.63ms +[2025-09-09 15:41:56] [Rank 0] step:341/10000 train_time:46273ms step_avg:135.70ms +[2025-09-09 15:41:56] [Rank 0] step:341/10000 train_time:46273ms step_avg:135.70ms +[2025-09-09 15:41:57] [Rank 0] step:361/10000 train_time:47084ms step_avg:130.43ms +[2025-09-09 15:41:57] [Rank 0] step:361/10000 train_time:47084ms step_avg:130.43ms +[2025-09-09 15:41:58] [Rank 0] step:381/10000 train_time:47895ms step_avg:125.71ms +[2025-09-09 15:41:58] [Rank 0] step:381/10000 train_time:47895ms step_avg:125.71ms +[2025-09-09 15:41:59] [Rank 0] step:401/10000 train_time:48705ms step_avg:121.46ms +[2025-09-09 15:41:59] [Rank 0] step:401/10000 train_time:48705ms step_avg:121.46ms +[2025-09-09 15:42:00] [Rank 0] 
step:421/10000 train_time:49515ms step_avg:117.61ms +[2025-09-09 15:42:00] [Rank 0] step:421/10000 train_time:49515ms step_avg:117.61ms +[2025-09-09 15:42:00] [Rank 0] step:441/10000 train_time:50328ms step_avg:114.12ms +[2025-09-09 15:42:00] [Rank 0] step:441/10000 train_time:50328ms step_avg:114.12ms +[2025-09-09 15:42:01] [Rank 0] step:461/10000 train_time:51139ms step_avg:110.93ms +[2025-09-09 15:42:01] [Rank 0] step:461/10000 train_time:51139ms step_avg:110.93ms +[2025-09-09 15:42:02] [Rank 0] step:481/10000 train_time:51951ms step_avg:108.01ms +[2025-09-09 15:42:02] [Rank 0] step:481/10000 train_time:51951ms step_avg:108.01ms +[2025-09-09 15:42:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 15:42:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 15:42:03] [Rank 0] PRINT: step:500/10000 train_loss:2.9179 val_loss:1.0491 train_time:52764ms step_avg:105.53ms +[2025-09-09 15:42:03] [Rank 0] PRINT: step:500/10000 train_loss:2.9179 val_loss:1.0491 train_time:52764ms step_avg:105.53ms +[2025-09-09 15:42:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:42:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:42:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:42:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:43:24] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:43:24] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:43:24] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:43:24] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:43:24] [Rank 0] Total Loss: 3.9651 +[2025-09-09 15:43:24] [Rank 0] Total Loss: 3.9651 +[2025-09-09 15:43:24] [Rank 0] Total FTA (Unweighted): 0.5419 +[2025-09-09 15:43:24] [Rank 0] Total FTA (Unweighted): 0.5419 +[2025-09-09 15:43:24] [Rank 0] Total FTA (Weighted): 0.5419 +[2025-09-09 15:43:24] [Rank 0] Total FTA (Weighted): 0.5419 +[2025-09-09 15:43:24] [Rank 0] Group 0 Loss: 3.4919 +[2025-09-09 15:43:24] [Rank 0] Group 0 Loss: 3.4919 +[2025-09-09 15:43:24] [Rank 0] Group 1 Loss: 3.3718 +[2025-09-09 15:43:24] [Rank 0] Group 1 Loss: 3.3718 +[2025-09-09 15:43:24] [Rank 0] Group 2 Loss: 3.2642 +[2025-09-09 15:43:24] [Rank 0] Group 2 Loss: 3.2642 +[2025-09-09 15:43:24] [Rank 0] Group 3 Loss: 3.6073 +[2025-09-09 15:43:24] [Rank 0] Group 3 Loss: 3.6073 +[2025-09-09 15:43:24] [Rank 0] Group 4 Loss: 3.5607 +[2025-09-09 15:43:24] [Rank 0] Group 4 Loss: 3.5607 +[2025-09-09 15:43:24] [Rank 0] Group 5 Loss: 3.6639 +[2025-09-09 15:43:24] [Rank 0] Group 5 Loss: 3.6639 +[2025-09-09 15:43:24] [Rank 0] Group 6 Loss: 3.6887 +[2025-09-09 15:43:24] [Rank 0] Group 6 Loss: 3.6887 +[2025-09-09 15:43:24] [Rank 0] Group 7 Loss: 3.7780 +[2025-09-09 15:43:24] [Rank 0] Group 7 Loss: 3.7780 +[2025-09-09 15:43:24] [Rank 0] Group 8 Loss: 4.0282 +[2025-09-09 15:43:24] [Rank 0] Group 8 Loss: 4.0282 +[2025-09-09 15:43:25] [Rank 0] Group 9 Loss: 4.0918 +[2025-09-09 15:43:25] [Rank 0] Group 9 Loss: 4.0918 +[2025-09-09 15:43:25] [Rank 0] Group 10 Loss: 4.3620 +[2025-09-09 15:43:25] [Rank 0] Group 10 Loss: 4.3620 +[2025-09-09 15:43:25] [Rank 0] Group 11 Loss: 4.3762 +[2025-09-09 15:43:25] [Rank 0] Group 11 Loss: 4.3762 +[2025-09-09 15:43:25] [Rank 0] Group 12 Loss: 4.4469 +[2025-09-09 15:43:25] [Rank 0] Group 12 Loss: 4.4469 
+[2025-09-09 15:43:25] [Rank 0] Group 13 Loss: 4.5628 +[2025-09-09 15:43:25] [Rank 0] Group 13 Loss: 4.5628 +[2025-09-09 15:43:25] [Rank 0] Group 14 Loss: 4.5606 +[2025-09-09 15:43:25] [Rank 0] Group 14 Loss: 4.5606 +[2025-09-09 15:43:25] [Rank 0] Group 15 Loss: 4.5868 +[2025-09-09 15:43:25] [Rank 0] Group 15 Loss: 4.5868 +[2025-09-09 15:43:25] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:43:25] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:43:25] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:43:25] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:43:25] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:43:25] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:43:25] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:43:25] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:43:25] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:43:25] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:43:25] [Rank 0] Group 5 FTA: 0.8400 +[2025-09-09 15:43:25] [Rank 0] Group 5 FTA: 0.8400 +[2025-09-09 15:43:25] [Rank 0] Group 6 FTA: 0.5700 +[2025-09-09 15:43:25] [Rank 0] Group 6 FTA: 0.5700 +[2025-09-09 15:43:25] [Rank 0] Group 7 FTA: 0.5400 +[2025-09-09 15:43:25] [Rank 0] Group 7 FTA: 0.5400 +[2025-09-09 15:43:25] [Rank 0] Group 8 FTA: 0.4800 +[2025-09-09 15:43:25] [Rank 0] Group 8 FTA: 0.4800 +[2025-09-09 15:43:25] [Rank 0] Group 9 FTA: 0.3600 +[2025-09-09 15:43:25] [Rank 0] Group 9 FTA: 0.3600 +[2025-09-09 15:43:25] [Rank 0] Group 10 FTA: 0.2800 +[2025-09-09 15:43:25] [Rank 0] Group 10 FTA: 0.2800 +[2025-09-09 15:43:25] [Rank 0] Group 11 FTA: 0.1600 +[2025-09-09 15:43:25] [Rank 0] Group 11 FTA: 0.1600 +[2025-09-09 15:43:25] [Rank 0] Group 12 FTA: 0.1200 +[2025-09-09 15:43:25] [Rank 0] Group 12 FTA: 0.1200 +[2025-09-09 15:43:25] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-09 15:43:25] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-09 15:43:25] [Rank 0] Group 14 FTA: 0.1000 +[2025-09-09 15:43:25] [Rank 0] Group 14 FTA: 0.1000 +[2025-09-09 15:43:25] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-09 15:43:25] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-09 15:43:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png +[2025-09-09 15:43:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png +[2025-09-09 15:43:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png +[2025-09-09 15:43:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png +[2025-09-09 15:43:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png +[2025-09-09 15:43:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png +[2025-09-09 15:43:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png +[2025-09-09 15:43:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png +[2025-09-09 15:43:26] [Rank 0] step:501/10000 train_time:52782ms step_avg:105.35ms +[2025-09-09 15:43:26] [Rank 0] step:501/10000 train_time:52782ms step_avg:105.35ms +[2025-09-09 15:43:27] [Rank 0] step:521/10000 train_time:53596ms step_avg:102.87ms +[2025-09-09 15:43:27] 
[Rank 0] step:521/10000 train_time:53596ms step_avg:102.87ms +[2025-09-09 15:43:28] [Rank 0] step:541/10000 train_time:54407ms step_avg:100.57ms +[2025-09-09 15:43:28] [Rank 0] step:541/10000 train_time:54407ms step_avg:100.57ms +[2025-09-09 15:43:29] [Rank 0] step:561/10000 train_time:55217ms step_avg:98.43ms +[2025-09-09 15:43:29] [Rank 0] step:561/10000 train_time:55217ms step_avg:98.43ms +[2025-09-09 15:43:30] [Rank 0] step:581/10000 train_time:56027ms step_avg:96.43ms +[2025-09-09 15:43:30] [Rank 0] step:581/10000 train_time:56027ms step_avg:96.43ms +[2025-09-09 15:43:30] [Rank 0] step:601/10000 train_time:56839ms step_avg:94.57ms +[2025-09-09 15:43:30] [Rank 0] step:601/10000 train_time:56839ms step_avg:94.57ms +[2025-09-09 15:43:31] [Rank 0] step:621/10000 train_time:57650ms step_avg:92.83ms +[2025-09-09 15:43:31] [Rank 0] step:621/10000 train_time:57650ms step_avg:92.83ms +[2025-09-09 15:43:32] [Rank 0] step:641/10000 train_time:58464ms step_avg:91.21ms +[2025-09-09 15:43:32] [Rank 0] step:641/10000 train_time:58464ms step_avg:91.21ms +[2025-09-09 15:43:33] [Rank 0] step:661/10000 train_time:59277ms step_avg:89.68ms +[2025-09-09 15:43:33] [Rank 0] step:661/10000 train_time:59277ms step_avg:89.68ms +[2025-09-09 15:43:34] [Rank 0] step:681/10000 train_time:60088ms step_avg:88.24ms +[2025-09-09 15:43:34] [Rank 0] step:681/10000 train_time:60088ms step_avg:88.24ms +[2025-09-09 15:43:35] [Rank 0] step:701/10000 train_time:60901ms step_avg:86.88ms +[2025-09-09 15:43:35] [Rank 0] step:701/10000 train_time:60901ms step_avg:86.88ms +[2025-09-09 15:43:35] [Rank 0] step:721/10000 train_time:61712ms step_avg:85.59ms +[2025-09-09 15:43:35] [Rank 0] step:721/10000 train_time:61712ms step_avg:85.59ms +[2025-09-09 15:43:36] [Rank 0] step:741/10000 train_time:62523ms step_avg:84.38ms +[2025-09-09 15:43:36] [Rank 0] step:741/10000 train_time:62523ms step_avg:84.38ms +[2025-09-09 15:43:37] [Rank 0] step:761/10000 train_time:63340ms step_avg:83.23ms +[2025-09-09 15:43:37] [Rank 0] step:761/10000 train_time:63340ms step_avg:83.23ms +[2025-09-09 15:43:38] [Rank 0] step:781/10000 train_time:64156ms step_avg:82.15ms +[2025-09-09 15:43:38] [Rank 0] step:781/10000 train_time:64156ms step_avg:82.15ms +[2025-09-09 15:43:39] [Rank 0] step:801/10000 train_time:64974ms step_avg:81.12ms +[2025-09-09 15:43:39] [Rank 0] step:801/10000 train_time:64974ms step_avg:81.12ms +[2025-09-09 15:43:40] [Rank 0] step:821/10000 train_time:66475ms step_avg:80.97ms +[2025-09-09 15:43:40] [Rank 0] step:821/10000 train_time:66475ms step_avg:80.97ms +[2025-09-09 15:43:41] [Rank 0] step:841/10000 train_time:67290ms step_avg:80.01ms +[2025-09-09 15:43:41] [Rank 0] step:841/10000 train_time:67290ms step_avg:80.01ms +[2025-09-09 15:43:42] [Rank 0] step:861/10000 train_time:68106ms step_avg:79.10ms +[2025-09-09 15:43:42] [Rank 0] step:861/10000 train_time:68106ms step_avg:79.10ms +[2025-09-09 15:43:43] [Rank 0] step:881/10000 train_time:68921ms step_avg:78.23ms +[2025-09-09 15:43:43] [Rank 0] step:881/10000 train_time:68921ms step_avg:78.23ms +[2025-09-09 15:43:43] [Rank 0] step:901/10000 train_time:69737ms step_avg:77.40ms +[2025-09-09 15:43:43] [Rank 0] step:901/10000 train_time:69737ms step_avg:77.40ms +[2025-09-09 15:43:44] [Rank 0] step:921/10000 train_time:70552ms step_avg:76.60ms +[2025-09-09 15:43:44] [Rank 0] step:921/10000 train_time:70552ms step_avg:76.60ms +[2025-09-09 15:43:45] [Rank 0] step:941/10000 train_time:71368ms step_avg:75.84ms +[2025-09-09 15:43:45] [Rank 0] step:941/10000 train_time:71368ms step_avg:75.84ms 
+[2025-09-09 15:43:46] [Rank 0] step:961/10000 train_time:72184ms step_avg:75.11ms +[2025-09-09 15:43:46] [Rank 0] step:961/10000 train_time:72184ms step_avg:75.11ms +[2025-09-09 15:43:47] [Rank 0] step:981/10000 train_time:73002ms step_avg:74.42ms +[2025-09-09 15:43:47] [Rank 0] step:981/10000 train_time:73002ms step_avg:74.42ms +[2025-09-09 15:43:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 15:43:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-09 15:43:48] [Rank 0] PRINT: step:1000/10000 train_loss:0.9199 val_loss:0.8218 train_time:73818ms step_avg:73.82ms +[2025-09-09 15:43:48] [Rank 0] PRINT: step:1000/10000 train_loss:0.9199 val_loss:0.8218 train_time:73818ms step_avg:73.82ms +[2025-09-09 15:43:48] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:43:48] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-09 15:43:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:43:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-09 15:45:09] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:45:09] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-09 15:45:09] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:45:09] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-09 15:45:09] [Rank 0] Total Loss: 4.3642 +[2025-09-09 15:45:09] [Rank 0] Total Loss: 4.3642 +[2025-09-09 15:45:09] [Rank 0] Total FTA (Unweighted): 0.7156 +[2025-09-09 15:45:09] [Rank 0] Total FTA (Unweighted): 0.7156 +[2025-09-09 15:45:09] [Rank 0] Total FTA (Weighted): 0.7156 +[2025-09-09 15:45:09] [Rank 0] Total FTA (Weighted): 0.7156 +[2025-09-09 15:45:09] [Rank 0] Group 0 Loss: 4.1803 +[2025-09-09 15:45:09] [Rank 0] Group 0 Loss: 4.1803 +[2025-09-09 15:45:09] [Rank 0] Group 1 Loss: 4.0641 +[2025-09-09 15:45:09] [Rank 0] Group 1 Loss: 4.0641 +[2025-09-09 15:45:09] [Rank 0] Group 2 Loss: 3.8647 +[2025-09-09 15:45:09] [Rank 0] Group 2 Loss: 3.8647 +[2025-09-09 15:45:09] [Rank 0] Group 3 Loss: 4.2239 +[2025-09-09 15:45:09] [Rank 0] Group 3 Loss: 4.2239 +[2025-09-09 15:45:09] [Rank 0] Group 4 Loss: 4.0737 +[2025-09-09 15:45:09] [Rank 0] Group 4 Loss: 4.0737 +[2025-09-09 15:45:09] [Rank 0] Group 5 Loss: 4.1372 +[2025-09-09 15:45:09] [Rank 0] Group 5 Loss: 4.1372 +[2025-09-09 15:45:09] [Rank 0] Group 6 Loss: 4.1285 +[2025-09-09 15:45:09] [Rank 0] Group 6 Loss: 4.1285 +[2025-09-09 15:45:09] [Rank 0] Group 7 Loss: 4.1538 +[2025-09-09 15:45:09] [Rank 0] Group 7 Loss: 4.1538 +[2025-09-09 15:45:09] [Rank 0] Group 8 Loss: 4.3040 +[2025-09-09 15:45:09] [Rank 0] Group 8 Loss: 4.3040 +[2025-09-09 15:45:09] [Rank 0] Group 9 Loss: 4.2733 +[2025-09-09 15:45:09] [Rank 0] Group 9 Loss: 4.2733 +[2025-09-09 15:45:09] [Rank 0] Group 10 Loss: 4.5313 +[2025-09-09 15:45:09] [Rank 0] Group 10 Loss: 4.5313 +[2025-09-09 15:45:09] [Rank 0] Group 11 Loss: 4.6070 +[2025-09-09 15:45:09] [Rank 0] Group 11 Loss: 4.6070 +[2025-09-09 15:45:09] [Rank 0] Group 12 Loss: 4.6695 +[2025-09-09 15:45:09] [Rank 0] Group 12 Loss: 4.6695 +[2025-09-09 15:45:09] [Rank 0] Group 13 Loss: 4.8031 +[2025-09-09 15:45:09] [Rank 0] Group 13 Loss: 4.8031 +[2025-09-09 15:45:09] [Rank 0] Group 14 Loss: 4.8457 +[2025-09-09 15:45:09] [Rank 0] Group 14 Loss: 4.8457 +[2025-09-09 15:45:09] [Rank 0] Group 15 Loss: 4.9671 +[2025-09-09 15:45:09] [Rank 0] Group 15 Loss: 
4.9671 +[2025-09-09 15:45:09] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-09 15:45:09] [Rank 0] Group 7 FTA: 0.9600 +[2025-09-09 15:45:09] [Rank 0] Group 7 FTA: 0.9600 +[2025-09-09 15:45:09] [Rank 0] Group 8 FTA: 0.8500 +[2025-09-09 15:45:09] [Rank 0] Group 8 FTA: 0.8500 +[2025-09-09 15:45:09] [Rank 0] Group 9 FTA: 0.6800 +[2025-09-09 15:45:09] [Rank 0] Group 9 FTA: 0.6800 +[2025-09-09 15:45:09] [Rank 0] Group 10 FTA: 0.7100 +[2025-09-09 15:45:09] [Rank 0] Group 10 FTA: 0.7100 +[2025-09-09 15:45:09] [Rank 0] Group 11 FTA: 0.5200 +[2025-09-09 15:45:09] [Rank 0] Group 11 FTA: 0.5200 +[2025-09-09 15:45:09] [Rank 0] Group 12 FTA: 0.3400 +[2025-09-09 15:45:09] [Rank 0] Group 12 FTA: 0.3400 +[2025-09-09 15:45:09] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-09 15:45:09] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-09 15:45:09] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-09 15:45:09] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-09 15:45:09] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-09 15:45:09] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-09 15:45:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png +[2025-09-09 15:45:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png +[2025-09-09 15:45:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png +[2025-09-09 15:45:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png +[2025-09-09 15:45:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png +[2025-09-09 15:45:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png +[2025-09-09 15:45:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png +[2025-09-09 15:45:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png +[2025-09-09 15:45:10] [Rank 0] step:1001/10000 train_time:73835ms step_avg:73.76ms +[2025-09-09 15:45:10] [Rank 0] step:1001/10000 train_time:73835ms step_avg:73.76ms +[2025-09-09 15:45:11] [Rank 0] step:1021/10000 train_time:74661ms step_avg:73.13ms +[2025-09-09 15:45:11] [Rank 0] step:1021/10000 train_time:74661ms step_avg:73.13ms +[2025-09-09 15:45:12] [Rank 0] step:1041/10000 train_time:75478ms step_avg:72.50ms +[2025-09-09 15:45:12] [Rank 0] step:1041/10000 train_time:75478ms step_avg:72.50ms +[2025-09-09 15:45:13] [Rank 0] step:1061/10000 train_time:76291ms step_avg:71.91ms 
+[2025-09-09 15:45:13] [Rank 0] step:1061/10000 train_time:76291ms step_avg:71.91ms +[2025-09-09 15:45:14] [Rank 0] step:1081/10000 train_time:77107ms step_avg:71.33ms +[2025-09-09 15:45:14] [Rank 0] step:1081/10000 train_time:77107ms step_avg:71.33ms +[2025-09-09 15:45:14] [Rank 0] step:1101/10000 train_time:77922ms step_avg:70.77ms +[2025-09-09 15:45:14] [Rank 0] step:1101/10000 train_time:77922ms step_avg:70.77ms +[2025-09-09 15:45:15] [Rank 0] step:1121/10000 train_time:78738ms step_avg:70.24ms +[2025-09-09 15:45:15] [Rank 0] step:1121/10000 train_time:78738ms step_avg:70.24ms +[2025-09-09 15:45:16] [Rank 0] step:1141/10000 train_time:79553ms step_avg:69.72ms +[2025-09-09 15:45:16] [Rank 0] step:1141/10000 train_time:79553ms step_avg:69.72ms +[2025-09-09 15:45:17] [Rank 0] step:1161/10000 train_time:80369ms step_avg:69.22ms +[2025-09-09 15:45:17] [Rank 0] step:1161/10000 train_time:80369ms step_avg:69.22ms +[2025-09-09 15:45:18] [Rank 0] step:1181/10000 train_time:81185ms step_avg:68.74ms +[2025-09-09 15:45:18] [Rank 0] step:1181/10000 train_time:81185ms step_avg:68.74ms +[2025-09-09 15:45:18] [Rank 0] step:1201/10000 train_time:82002ms step_avg:68.28ms +[2025-09-09 15:45:18] [Rank 0] step:1201/10000 train_time:82002ms step_avg:68.28ms +[2025-09-09 15:45:19] [Rank 0] step:1221/10000 train_time:82817ms step_avg:67.83ms +[2025-09-09 15:45:19] [Rank 0] step:1221/10000 train_time:82817ms step_avg:67.83ms +[2025-09-09 15:45:20] [Rank 0] step:1241/10000 train_time:83632ms step_avg:67.39ms +[2025-09-09 15:45:20] [Rank 0] step:1241/10000 train_time:83632ms step_avg:67.39ms +[2025-09-09 15:45:21] [Rank 0] step:1261/10000 train_time:84447ms step_avg:66.97ms +[2025-09-09 15:45:21] [Rank 0] step:1261/10000 train_time:84447ms step_avg:66.97ms +[2025-09-09 15:45:22] [Rank 0] step:1281/10000 train_time:85264ms step_avg:66.56ms +[2025-09-09 15:45:22] [Rank 0] step:1281/10000 train_time:85264ms step_avg:66.56ms +[2025-09-09 15:45:23] [Rank 0] step:1301/10000 train_time:86080ms step_avg:66.16ms +[2025-09-09 15:45:23] [Rank 0] step:1301/10000 train_time:86080ms step_avg:66.16ms +[2025-09-09 15:45:23] [Rank 0] step:1321/10000 train_time:86895ms step_avg:65.78ms +[2025-09-09 15:45:23] [Rank 0] step:1321/10000 train_time:86895ms step_avg:65.78ms +[2025-09-09 15:45:24] [Rank 0] step:1341/10000 train_time:87712ms step_avg:65.41ms +[2025-09-09 15:45:24] [Rank 0] step:1341/10000 train_time:87712ms step_avg:65.41ms +[2025-09-09 15:45:25] [Rank 0] step:1361/10000 train_time:88527ms step_avg:65.05ms +[2025-09-09 15:45:25] [Rank 0] step:1361/10000 train_time:88527ms step_avg:65.05ms +[2025-09-09 15:45:26] [Rank 0] step:1381/10000 train_time:89342ms step_avg:64.69ms +[2025-09-09 15:45:26] [Rank 0] step:1381/10000 train_time:89342ms step_avg:64.69ms +[2025-09-09 15:45:27] [Rank 0] step:1401/10000 train_time:90157ms step_avg:64.35ms +[2025-09-09 15:45:27] [Rank 0] step:1401/10000 train_time:90157ms step_avg:64.35ms +[2025-09-09 15:45:27] [Rank 0] step:1421/10000 train_time:90972ms step_avg:64.02ms +[2025-09-09 15:45:27] [Rank 0] step:1421/10000 train_time:90972ms step_avg:64.02ms +[2025-09-09 15:45:28] [Rank 0] step:1441/10000 train_time:91787ms step_avg:63.70ms +[2025-09-09 15:45:28] [Rank 0] step:1441/10000 train_time:91787ms step_avg:63.70ms +[2025-09-09 15:45:29] [Rank 0] step:1461/10000 train_time:92601ms step_avg:63.38ms +[2025-09-09 15:45:29] [Rank 0] step:1461/10000 train_time:92601ms step_avg:63.38ms +[2025-09-09 15:45:30] [Rank 0] step:1481/10000 train_time:93416ms step_avg:63.08ms +[2025-09-09 15:45:30] 
[Rank 0] step:1481/10000 train_time:93416ms step_avg:63.08ms
+[2025-09-09 15:45:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:45:31] [Rank 0] PRINT: step:1500/10000 train_loss:0.7909 val_loss:0.7461 train_time:94234ms step_avg:62.82ms
+[2025-09-09 15:45:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:45:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:46:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:46:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:46:53] [Rank 0] Total Loss: 4.5792
+[2025-09-09 15:46:53] [Rank 0] Total FTA (Unweighted): 0.8025
+[2025-09-09 15:46:53] [Rank 0] Total FTA (Weighted): 0.8025
+[2025-09-09 15:46:53] [Rank 0] Group 0 Loss: 4.4732
+[2025-09-09 15:46:53] [Rank 0] Group 1 Loss: 4.3676
+[2025-09-09 15:46:53] [Rank 0] Group 2 Loss: 4.0474
+[2025-09-09 15:46:53] [Rank 0] Group 3 Loss: 4.4399
+[2025-09-09 15:46:53] [Rank 0] Group 4 Loss: 4.3708
+[2025-09-09 15:46:53] [Rank 0] Group 5 Loss: 4.4190
+[2025-09-09 15:46:53] [Rank 0] Group 6 Loss: 4.3806
+[2025-09-09 15:46:53] [Rank 0] Group 7 Loss: 4.3989
+[2025-09-09 15:46:53] [Rank 0] Group 8 Loss: 4.5866
+[2025-09-09 15:46:53] [Rank 0] Group 9 Loss: 4.4708
+[2025-09-09 15:46:53] [Rank 0] Group 10 Loss: 4.7399
+[2025-09-09 15:46:53] [Rank 0] Group 11 Loss: 4.7338
+[2025-09-09 15:46:53] [Rank 0] Group 12 Loss: 4.7554
+[2025-09-09 15:46:53] [Rank 0] Group 13 Loss: 4.9701
+[2025-09-09 15:46:53] [Rank 0] Group 14 Loss: 4.9943
+[2025-09-09 15:46:53] [Rank 0] Group 15 Loss: 5.1191
+[2025-09-09 15:46:53] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:46:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:46:53] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:46:53] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:46:53] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:46:53] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:46:53] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:46:53] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:46:53] [Rank 0] Group 8 FTA: 0.9900
+[2025-09-09 15:46:53] [Rank 0] Group 9 FTA: 0.8700
+[2025-09-09 15:46:53] [Rank 0] Group 10 FTA: 0.9100
+[2025-09-09 15:46:53] [Rank 0] Group 11 FTA: 0.7600
+[2025-09-09 15:46:53] [Rank 0] Group 12 FTA: 0.6700
+[2025-09-09 15:46:53] [Rank 0] Group 13 FTA: 0.3500
+[2025-09-09 15:46:53] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-09 15:46:53] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-09 15:46:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 15:46:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 15:46:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 15:46:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 15:46:54] [Rank 0] step:1501/10000 train_time:94250ms step_avg:62.79ms
+[2025-09-09 15:46:55] [Rank 0] step:1521/10000 train_time:95065ms step_avg:62.50ms
+[2025-09-09 15:46:56] [Rank 0] step:1541/10000 train_time:95882ms step_avg:62.22ms
+[2025-09-09 15:46:57] [Rank 0] step:1561/10000 train_time:96699ms step_avg:61.95ms
+[2025-09-09 15:46:58] [Rank 0] step:1581/10000 train_time:97518ms step_avg:61.68ms
+[2025-09-09 15:46:59] [Rank 0] step:1601/10000 train_time:98764ms step_avg:61.69ms
+[2025-09-09 15:47:00] [Rank 0] step:1621/10000 train_time:99620ms step_avg:61.46ms
+[2025-09-09 15:47:01] [Rank 0] step:1641/10000 train_time:100711ms step_avg:61.37ms
+[2025-09-09 15:47:02] [Rank 0] step:1661/10000 train_time:101528ms step_avg:61.12ms
+[2025-09-09 15:47:03] [Rank 0] step:1681/10000 train_time:102345ms step_avg:60.88ms
+[2025-09-09 15:47:03] [Rank 0] step:1701/10000 train_time:103161ms step_avg:60.65ms
+[2025-09-09 15:47:04] [Rank 0] step:1721/10000 train_time:103977ms step_avg:60.42ms
+[2025-09-09 15:47:05] [Rank 0] step:1741/10000 train_time:104793ms step_avg:60.19ms
+[2025-09-09 15:47:06] [Rank 0] step:1761/10000 train_time:105609ms step_avg:59.97ms
+[2025-09-09 15:47:07] [Rank 0] step:1781/10000 train_time:106424ms step_avg:59.76ms
+[2025-09-09 15:47:07] [Rank 0] step:1801/10000 train_time:107240ms step_avg:59.54ms
+[2025-09-09 15:47:08] [Rank 0] step:1821/10000 train_time:108056ms step_avg:59.34ms
+[2025-09-09 15:47:09] [Rank 0] step:1841/10000 train_time:108872ms step_avg:59.14ms
+[2025-09-09 15:47:10] [Rank 0] step:1861/10000 train_time:109687ms step_avg:58.94ms
+[2025-09-09 15:47:11] [Rank 0] step:1881/10000 train_time:110503ms step_avg:58.75ms
+[2025-09-09 15:47:12] [Rank 0] step:1901/10000 train_time:111318ms step_avg:58.56ms
+[2025-09-09 15:47:12] [Rank 0] step:1921/10000 train_time:112132ms step_avg:58.37ms
+[2025-09-09 15:47:13] [Rank 0] step:1941/10000 train_time:112948ms step_avg:58.19ms
+[2025-09-09 15:47:14] [Rank 0] step:1961/10000 train_time:113764ms step_avg:58.01ms
+[2025-09-09 15:47:15] [Rank 0] step:1981/10000 train_time:114578ms step_avg:57.84ms
+[2025-09-09 15:47:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:47:16] [Rank 0] PRINT: step:2000/10000 train_loss:0.7387 val_loss:0.7066 train_time:115396ms step_avg:57.70ms
+[2025-09-09 15:47:16] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:47:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:48:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:48:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:48:38] [Rank 0] Total Loss: 4.6695
+[2025-09-09 15:48:38] [Rank 0] Total FTA (Unweighted): 0.8500
+[2025-09-09 15:48:38] [Rank 0] Total FTA (Weighted): 0.8500
+[2025-09-09 15:48:38] [Rank 0] Group 0 Loss: 4.5839
+[2025-09-09 15:48:38] [Rank 0] Group 1 Loss: 4.4421
+[2025-09-09 15:48:38] [Rank 0] Group 2 Loss: 4.2551
+[2025-09-09 15:48:38] [Rank 0] Group 3 Loss: 4.5483
+[2025-09-09 15:48:38] [Rank 0] Group 4 Loss: 4.4675
+[2025-09-09 15:48:38] [Rank 0] Group 5 Loss: 4.4716
+[2025-09-09 15:48:38] [Rank 0] Group 6 Loss: 4.4735
+[2025-09-09 15:48:38] [Rank 0] Group 7 Loss: 4.5196
+[2025-09-09 15:48:38] [Rank 0] Group 8 Loss: 4.6897
+[2025-09-09 15:48:38] [Rank 0] Group 9 Loss: 4.5706
+[2025-09-09 15:48:38] [Rank 0] Group 10 Loss: 4.8664
+[2025-09-09 15:48:38] [Rank 0] Group 11 Loss: 4.8080
+[2025-09-09 15:48:38] [Rank 0] Group 12 Loss: 4.8052
+[2025-09-09 15:48:38] [Rank 0] Group 13 Loss: 5.0137
+[2025-09-09 15:48:38] [Rank 0] Group 14 Loss: 5.0260
+[2025-09-09 15:48:38] [Rank 0] Group 15 Loss: 5.1709
+[2025-09-09 15:48:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:48:38] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:48:38] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:48:38] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:48:38] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:48:38] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:48:38] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:48:38] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:48:38] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:48:38] [Rank 0] Group 9 FTA: 0.9800
+[2025-09-09 15:48:38] [Rank 0] Group 10 FTA: 0.9600
+[2025-09-09 15:48:38] [Rank 0] Group 11 FTA: 0.9100
+[2025-09-09 15:48:38] [Rank 0] Group 12 FTA: 0.7900
+[2025-09-09 15:48:38] [Rank 0] Group 13 FTA: 0.5100
+[2025-09-09 15:48:38] [Rank 0] Group 14 FTA: 0.2800
+[2025-09-09 15:48:38] [Rank 0] Group 15 FTA: 0.1700
+[2025-09-09 15:48:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 15:48:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 15:48:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 15:48:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 15:48:39] [Rank 0] step:2001/10000 train_time:115412ms step_avg:57.68ms
+[2025-09-09 15:48:40] [Rank 0] step:2021/10000 train_time:116497ms step_avg:57.64ms
+[2025-09-09 15:48:41] [Rank 0] step:2041/10000 train_time:117312ms step_avg:57.48ms
+[2025-09-09 15:48:42] [Rank 0] step:2061/10000 train_time:118128ms step_avg:57.32ms
+[2025-09-09 15:48:43] [Rank 0] step:2081/10000 train_time:118944ms step_avg:57.16ms
+[2025-09-09 15:48:44] [Rank 0] step:2101/10000 train_time:119762ms step_avg:57.00ms
+[2025-09-09 15:48:44] [Rank 0] step:2121/10000 train_time:120578ms step_avg:56.85ms
+[2025-09-09 15:48:45] [Rank 0] step:2141/10000 train_time:121394ms step_avg:56.70ms
+[2025-09-09 15:48:46] [Rank 0] step:2161/10000 train_time:122210ms step_avg:56.55ms
+[2025-09-09 15:48:47] [Rank 0] step:2181/10000 train_time:123026ms step_avg:56.41ms
+[2025-09-09 15:48:48] [Rank 0] step:2201/10000 train_time:123841ms step_avg:56.27ms
+[2025-09-09 15:48:48] [Rank 0] step:2221/10000 train_time:124658ms step_avg:56.13ms
+[2025-09-09 15:48:49] [Rank 0] step:2241/10000 train_time:125480ms step_avg:55.99ms
+[2025-09-09 15:48:50] [Rank 0] step:2261/10000 train_time:126303ms step_avg:55.86ms
+[2025-09-09 15:48:51] [Rank 0] step:2281/10000 train_time:127126ms step_avg:55.73ms
+[2025-09-09 15:48:52] [Rank 0] step:2301/10000 train_time:127949ms step_avg:55.61ms
+[2025-09-09 15:48:53] [Rank 0] step:2321/10000 train_time:128772ms step_avg:55.48ms
+[2025-09-09 15:48:53] [Rank 0] step:2341/10000 train_time:129593ms step_avg:55.36ms
+[2025-09-09 15:48:54] [Rank 0] step:2361/10000 train_time:130414ms step_avg:55.24ms
+[2025-09-09 15:48:55] [Rank 0] step:2381/10000 train_time:131237ms step_avg:55.12ms
+[2025-09-09 15:48:56] [Rank 0] step:2401/10000 train_time:132059ms step_avg:55.00ms
+[2025-09-09 15:48:57] [Rank 0] step:2421/10000 train_time:132879ms step_avg:54.89ms
+[2025-09-09 15:48:57] [Rank 0] step:2441/10000 train_time:133700ms step_avg:54.77ms
+[2025-09-09 15:48:58] [Rank 0] step:2461/10000 train_time:134522ms step_avg:54.66ms
+[2025-09-09 15:48:59] [Rank 0] step:2481/10000 train_time:135343ms step_avg:54.55ms
+[2025-09-09 15:49:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:49:00] [Rank 0] PRINT: step:2500/10000 train_loss:0.7082 val_loss:0.6815 train_time:136167ms step_avg:54.47ms
+[2025-09-09 15:49:00] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:49:00] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:50:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:50:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:50:22] [Rank 0] Total Loss: 4.8070
+[2025-09-09 15:50:22] [Rank 0] Total FTA (Unweighted): 0.8750
+[2025-09-09 15:50:22] [Rank 0] Total FTA (Weighted): 0.8750
+[2025-09-09 15:50:22] [Rank 0] Group 0 Loss: 4.7851
+[2025-09-09 15:50:22] [Rank 0] Group 1 Loss: 4.5513
+[2025-09-09 15:50:22] [Rank 0] Group 2 Loss: 4.4191
+[2025-09-09 15:50:22] [Rank 0] Group 3 Loss: 4.6808
+[2025-09-09 15:50:22] [Rank 0] Group 4 Loss: 4.6221
+[2025-09-09 15:50:22] [Rank 0] Group 5 Loss: 4.6883
+[2025-09-09 15:50:22] [Rank 0] Group 6 Loss: 4.6799
+[2025-09-09 15:50:22] [Rank 0] Group 7 Loss: 4.6964
+[2025-09-09 15:50:22] [Rank 0] Group 8 Loss: 4.8390
+[2025-09-09 15:50:22] [Rank 0] Group 9 Loss: 4.7476
+[2025-09-09 15:50:22] [Rank 0] Group 10 Loss: 4.9517
+[2025-09-09 15:50:22] [Rank 0] Group 11 Loss: 4.9660
+[2025-09-09 15:50:22] [Rank 0] Group 12 Loss: 4.9157
+[2025-09-09 15:50:22] [Rank 0] Group 13 Loss: 5.0741
+[2025-09-09 15:50:22] [Rank 0] Group 14 Loss: 5.0996
+[2025-09-09 15:50:22] [Rank 0] Group 15 Loss: 5.1952
+[2025-09-09 15:50:22] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:50:22] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:50:22] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:50:22] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:50:22] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:50:22] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:50:22] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:50:22] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:50:22] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:50:22] [Rank 0] Group 9 FTA: 0.9800
+[2025-09-09 15:50:22] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-09 15:50:22] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-09 15:50:22] [Rank 0] Group 12 FTA: 0.8600
+[2025-09-09 15:50:22] [Rank 0] Group 13 FTA: 0.6800
+[2025-09-09 15:50:22] [Rank 0] Group 14 FTA: 0.3200
+[2025-09-09 15:50:22] [Rank 0] Group 15 FTA: 0.2100
+[2025-09-09 15:50:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 15:50:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 15:50:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 15:50:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 15:50:23] [Rank 0] step:2501/10000 train_time:136183ms step_avg:54.45ms
+[2025-09-09 15:50:24] [Rank 0] step:2521/10000 train_time:137026ms step_avg:54.35ms
+[2025-09-09 15:50:25] [Rank 0] step:2541/10000 train_time:137845ms step_avg:54.25ms
+[2025-09-09 15:50:26] [Rank 0] step:2561/10000 train_time:138669ms step_avg:54.15ms
+[2025-09-09 15:50:27] [Rank 0] step:2581/10000 train_time:139489ms step_avg:54.04ms
+[2025-09-09 15:50:28] [Rank 0] step:2601/10000 train_time:140312ms step_avg:53.95ms
+[2025-09-09 15:50:28] [Rank 0] step:2621/10000 train_time:141134ms step_avg:53.85ms
+[2025-09-09 15:50:29] [Rank 0] step:2641/10000 train_time:141956ms step_avg:53.75ms
+[2025-09-09 15:50:30] [Rank 0] step:2661/10000 train_time:142778ms step_avg:53.66ms
+[2025-09-09 15:50:31] [Rank 0] step:2681/10000 train_time:143599ms step_avg:53.56ms
+[2025-09-09 15:50:32] [Rank 0] step:2701/10000 train_time:144421ms step_avg:53.47ms
+[2025-09-09 15:50:32] [Rank 0] step:2721/10000 train_time:145244ms step_avg:53.38ms
+[2025-09-09 15:50:33] [Rank 0] step:2741/10000 train_time:146067ms step_avg:53.29ms
+[2025-09-09 15:50:34] [Rank 0] step:2761/10000 train_time:146890ms step_avg:53.20ms
+[2025-09-09 15:50:35] [Rank 0] step:2781/10000 train_time:147715ms step_avg:53.12ms
+[2025-09-09 15:50:36] [Rank 0] step:2801/10000 train_time:148535ms step_avg:53.03ms
+[2025-09-09 15:50:37] [Rank 0] step:2821/10000 train_time:149624ms step_avg:53.04ms
+[2025-09-09 15:50:38] [Rank 0] step:2841/10000 train_time:150446ms step_avg:52.96ms
+[2025-09-09 15:50:38] [Rank 0] step:2861/10000 train_time:151271ms step_avg:52.87ms
+[2025-09-09 15:50:39] [Rank 0] step:2881/10000 train_time:152091ms step_avg:52.79ms
+[2025-09-09 15:50:40] [Rank 0] step:2901/10000 train_time:152913ms step_avg:52.71ms
+[2025-09-09 15:50:41] [Rank 0] step:2921/10000 train_time:153735ms step_avg:52.63ms
+[2025-09-09 15:50:42] [Rank 0] step:2941/10000 train_time:154557ms step_avg:52.55ms
+[2025-09-09 15:50:43] [Rank 0] step:2961/10000 train_time:155380ms step_avg:52.48ms
+[2025-09-09 15:50:43] [Rank 0] step:2981/10000 train_time:156203ms step_avg:52.40ms
+[2025-09-09 15:50:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:50:45] [Rank 0] PRINT: step:3000/10000 train_loss:0.6869 val_loss:0.6653 train_time:157029ms step_avg:52.34ms
+[2025-09-09 15:50:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:50:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:52:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:52:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:52:06] [Rank 0] Total Loss: 4.7424
+[2025-09-09 15:52:06] [Rank 0] Total FTA (Unweighted): 0.9019
+[2025-09-09 15:52:06] [Rank 0] Total FTA (Weighted): 0.9019
+[2025-09-09 15:52:06] [Rank 0] Group 0 Loss: 4.6279
+[2025-09-09 15:52:06] [Rank 0] Group 1 Loss: 4.4547
+[2025-09-09 15:52:06] [Rank 0] Group 2 Loss: 4.3004
+[2025-09-09 15:52:06] [Rank 0] Group 3 Loss: 4.6447
+[2025-09-09 15:52:06] [Rank 0] Group 4 Loss: 4.6484
+[2025-09-09 15:52:06] [Rank 0] Group 5 Loss: 4.6115
+[2025-09-09 15:52:06] [Rank 0] Group 6 Loss: 4.5779
+[2025-09-09 15:52:06] [Rank 0] Group 7 Loss: 4.6898
+[2025-09-09 15:52:06] [Rank 0] Group 8 Loss: 4.7647
+[2025-09-09 15:52:06] [Rank 0] Group 9 Loss: 4.7277
+[2025-09-09 15:52:06] [Rank 0] Group 10 Loss: 4.9123
+[2025-09-09 15:52:06] [Rank 0] Group 11 Loss: 4.8714
+[2025-09-09 15:52:06] [Rank 0] Group 12 Loss: 4.8999
+[2025-09-09 15:52:06] [Rank 0] Group 13 Loss: 5.0048
+[2025-09-09 15:52:06] [Rank 0] Group 14 Loss: 5.0127
+[2025-09-09 15:52:06] [Rank 0] Group 15 Loss: 5.1296
+[2025-09-09 15:52:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:52:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:52:06] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:52:06] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:52:06] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:52:06] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:52:06] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:52:06] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:52:06] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:52:06] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:52:06] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-09 15:52:06] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-09 15:52:06] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-09 15:52:06] [Rank 0] Group 13 FTA: 0.8400
+[2025-09-09 15:52:06] [Rank 0] Group 14 FTA: 0.4100
+[2025-09-09 15:52:06] [Rank 0] Group 15 FTA: 0.2200
+[2025-09-09 15:52:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 15:52:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 15:52:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 15:52:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 15:52:08] [Rank 0] step:3001/10000 train_time:157045ms step_avg:52.33ms
+[2025-09-09 15:52:09] [Rank 0] step:3021/10000 train_time:157884ms step_avg:52.26ms
+[2025-09-09 15:52:09] [Rank 0] step:3041/10000 train_time:158706ms step_avg:52.19ms
+[2025-09-09 15:52:10] [Rank 0] step:3061/10000 train_time:159528ms step_avg:52.12ms
+[2025-09-09 15:52:11] [Rank 0] step:3081/10000 train_time:160351ms step_avg:52.05ms
+[2025-09-09 15:52:12] [Rank 0] step:3101/10000 train_time:161173ms step_avg:51.97ms
+[2025-09-09 15:52:13] [Rank 0] step:3121/10000 train_time:161996ms step_avg:51.91ms
+[2025-09-09 15:52:14] [Rank 0] step:3141/10000 train_time:162815ms step_avg:51.84ms
+[2025-09-09 15:52:14] [Rank 0] step:3161/10000 train_time:163637ms step_avg:51.77ms
+[2025-09-09 15:52:16] [Rank 0] step:3181/10000 train_time:164966ms step_avg:51.86ms
+[2025-09-09 15:52:16] [Rank 0] step:3201/10000 train_time:165788ms step_avg:51.79ms
+[2025-09-09 15:52:17] [Rank 0] step:3221/10000 train_time:166610ms step_avg:51.73ms
+[2025-09-09 15:52:18] [Rank 0] step:3241/10000 train_time:167436ms step_avg:51.66ms
+[2025-09-09 15:52:19] [Rank 0] step:3261/10000 train_time:168257ms step_avg:51.60ms
+[2025-09-09 15:52:20] [Rank 0] step:3281/10000 train_time:169079ms step_avg:51.53ms
+[2025-09-09 15:52:21] [Rank 0] step:3301/10000 train_time:169901ms step_avg:51.47ms
+[2025-09-09 15:52:21] [Rank 0] step:3321/10000 train_time:170723ms step_avg:51.41ms
+[2025-09-09 15:52:22] [Rank 0] step:3341/10000 train_time:171546ms step_avg:51.35ms
+[2025-09-09 15:52:23] [Rank 0] step:3361/10000 train_time:172369ms step_avg:51.28ms
+[2025-09-09 15:52:24] [Rank 0] step:3381/10000 train_time:173192ms step_avg:51.22ms
+[2025-09-09 15:52:25] [Rank 0] step:3401/10000 train_time:174014ms step_avg:51.17ms
+[2025-09-09 15:52:26] [Rank 0] step:3421/10000 train_time:174837ms step_avg:51.11ms
+[2025-09-09 15:52:26] [Rank 0] step:3441/10000 train_time:175661ms step_avg:51.05ms
+[2025-09-09 15:52:27] [Rank 0] step:3461/10000 train_time:176481ms step_avg:50.99ms
+[2025-09-09 15:52:28] [Rank 0] step:3481/10000 train_time:177303ms step_avg:50.93ms
+[2025-09-09 15:52:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:52:29] [Rank 0] PRINT: step:3500/10000 train_loss:0.6727 val_loss:0.6536 train_time:178128ms step_avg:50.89ms
+[2025-09-09 15:52:29] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:52:29] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:53:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:53:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:53:51] [Rank 0] Total Loss: 4.7945
+[2025-09-09 15:53:51] [Rank 0] Total FTA (Unweighted): 0.9163
+[2025-09-09 15:53:51] [Rank 0] Total FTA (Weighted): 0.9163
+[2025-09-09 15:53:51] [Rank 0] Group 0 Loss: 4.7855
+[2025-09-09 15:53:51] [Rank 0] Group 1 Loss: 4.4074
+[2025-09-09 15:53:51] [Rank 0] Group 2 Loss: 4.3577
+[2025-09-09 15:53:51] [Rank 0] Group 3 Loss: 4.6926
+[2025-09-09 15:53:51] [Rank 0] Group 4 Loss: 4.6780
+[2025-09-09 15:53:51] [Rank 0] Group 5 Loss: 4.6455
+[2025-09-09 15:53:51] [Rank 0] Group 6 Loss: 4.6289
+[2025-09-09 15:53:51] [Rank 0] Group 7 Loss: 4.7564
+[2025-09-09 15:53:51] [Rank 0] Group 8 Loss: 4.8371
+[2025-09-09 15:53:51] [Rank 0] Group 9 Loss: 4.8165
+[2025-09-09 15:53:51] [Rank 0] Group 10 Loss: 4.9637
+[2025-09-09 15:53:51] [Rank 0] Group 11 Loss: 4.9431
+[2025-09-09 15:53:51] [Rank 0] Group 12 Loss: 4.9308
+[2025-09-09 15:53:51] [Rank 0] Group 13 Loss: 5.0720
+[2025-09-09 15:53:51] [Rank 0] Group 14 Loss: 5.0550
+[2025-09-09 15:53:51] [Rank 0] Group 15 Loss: 5.1420
+[2025-09-09 15:53:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:53:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:53:51] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:53:51] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:53:51] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:53:51] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:53:51] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:53:51] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:53:51] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:53:51] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:53:51] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 15:53:51] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-09 15:53:51] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 15:53:51] [Rank 0] Group 13 FTA: 0.8600
+[2025-09-09 15:53:51] [Rank 0] Group 14 FTA: 0.5800
+[2025-09-09 15:53:51] [Rank 0] Group 15 FTA: 0.2400
+[2025-09-09 15:53:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 15:53:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 15:53:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 15:53:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 15:53:52] [Rank 0] step:3501/10000 train_time:178146ms step_avg:50.88ms
+[2025-09-09 15:53:53] [Rank 0] step:3521/10000 train_time:178984ms step_avg:50.83ms
+[2025-09-09 15:53:54] [Rank 0] step:3541/10000 train_time:179807ms step_avg:50.78ms
+[2025-09-09 15:53:55] [Rank 0] step:3561/10000 train_time:180630ms step_avg:50.72ms
+[2025-09-09 15:53:56] [Rank 0] step:3581/10000 train_time:181453ms step_avg:50.67ms
+[2025-09-09 15:53:56] [Rank 0] step:3601/10000 train_time:182277ms step_avg:50.62ms
+[2025-09-09 15:53:57] [Rank 0] step:3621/10000 train_time:183099ms step_avg:50.57ms
+[2025-09-09 15:53:58] [Rank 0] step:3641/10000 train_time:184189ms step_avg:50.59ms
+[2025-09-09 15:53:59] [Rank 0] step:3661/10000 train_time:185011ms step_avg:50.54ms
+[2025-09-09 15:54:00] [Rank 0] step:3681/10000 train_time:185834ms step_avg:50.48ms
+[2025-09-09 15:54:01] [Rank 0] step:3701/10000 train_time:186657ms step_avg:50.43ms
+[2025-09-09 15:54:02] [Rank 0] step:3721/10000 train_time:187479ms step_avg:50.38ms
+[2025-09-09 15:54:02] [Rank 0] step:3741/10000 train_time:188302ms step_avg:50.33ms
+[2025-09-09 15:54:03] [Rank 0] step:3761/10000 train_time:189125ms step_avg:50.29ms
+[2025-09-09 15:54:04] [Rank 0] step:3781/10000 train_time:189952ms step_avg:50.24ms
+[2025-09-09 15:54:05] [Rank 0] step:3801/10000 train_time:190772ms step_avg:50.19ms
+[2025-09-09 15:54:06] [Rank 0] step:3821/10000 train_time:191595ms step_avg:50.14ms
+[2025-09-09 15:54:07] [Rank 0] step:3841/10000 train_time:192418ms step_avg:50.10ms
+[2025-09-09 15:54:07] [Rank 0] step:3861/10000 train_time:193240ms step_avg:50.05ms
+[2025-09-09 15:54:08] [Rank 0] step:3881/10000 train_time:194063ms step_avg:50.00ms
+[2025-09-09 15:54:09] [Rank 0] step:3901/10000 train_time:194885ms step_avg:49.96ms
+[2025-09-09 15:54:10] [Rank 0] step:3921/10000 train_time:195706ms step_avg:49.91ms
+[2025-09-09 15:54:11] [Rank 0] step:3941/10000 train_time:196530ms step_avg:49.87ms
+[2025-09-09 15:54:11] [Rank 0] step:3961/10000 train_time:197351ms step_avg:49.82ms
+[2025-09-09 15:54:12] [Rank 0] step:3981/10000 train_time:198173ms step_avg:49.78ms
+[2025-09-09 15:54:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:54:14] [Rank 0] PRINT: step:4000/10000 train_loss:0.6620 val_loss:0.6437 train_time:198998ms step_avg:49.75ms
+[2025-09-09 15:54:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:54:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:55:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:55:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:55:36] [Rank 0] Total Loss: 4.9834
+[2025-09-09 15:55:36] [Rank 0] Total FTA (Unweighted): 0.9319
+[2025-09-09 15:55:36] [Rank 0] Total FTA (Weighted): 0.9319
+[2025-09-09 15:55:36] [Rank 0] Group 0 Loss: 4.9726
+[2025-09-09 15:55:36] [Rank 0] Group 1 Loss: 4.6836
+[2025-09-09 15:55:36] [Rank 0] Group 2 Loss: 4.4638
+[2025-09-09 15:55:36] [Rank 0] Group 3 Loss: 4.9147
+[2025-09-09 15:55:36] [Rank 0] Group 4 Loss: 4.8518
+[2025-09-09 15:55:36] [Rank 0] Group 5 Loss: 4.8346
+[2025-09-09 15:55:36] [Rank 0] Group 6 Loss: 4.8195
+[2025-09-09 15:55:36] [Rank 0] Group 7 Loss: 4.9553
+[2025-09-09 15:55:36] [Rank 0] Group 8 Loss: 5.0076
+[2025-09-09 15:55:36] [Rank 0] Group 9 Loss: 5.0152
+[2025-09-09 15:55:36] [Rank 0] Group 10 Loss: 5.1624
+[2025-09-09 15:55:36] [Rank 0] Group 11 Loss: 5.1392
+[2025-09-09 15:55:36] [Rank 0] Group 12 Loss: 5.1081
+[2025-09-09 15:55:36] [Rank 0] Group 13 Loss: 5.2486
+[2025-09-09 15:55:36] [Rank 0] Group 14 Loss: 5.2259
+[2025-09-09 15:55:36] [Rank 0] Group 15 Loss: 5.3319
+[2025-09-09 15:55:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:55:36] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:55:36] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:55:36] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:55:36] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:55:37] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:55:37] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:55:37] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:55:37] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:55:37] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:55:37] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 15:55:37] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 15:55:37] [Rank 0] Group 12 FTA: 0.9700
+[2025-09-09 15:55:37] [Rank 0] Group 13 FTA: 0.8800
+[2025-09-09 15:55:37] [Rank 0] Group 14 FTA: 0.6000
+[2025-09-09 15:55:37] [Rank 0] Group 15 FTA: 0.4600
+[2025-09-09 15:55:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 15:55:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 15:55:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 15:55:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 15:55:38] [Rank 0] step:4001/10000 train_time:199016ms step_avg:49.74ms
+[2025-09-09 15:55:40] [Rank 0] step:4021/10000 train_time:200539ms step_avg:49.87ms
+[2025-09-09 15:55:40] [Rank 0] step:4041/10000 train_time:201363ms step_avg:49.83ms
+[2025-09-09 15:55:41] [Rank 0] step:4061/10000 train_time:202183ms step_avg:49.79ms
+[2025-09-09 15:55:42] [Rank 0] step:4081/10000 train_time:203004ms step_avg:49.74ms
+[2025-09-09 15:55:43] [Rank 0] step:4101/10000 train_time:203826ms step_avg:49.70ms
+[2025-09-09 15:55:44] [Rank 0] step:4121/10000 train_time:204648ms step_avg:49.66ms
+[2025-09-09 15:55:44] [Rank 0] step:4141/10000 train_time:205469ms step_avg:49.62ms
+[2025-09-09 15:55:45] [Rank 0] step:4161/10000 train_time:206291ms step_avg:49.58ms
+[2025-09-09 15:55:46] [Rank 0] step:4181/10000 train_time:207112ms step_avg:49.54ms
+[2025-09-09 15:55:47] [Rank 0] step:4201/10000 train_time:207934ms step_avg:49.50ms
+[2025-09-09 15:55:48] [Rank 0] step:4221/10000 train_time:208755ms step_avg:49.46ms
+[2025-09-09 15:55:49] [Rank 0] step:4241/10000 train_time:209577ms step_avg:49.42ms
+[2025-09-09 15:55:49] [Rank 0] step:4261/10000 train_time:210399ms step_avg:49.38ms
+[2025-09-09 15:55:50] [Rank 0] step:4281/10000 train_time:211221ms step_avg:49.34ms
+[2025-09-09 15:55:51] [Rank 0] step:4301/10000 train_time:212046ms step_avg:49.30ms
+[2025-09-09 15:55:52] [Rank 0] step:4321/10000 train_time:212865ms step_avg:49.26ms
+[2025-09-09 15:55:53] [Rank 0] step:4341/10000 train_time:213686ms step_avg:49.23ms
+[2025-09-09 15:55:53] [Rank 0] step:4361/10000 train_time:214508ms step_avg:49.19ms
+[2025-09-09 15:55:54] [Rank 0] step:4381/10000 train_time:215329ms step_avg:49.15ms
+[2025-09-09 15:55:55] [Rank 0] step:4401/10000 train_time:216150ms step_avg:49.11ms
+[2025-09-09 15:55:56] [Rank 0] step:4421/10000 train_time:216971ms step_avg:49.08ms
+[2025-09-09 15:55:57] [Rank 0] step:4441/10000 train_time:217792ms step_avg:49.04ms
+[2025-09-09 15:55:58] [Rank 0] step:4461/10000 train_time:218613ms step_avg:49.01ms
+[2025-09-09 15:55:58] [Rank 0] step:4481/10000 train_time:219436ms step_avg:48.97ms
+[2025-09-09 15:55:59] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 15:56:00] [Rank 0] PRINT: step:4500/10000 train_loss:0.6528 val_loss:0.6358 train_time:220259ms step_avg:48.95ms
+[2025-09-09 15:56:00] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:56:00] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:57:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:57:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:57:21] [Rank 0] Total Loss: 4.9547
+[2025-09-09 15:57:21] [Rank 0] Total FTA (Unweighted): 0.9431
+[2025-09-09 15:57:21] [Rank 0] Total FTA (Weighted): 0.9431
+[2025-09-09 15:57:21] [Rank 0] Group 0 Loss: 4.7763
+[2025-09-09 15:57:21] [Rank 0] Group 1 Loss: 4.5824
+[2025-09-09 15:57:21] [Rank 0] Group 2 Loss: 4.4802
+[2025-09-09 15:57:21] [Rank 0] Group 3 Loss: 4.8814
+[2025-09-09 15:57:21] [Rank 0] Group 4 Loss: 4.8410
+[2025-09-09 15:57:21] [Rank 0] Group 5 Loss: 4.8127
+[2025-09-09 15:57:21] [Rank 0] Group 6 Loss: 4.8328
+[2025-09-09 15:57:21] [Rank 0] Group 7 Loss: 4.9452
+[2025-09-09 15:57:21] [Rank 0] Group 8 Loss: 5.0448
+[2025-09-09 15:57:21] [Rank 0] Group 9 Loss: 5.0044
+[2025-09-09 15:57:21] [Rank 0] Group 10 Loss: 5.1722
+[2025-09-09 15:57:21] [Rank 0] Group 11 Loss: 5.1420
+[2025-09-09 15:57:21] [Rank 0] Group 12 Loss: 5.0880
+[2025-09-09 15:57:21] [Rank 0] Group 13 Loss: 5.2042
+[2025-09-09 15:57:21] [Rank 0] Group 14 Loss: 5.2128
+[2025-09-09 15:57:21] [Rank 0] Group 15 Loss: 5.2552
+[2025-09-09 15:57:21] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:57:21] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:57:21] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:57:21] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:57:21] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:57:21] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:57:21] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:57:21] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:57:21] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:57:21] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:57:21] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-09 15:57:21] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 15:57:21] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-09 15:57:21] [Rank 0] Group 13 FTA: 0.9600
+[2025-09-09 15:57:21] [Rank 0] Group 14 FTA: 0.7200
+[2025-09-09 15:57:21] [Rank 0] Group 15 FTA: 0.4400
+[2025-09-09 15:57:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 15:57:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 15:57:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 15:57:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 15:57:23] [Rank 0] step:4501/10000 train_time:220276ms step_avg:48.94ms
+[2025-09-09 15:57:24] [Rank 0] step:4521/10000 train_time:221112ms step_avg:48.91ms
+[2025-09-09 15:57:24] [Rank 0] step:4541/10000 train_time:221935ms step_avg:48.87ms
+[2025-09-09 15:57:25] [Rank 0] step:4561/10000 train_time:222758ms step_avg:48.84ms
+[2025-09-09 15:57:26] [Rank 0] step:4581/10000 train_time:223579ms step_avg:48.81ms
+[2025-09-09 15:57:27] [Rank 0] step:4601/10000 train_time:224401ms step_avg:48.77ms
+[2025-09-09 15:57:28] [Rank 0] step:4621/10000 train_time:225222ms step_avg:48.74ms
+[2025-09-09 15:57:29] [Rank 0] step:4641/10000 train_time:226044ms step_avg:48.71ms
+[2025-09-09 15:57:29] [Rank 0] step:4661/10000 train_time:226865ms step_avg:48.67ms
+[2025-09-09 15:57:30] [Rank 0] step:4681/10000 train_time:227687ms step_avg:48.64ms
+[2025-09-09 15:57:31] [Rank 0] step:4701/10000 train_time:228874ms step_avg:48.69ms
+[2025-09-09 15:57:32] [Rank 0] step:4721/10000 train_time:229830ms step_avg:48.68ms
+[2025-09-09 15:57:33] [Rank 0] step:4741/10000 train_time:230652ms step_avg:48.65ms
+[2025-09-09 15:57:34] [Rank 0] step:4761/10000 train_time:231474ms step_avg:48.62ms
+[2025-09-09 15:57:35] [Rank 0] step:4781/10000 train_time:232297ms step_avg:48.59ms
+[2025-09-09 15:57:36] [Rank 0] step:4801/10000 train_time:233117ms step_avg:48.56ms
+[2025-09-09 15:57:36] [Rank 0] step:4821/10000 train_time:233938ms step_avg:48.52ms
+[2025-09-09 15:57:38] [Rank 0] step:4841/10000 train_time:235070ms step_avg:48.56ms
+[2025-09-09 15:57:38] [Rank 0] step:4861/10000 train_time:235893ms step_avg:48.53ms
+[2025-09-09 15:57:39] [Rank 0] step:4881/10000 train_time:236716ms step_avg:48.50ms
+[2025-09-09 15:57:40] [Rank 0] step:4901/10000 train_time:237537ms step_avg:48.47ms
+[2025-09-09 15:57:41] [Rank 0] step:4921/10000 train_time:238358ms step_avg:48.44ms
+[2025-09-09 15:57:42] [Rank 0] step:4941/10000 train_time:239180ms step_avg:48.41ms
+[2025-09-09 15:57:42] [Rank 0] step:4961/10000 train_time:240002ms step_avg:48.38ms
+[2025-09-09 15:57:43] [Rank 0] step:4981/10000 train_time:240825ms step_avg:48.35ms
+[2025-09-09 15:57:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
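In every detailed-evaluation block the unweighted and weighted Total FTA agree. That is expected if the 1600 fixed-eval samples are split evenly over the 16 groups (100 each): a sample-weighted mean then reduces to the plain mean over groups. A sketch using the step-4500 per-group values above (the averaging scheme is inferred, not taken from the script):

    # Per-group FTA at step 4500, groups 0..15, copied from the log above.
    fta = [1.0] * 10 + [0.99, 1.0, 0.98, 0.96, 0.72, 0.44]
    counts = [100] * 16  # assumed equal group sizes (1600 / 16)
    unweighted = sum(fta) / len(fta)
    weighted = sum(f * c for f, c in zip(fta, counts)) / sum(counts)
    print(f"{unweighted:.4f} {weighted:.4f}")  # -> 0.9431 0.9431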
+[2025-09-09 15:57:45] [Rank 0] PRINT: step:5000/10000 train_loss:0.6443 val_loss:0.6293 train_time:241650ms step_avg:48.33ms
+[2025-09-09 15:57:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:57:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 15:59:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 15:59:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 15:59:07] [Rank 0] Total Loss: 4.9498
+[2025-09-09 15:59:07] [Rank 0] Total FTA (Unweighted): 0.9575
+[2025-09-09 15:59:07] [Rank 0] Total FTA (Weighted): 0.9575
+[2025-09-09 15:59:07] [Rank 0] Group 0 Loss: 4.7721
+[2025-09-09 15:59:07] [Rank 0] Group 1 Loss: 4.7171
+[2025-09-09 15:59:07] [Rank 0] Group 2 Loss: 4.5105
+[2025-09-09 15:59:07] [Rank 0] Group 3 Loss: 4.8321
+[2025-09-09 15:59:07] [Rank 0] Group 4 Loss: 4.8783
+[2025-09-09 15:59:07] [Rank 0] Group 5 Loss: 4.8345
+[2025-09-09 15:59:07] [Rank 0] Group 6 Loss: 4.7775
+[2025-09-09 15:59:07] [Rank 0] Group 7 Loss: 4.8713
+[2025-09-09 15:59:07] [Rank 0] Group 8 Loss: 5.0129
+[2025-09-09 15:59:07] [Rank 0] Group 9 Loss: 5.0242
+[2025-09-09 15:59:07] [Rank 0] Group 10 Loss: 5.1331
+[2025-09-09 15:59:07] [Rank 0] Group 11 Loss: 5.0988
+[2025-09-09 15:59:07] [Rank 0] Group 12 Loss: 5.0800
+[2025-09-09 15:59:07] [Rank 0] Group 13 Loss: 5.2322
+[2025-09-09 15:59:07] [Rank 0] Group 14 Loss: 5.1802
+[2025-09-09 15:59:07] [Rank 0] Group 15 Loss: 5.2411
+[2025-09-09 15:59:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 15:59:07] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 15:59:07] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 15:59:07] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 15:59:07] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 15:59:07] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 15:59:07] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 15:59:07] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 15:59:07] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 15:59:07] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 15:59:07] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 15:59:07] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-09 15:59:07] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 15:59:07] [Rank 0] Group 13 FTA: 0.9900
+[2025-09-09 15:59:07] [Rank 0] Group 14 FTA: 0.8300
+[2025-09-09 15:59:07] [Rank 0] Group 15 FTA: 0.5200
+[2025-09-09 15:59:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 15:59:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 15:59:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 15:59:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 15:59:08] [Rank 0] step:5001/10000 train_time:241666ms step_avg:48.32ms
+[2025-09-09 15:59:09] [Rank 0] step:5021/10000 train_time:242500ms step_avg:48.30ms
+[2025-09-09 15:59:10] [Rank 0] step:5041/10000 train_time:243322ms step_avg:48.27ms
+[2025-09-09 15:59:11] [Rank 0] step:5061/10000 train_time:244146ms step_avg:48.24ms
+[2025-09-09 15:59:11] [Rank 0] step:5081/10000 train_time:244968ms step_avg:48.21ms
+[2025-09-09 15:59:12] [Rank 0] step:5101/10000 train_time:245790ms step_avg:48.18ms
+[2025-09-09 15:59:13] [Rank 0] step:5121/10000 train_time:246613ms step_avg:48.16ms
+[2025-09-09 15:59:14] [Rank 0] step:5141/10000 train_time:247437ms step_avg:48.13ms
+[2025-09-09 15:59:15] [Rank 0] step:5161/10000 train_time:248256ms step_avg:48.10ms
+[2025-09-09 15:59:16] [Rank 0] step:5181/10000 train_time:249078ms step_avg:48.08ms
+[2025-09-09 15:59:16] [Rank 0] step:5201/10000 train_time:249900ms step_avg:48.05ms
+[2025-09-09 15:59:17] [Rank 0] step:5221/10000 train_time:250721ms step_avg:48.02ms
+[2025-09-09 15:59:18] [Rank 0] step:5241/10000 train_time:251543ms step_avg:48.00ms
+[2025-09-09 15:59:19] [Rank 0] step:5261/10000 train_time:252365ms step_avg:47.97ms
+[2025-09-09 15:59:20] [Rank 0] step:5281/10000 train_time:253186ms step_avg:47.94ms
+[2025-09-09 15:59:20] [Rank 0] step:5301/10000 train_time:254008ms step_avg:47.92ms
+[2025-09-09 15:59:21] [Rank 0] step:5321/10000 train_time:254831ms step_avg:47.89ms
+[2025-09-09 15:59:22] [Rank 0] step:5341/10000 train_time:255652ms step_avg:47.87ms
+[2025-09-09 15:59:23] [Rank 0] step:5361/10000 train_time:256472ms step_avg:47.84ms
+[2025-09-09 15:59:24] [Rank 0] step:5381/10000 train_time:257295ms step_avg:47.82ms
+[2025-09-09 15:59:25] [Rank 0] step:5401/10000 train_time:258116ms step_avg:47.79ms
+[2025-09-09 15:59:25] [Rank 0] step:5421/10000 train_time:258936ms step_avg:47.77ms
+[2025-09-09 15:59:26] [Rank 0] step:5441/10000 train_time:259757ms step_avg:47.74ms
+[2025-09-09 15:59:27] [Rank 0] step:5461/10000 train_time:260579ms step_avg:47.72ms
+[2025-09-09 15:59:28] [Rank 0] step:5481/10000 train_time:261400ms step_avg:47.69ms
+[2025-09-09 15:59:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
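step_avg in these lines is consistent with cumulative train_time divided by the current step index; at step 5000 above, 241650 ms / 5000 = 48.33 ms. A one-line check (the relationship is inferred from the logged values, not from the script):

    train_time_ms, step = 241_650, 5_000  # values from the step:5000 line above
    print(f"step_avg:{train_time_ms / step:.2f}ms")  # -> step_avg:48.33ms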
+[2025-09-09 15:59:29] [Rank 0] PRINT: step:5500/10000 train_loss:0.6370 val_loss:0.6235 train_time:262225ms step_avg:47.68ms
+[2025-09-09 15:59:29] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 15:59:29] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 16:00:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 16:00:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 16:00:51] [Rank 0] Total Loss: 5.0141
+[2025-09-09 16:00:51] [Rank 0] Total FTA (Unweighted): 0.9637
+[2025-09-09 16:00:51] [Rank 0] Total FTA (Weighted): 0.9637
+[2025-09-09 16:00:51] [Rank 0] Group 0 Loss: 4.9412
+[2025-09-09 16:00:51] [Rank 0] Group 1 Loss: 4.7372
+[2025-09-09 16:00:51] [Rank 0] Group 2 Loss: 4.5501
+[2025-09-09 16:00:51] [Rank 0] Group 3 Loss: 4.9137
+[2025-09-09 16:00:51] [Rank 0] Group 4 Loss: 4.9131
+[2025-09-09 16:00:51] [Rank 0] Group 5 Loss: 4.8873
+[2025-09-09 16:00:51] [Rank 0] Group 6 Loss: 4.8539
+[2025-09-09 16:00:51] [Rank 0] Group 7 Loss: 4.9685
+[2025-09-09 16:00:51] [Rank 0] Group 8 Loss: 5.0589
+[2025-09-09 16:00:51] [Rank 0] Group 9 Loss: 5.0807
+[2025-09-09 16:00:51] [Rank 0] Group 10 Loss: 5.1821
+[2025-09-09 16:00:51] [Rank 0] Group 11 Loss: 5.2038
+[2025-09-09 16:00:51] [Rank 0] Group 12 Loss: 5.1239
+[2025-09-09 16:00:51] [Rank 0] Group 13 Loss: 5.2598
+[2025-09-09 16:00:51] [Rank 0] Group 14 Loss: 5.2505
+[2025-09-09 16:00:51] [Rank 0] Group 15 Loss: 5.3005
+[2025-09-09 16:00:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 16:00:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 16:00:51] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 16:00:51] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 16:00:51] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 16:00:51] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 16:00:51] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 16:00:51] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 16:00:51] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 16:00:51] [Rank 0] Group 9 FTA: 0.9900
+[2025-09-09 16:00:51] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 16:00:51] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 16:00:51] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 16:00:51] [Rank 0] Group 13 FTA: 0.9600
+[2025-09-09 16:00:51] [Rank 0] Group 14 FTA: 0.8700
+[2025-09-09 16:00:51] [Rank 0] Group 15 FTA: 0.6000
+[2025-09-09 16:00:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 16:00:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 16:00:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 16:00:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 16:00:52] [Rank 0] step:5501/10000 train_time:262241ms step_avg:47.67ms
+[2025-09-09 16:00:53] [Rank 0] step:5521/10000 train_time:263083ms step_avg:47.65ms
+[2025-09-09 16:00:54] [Rank 0] step:5541/10000 train_time:263905ms step_avg:47.63ms
+[2025-09-09 16:00:55] [Rank 0] step:5561/10000 train_time:264727ms step_avg:47.60ms
+[2025-09-09 16:00:55] [Rank 0] step:5581/10000 train_time:265549ms step_avg:47.58ms
+[2025-09-09 16:00:56] [Rank 0] step:5601/10000 train_time:266373ms step_avg:47.56ms
+[2025-09-09 16:00:57] [Rank 0] step:5621/10000 train_time:267193ms step_avg:47.53ms
+[2025-09-09 16:00:58] [Rank 0] step:5641/10000 train_time:268283ms step_avg:47.56ms
+[2025-09-09 16:00:59] [Rank 0] step:5661/10000 train_time:269105ms step_avg:47.54ms
+[2025-09-09 16:01:00] [Rank 0] step:5681/10000 train_time:269929ms step_avg:47.51ms
+[2025-09-09 16:01:01] [Rank 0] step:5701/10000 train_time:270750ms step_avg:47.49ms
+[2025-09-09 16:01:02] [Rank 0] step:5721/10000 train_time:271574ms step_avg:47.47ms
+[2025-09-09 16:01:02] [Rank 0] step:5741/10000 train_time:272400ms step_avg:47.45ms
+[2025-09-09 16:01:03] [Rank 0] step:5761/10000 train_time:273224ms step_avg:47.43ms
+[2025-09-09 16:01:04] [Rank 0] step:5781/10000 train_time:274046ms step_avg:47.40ms
+[2025-09-09 16:01:05] [Rank 0] step:5801/10000 train_time:274868ms step_avg:47.38ms
+[2025-09-09 16:01:06] [Rank 0] step:5821/10000 train_time:275690ms step_avg:47.36ms
+[2025-09-09 16:01:06] [Rank 0] step:5841/10000 train_time:276512ms step_avg:47.34ms
+[2025-09-09 16:01:07] [Rank 0] step:5861/10000 train_time:277333ms step_avg:47.32ms
+[2025-09-09 16:01:08] [Rank 0] step:5881/10000 train_time:278155ms step_avg:47.30ms
+[2025-09-09 16:01:09] [Rank 0] step:5901/10000 train_time:278978ms step_avg:47.28ms
+[2025-09-09 16:01:10] [Rank 0] step:5921/10000 train_time:279798ms step_avg:47.26ms
+[2025-09-09 16:01:11] [Rank 0] step:5941/10000 train_time:280620ms step_avg:47.23ms
+[2025-09-09 16:01:11] [Rank 0] step:5961/10000 train_time:281441ms step_avg:47.21ms
+[2025-09-09 16:01:12] [Rank 0] step:5981/10000 train_time:282263ms step_avg:47.19ms
+[2025-09-09 16:01:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 16:01:13] [Rank 0] PRINT: step:6000/10000 train_loss:0.6308 val_loss:0.6187 train_time:283087ms step_avg:47.18ms
+[2025-09-09 16:01:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 16:01:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 16:02:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 16:02:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 16:02:35] [Rank 0] Total Loss: 5.0407
+[2025-09-09 16:02:35] [Rank 0] Total FTA (Unweighted): 0.9731
+[2025-09-09 16:02:35] [Rank 0] Total FTA (Weighted): 0.9731
+[2025-09-09 16:02:35] [Rank 0] Group 0 Loss: 4.9546
+[2025-09-09 16:02:35] [Rank 0] Group 1 Loss: 4.8292
+[2025-09-09 16:02:35] [Rank 0] Group 2 Loss: 4.5472
+[2025-09-09 16:02:35] [Rank 0] Group 3 Loss: 4.9321
+[2025-09-09 16:02:35] [Rank 0] Group 4 Loss: 4.9441
+[2025-09-09 16:02:35] [Rank 0] Group 5 Loss: 4.9254
+[2025-09-09 16:02:35] [Rank 0] Group 6 Loss: 4.8687
+[2025-09-09 16:02:35] [Rank 0] Group 7 Loss: 4.9796
+[2025-09-09 16:02:35] [Rank 0] Group 8 Loss: 5.1064
+[2025-09-09 16:02:35] [Rank 0] Group 9 Loss: 5.1049
+[2025-09-09 16:02:35] [Rank 0] Group 10 Loss: 5.2269
+[2025-09-09 16:02:35] [Rank 0] Group 11 Loss: 5.2063
+[2025-09-09 16:02:35] [Rank 0] Group 12 Loss: 5.1477
+[2025-09-09 16:02:35] [Rank 0] Group 13 Loss: 5.2970
+[2025-09-09 16:02:35] [Rank 0] Group 14 Loss: 5.2464
+[2025-09-09 16:02:35] [Rank 0] Group 15 Loss: 5.3340
+[2025-09-09 16:02:35] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 16:02:35] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 16:02:35] [Rank 0] Group 14 FTA: 0.9400
+[2025-09-09 16:02:35] [Rank 0] Group 15 FTA: 0.6400
+[2025-09-09 16:02:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 16:02:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 16:02:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 16:02:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 16:02:37] [Rank 0] step:6001/10000 train_time:283103ms step_avg:47.18ms
+[2025-09-09 16:02:38] [Rank 0] step:6021/10000 train_time:284640ms step_avg:47.27ms
+[2025-09-09 16:02:39] [Rank 0] step:6041/10000 train_time:285463ms step_avg:47.25ms
+[2025-09-09 16:02:40] [Rank 0] step:6061/10000 train_time:286284ms step_avg:47.23ms
+[2025-09-09 16:02:41] [Rank 0] step:6081/10000 train_time:287107ms step_avg:47.21ms
+[2025-09-09 16:02:41] [Rank 0] step:6101/10000 train_time:287929ms step_avg:47.19ms
+[2025-09-09 16:02:42] [Rank 0] step:6121/10000 train_time:288752ms step_avg:47.17ms
+[2025-09-09 16:02:43] [Rank 0] step:6141/10000 train_time:289574ms step_avg:47.15ms
+[2025-09-09 16:02:44] [Rank 0] step:6161/10000 train_time:290396ms step_avg:47.13ms
+[2025-09-09 16:02:45] [Rank 0] step:6181/10000 train_time:291218ms step_avg:47.12ms
+[2025-09-09 16:02:45] [Rank 0] step:6201/10000 train_time:292041ms step_avg:47.10ms
+[2025-09-09 16:02:46] [Rank 0] step:6221/10000 train_time:292864ms step_avg:47.08ms
+[2025-09-09 16:02:47] [Rank 0] step:6241/10000 train_time:294067ms step_avg:47.12ms
+[2025-09-09 16:02:48] [Rank 0] step:6261/10000 train_time:294978ms step_avg:47.11ms
+[2025-09-09 16:02:49] [Rank 0] step:6281/10000 train_time:295802ms step_avg:47.09ms
+[2025-09-09 16:02:50] [Rank 0] step:6301/10000 train_time:296624ms step_avg:47.08ms
+[2025-09-09 16:02:51] [Rank 0] step:6321/10000 train_time:297446ms step_avg:47.06ms
+[2025-09-09 16:02:52] [Rank 0] step:6341/10000 train_time:298270ms step_avg:47.04ms
+[2025-09-09 16:02:53] [Rank 0] step:6361/10000 train_time:299094ms step_avg:47.02ms
+[2025-09-09 16:02:53] [Rank 0] step:6381/10000 train_time:299915ms step_avg:47.00ms
+[2025-09-09 16:02:54] [Rank 0] step:6401/10000 train_time:300736ms step_avg:46.98ms
+[2025-09-09 16:02:55] [Rank 0] step:6421/10000 train_time:301558ms step_avg:46.96ms
+[2025-09-09 16:02:56] [Rank 0] step:6441/10000 train_time:302379ms step_avg:46.95ms
+[2025-09-09 16:02:57] [Rank 0] step:6461/10000 train_time:303200ms step_avg:46.93ms
+[2025-09-09 16:02:57] [Rank 0] step:6481/10000 train_time:304022ms step_avg:46.91ms
+[2025-09-09 16:02:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 16:02:59] [Rank 0] PRINT: step:6500/10000 train_loss:0.6253 val_loss:0.6150 train_time:304845ms step_avg:46.90ms
+[2025-09-09 16:02:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 16:02:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 16:04:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 16:04:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 16:04:21] [Rank 0] Total Loss: 4.9852
+[2025-09-09 16:04:21] [Rank 0] Total FTA (Unweighted): 0.9819
+[2025-09-09 16:04:21] [Rank 0] Total FTA (Weighted): 0.9819
+[2025-09-09 16:04:21] [Rank 0] Group 0 Loss: 5.0173
+[2025-09-09 16:04:21] [Rank 0] Group 1 Loss: 4.6553
+[2025-09-09 16:04:21] [Rank 0] Group 2 Loss: 4.5450
+[2025-09-09 16:04:21] [Rank 0] Group 3 Loss: 4.9234
+[2025-09-09 16:04:21] [Rank 0] Group 4 Loss: 4.9085
+[2025-09-09 16:04:21] [Rank 0] Group 5 Loss: 4.8291
+[2025-09-09 16:04:21] [Rank 0] Group 6 Loss: 4.8281
+[2025-09-09 16:04:21] [Rank 0] Group 7 Loss: 4.9345
+[2025-09-09 16:04:21] [Rank 0] Group 8 Loss: 5.0627
+[2025-09-09 16:04:21] [Rank 0] Group 9 Loss: 5.0249
+[2025-09-09 16:04:21] [Rank 0] Group 10 Loss: 5.1451
+[2025-09-09 16:04:21] [Rank 0] Group 11 Loss: 5.1262
+[2025-09-09 16:04:21] [Rank 0] Group 12 Loss: 5.1220
+[2025-09-09 16:04:21] [Rank 0] Group 13 Loss: 5.2126
+[2025-09-09 16:04:21] [Rank 0] Group 14 Loss: 5.1867
+[2025-09-09 16:04:21] [Rank 0] Group 15 Loss: 5.2415
+[2025-09-09 16:04:21] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 16:04:21] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 16:04:21] [Rank 0] Group 14 FTA: 0.9400
+[2025-09-09 16:04:21] [Rank 0] Group 15 FTA: 0.7800
+[2025-09-09 16:04:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 16:04:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 16:04:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 16:04:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 16:04:22] [Rank 0] step:6501/10000 train_time:304862ms step_avg:46.89ms
+[2025-09-09 16:04:23] [Rank 0] step:6521/10000 train_time:305696ms step_avg:46.88ms
+[2025-09-09 16:04:24] [Rank 0] step:6541/10000 train_time:306517ms step_avg:46.86ms
+[2025-09-09 16:04:25] [Rank 0] step:6561/10000 train_time:307337ms step_avg:46.84ms
+[2025-09-09 16:04:26] [Rank 0] step:6581/10000 train_time:308159ms step_avg:46.83ms
+[2025-09-09 16:04:26] [Rank 0] step:6601/10000 train_time:308980ms step_avg:46.81ms
+[2025-09-09 16:04:27] [Rank 0] step:6621/10000 train_time:309801ms step_avg:46.79ms
+[2025-09-09 16:04:28] [Rank 0] step:6641/10000 train_time:310624ms step_avg:46.77ms
+[2025-09-09 16:04:29] [Rank 0] step:6661/10000 train_time:311445ms step_avg:46.76ms
+[2025-09-09 16:04:30] [Rank 0] step:6681/10000 train_time:312266ms step_avg:46.74ms
+[2025-09-09 16:04:30] [Rank 0] step:6701/10000 train_time:313088ms step_avg:46.72ms
+[2025-09-09 16:04:31] [Rank 0] step:6721/10000 train_time:313909ms step_avg:46.71ms
+[2025-09-09 16:04:32] [Rank 0] step:6741/10000 train_time:314730ms step_avg:46.69ms
+[2025-09-09 16:04:33] [Rank 0] step:6761/10000 train_time:315554ms step_avg:46.67ms
+[2025-09-09 16:04:34] [Rank 0] step:6781/10000 train_time:316374ms step_avg:46.66ms
+[2025-09-09 16:04:35] [Rank 0] step:6801/10000 train_time:317195ms step_avg:46.64ms
+[2025-09-09 16:04:35] [Rank 0] step:6821/10000 train_time:318016ms step_avg:46.62ms
+[2025-09-09 16:04:37] [Rank 0] step:6841/10000 train_time:319529ms step_avg:46.71ms
+[2025-09-09 16:04:38] [Rank 0] step:6861/10000 train_time:320351ms step_avg:46.69ms
+[2025-09-09 16:04:39] [Rank 0] step:6881/10000 train_time:321174ms step_avg:46.68ms
+[2025-09-09 16:04:39] [Rank 0] step:6901/10000 train_time:321996ms step_avg:46.66ms
+[2025-09-09 16:04:40] [Rank 0] step:6921/10000 train_time:322818ms step_avg:46.64ms
+[2025-09-09 16:04:41] [Rank 0] step:6941/10000 train_time:323643ms step_avg:46.63ms
+[2025-09-09 16:04:42] [Rank 0] step:6961/10000 train_time:324464ms step_avg:46.61ms
+[2025-09-09 16:04:43] [Rank 0] step:6981/10000 train_time:325286ms step_avg:46.60ms
+[2025-09-09 16:04:43] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 16:04:44] [Rank 0] PRINT: step:7000/10000 train_loss:0.6203 val_loss:0.6119 train_time:326110ms step_avg:46.59ms
+[2025-09-09 16:04:44] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 16:04:44] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 16:06:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 16:06:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 16:06:05] [Rank 0] Total Loss: 4.9881
+[2025-09-09 16:06:05] [Rank 0] Total FTA (Unweighted): 0.9925
+[2025-09-09 16:06:05] [Rank 0] Total FTA (Weighted): 0.9925
+[2025-09-09 16:06:05] [Rank 0] Group 0 Loss: 4.9783
+[2025-09-09 16:06:05] [Rank 0] Group 1 Loss: 4.8220
+[2025-09-09 16:06:05] [Rank 0] Group 2 Loss: 4.5085
+[2025-09-09 16:06:05] [Rank 0] Group 3 Loss: 4.8708
+[2025-09-09 16:06:05] [Rank 0] Group 4 Loss: 4.8837
+[2025-09-09 16:06:05] [Rank 0] Group 5 Loss: 4.8414
+[2025-09-09 16:06:05] [Rank 0] Group 6 Loss: 4.8344
+[2025-09-09 16:06:05] [Rank 0] Group 7 Loss: 4.9133
+[2025-09-09 16:06:05] [Rank 0] Group 8 Loss: 5.0611
+[2025-09-09 16:06:05] [Rank 0] Group 9 Loss: 5.0250
+[2025-09-09 16:06:05] [Rank 0] Group 10 Loss: 5.1472
+[2025-09-09 16:06:05] [Rank 0] Group 11 Loss: 5.1354
+[2025-09-09 16:06:05] [Rank 0] Group 12 Loss: 5.1116
+[2025-09-09 16:06:05] [Rank 0] Group 13 Loss: 5.2421
+[2025-09-09 16:06:05] [Rank 0] Group 14 Loss: 5.1984
+[2025-09-09 16:06:05] [Rank 0] Group 15 Loss: 5.2366
+[2025-09-09 16:06:05] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 16:06:05] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 16:06:05] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 16:06:05] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 16:06:05] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 16:06:05] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 16:06:05] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 16:06:05] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 16:06:06] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 16:06:06] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 16:06:06] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 16:06:06] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 16:06:06] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 16:06:06] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 16:06:06] [Rank 0] Group 14 FTA: 0.9700
+[2025-09-09 16:06:06] [Rank 0] Group 15 FTA: 0.9100
+[2025-09-09 16:06:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 16:06:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 16:06:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 16:06:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 16:06:07] [Rank 0] step:7001/10000 train_time:326126ms step_avg:46.58ms
+[2025-09-09 16:06:08] [Rank 0] step:7021/10000 train_time:326945ms step_avg:46.57ms
+[2025-09-09 16:06:09] [Rank 0] step:7041/10000 train_time:327767ms step_avg:46.55ms
+[2025-09-09 16:06:09] [Rank 0] step:7061/10000 train_time:328586ms step_avg:46.54ms
+[2025-09-09 16:06:10] [Rank 0] step:7081/10000 train_time:329419ms step_avg:46.52ms
+[2025-09-09 16:06:11] [Rank 0] step:7101/10000 train_time:330241ms step_avg:46.51ms
+[2025-09-09 16:06:12] [Rank 0] step:7121/10000 train_time:331062ms step_avg:46.49ms
+[2025-09-09 16:06:13] [Rank 0] step:7141/10000 train_time:331883ms step_avg:46.48ms
+[2025-09-09 16:06:14] [Rank 0] step:7161/10000 train_time:332703ms step_avg:46.46ms
+[2025-09-09 16:06:14] [Rank 0] step:7181/10000 train_time:333526ms step_avg:46.45ms
+[2025-09-09 16:06:15] [Rank 0] step:7201/10000 train_time:334349ms step_avg:46.43ms
+[2025-09-09 16:06:16] [Rank 0] step:7221/10000 train_time:335170ms step_avg:46.42ms
+[2025-09-09 16:06:17] [Rank 0] step:7241/10000 train_time:335991ms step_avg:46.40ms
+[2025-09-09 16:06:18] [Rank 0] step:7261/10000 train_time:336813ms step_avg:46.39ms
+[2025-09-09 16:06:18] [Rank 0] step:7281/10000 train_time:337637ms step_avg:46.37ms
+[2025-09-09 16:06:19] [Rank 0] step:7301/10000 train_time:338457ms step_avg:46.36ms
+[2025-09-09 16:06:20] [Rank 0] step:7321/10000 train_time:339278ms step_avg:46.34ms
+[2025-09-09 16:06:21] [Rank 0] step:7341/10000 train_time:340099ms step_avg:46.33ms
+[2025-09-09 16:06:22] [Rank 0] step:7361/10000 train_time:340921ms step_avg:46.31ms
+[2025-09-09 16:06:23] [Rank 0] step:7381/10000 train_time:341742ms step_avg:46.30ms
+[2025-09-09 16:06:23] [Rank 0] step:7401/10000 train_time:342563ms step_avg:46.29ms
+[2025-09-09 16:06:24] [Rank 0] step:7421/10000 train_time:343385ms step_avg:46.27ms
+[2025-09-09 16:06:25] [Rank 0] step:7441/10000 train_time:344207ms step_avg:46.26ms
+[2025-09-09 16:06:26] [Rank 0] step:7461/10000 train_time:345028ms step_avg:46.24ms
+[2025-09-09 16:06:27] [Rank 0] step:7481/10000 train_time:345850ms step_avg:46.23ms
+[2025-09-09 16:06:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 16:06:28] [Rank 0] PRINT: step:7500/10000 train_loss:0.6161 val_loss:0.6100 train_time:346674ms step_avg:46.22ms
+[2025-09-09 16:06:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 16:06:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 16:07:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 16:07:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 16:07:49] [Rank 0] Total Loss: 5.0070
+[2025-09-09 16:07:49] [Rank 0] Total FTA (Unweighted): 0.9900
+[2025-09-09 16:07:49] [Rank 0] Total FTA (Weighted): 0.9900
+[2025-09-09 16:07:49] [Rank 0] Group 0 Loss: 5.0414
+[2025-09-09 16:07:49] [Rank 0] Group 1 Loss: 4.7122
+[2025-09-09 16:07:49] [Rank 0] Group 2 Loss: 4.4635
+[2025-09-09 16:07:49] [Rank 0] Group 3 Loss: 4.9172
+[2025-09-09 16:07:49] [Rank 0] Group 4 Loss: 4.9114
+[2025-09-09 16:07:49] [Rank 0] Group 5 Loss: 4.8947
+[2025-09-09 16:07:49] [Rank 0] Group 6 Loss: 4.8419
+[2025-09-09 16:07:49] [Rank 0] Group 7 Loss: 4.9580
+[2025-09-09 16:07:49] [Rank 0] Group 8 Loss: 5.0808
+[2025-09-09 16:07:49] [Rank 0] Group 9 Loss: 5.0526
+[2025-09-09 16:07:49] [Rank 0] Group 10 Loss: 5.1647
+[2025-09-09 16:07:49] [Rank 0] Group 11 Loss: 5.1576
+[2025-09-09 16:07:49] [Rank 0] Group 12 Loss: 5.1477
+[2025-09-09 16:07:49] [Rank 0] Group 13 Loss: 5.2399
+[2025-09-09 16:07:49] [Rank 0] Group 14 Loss: 5.2374
+[2025-09-09 16:07:49] [Rank 0] Group 15 Loss: 5.2907
+[2025-09-09 16:07:49] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 16:07:49] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 16:07:49] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 16:07:49] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 16:07:49] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 16:07:49] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 16:07:49] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 16:07:49] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 16:07:49] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 16:07:49] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 16:07:49] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 16:07:49] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 16:07:49] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 16:07:49] [Rank 0] Group 13 FTA: 0.9900
+[2025-09-09 16:07:49] [Rank 0] Group 14 FTA: 0.9800
+[2025-09-09 16:07:49] [Rank 0] Group 15 FTA: 0.8800
+[2025-09-09 16:07:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 16:07:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 16:07:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 16:07:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 16:07:51] [Rank 0] step:7501/10000 train_time:346690ms step_avg:46.22ms
+[2025-09-09 16:07:52] [Rank 0] step:7521/10000 train_time:347519ms step_avg:46.21ms
+[2025-09-09 16:07:52] [Rank 0] step:7541/10000 train_time:348341ms step_avg:46.19ms
+[2025-09-09 16:07:53] [Rank 0] step:7561/10000 train_time:349163ms step_avg:46.18ms
+[2025-09-09 16:07:54] [Rank 0] step:7581/10000 train_time:349985ms step_avg:46.17ms
+[2025-09-09 16:07:55] [Rank 0] step:7601/10000 train_time:350808ms step_avg:46.15ms
+[2025-09-09 16:07:56] [Rank 0] step:7621/10000 train_time:351633ms step_avg:46.14ms
+[2025-09-09 16:07:57] [Rank 0] step:7641/10000 train_time:353141ms step_avg:46.22ms
+[2025-09-09 16:07:58] [Rank 0] step:7661/10000 train_time:353964ms step_avg:46.20ms
+[2025-09-09 16:07:59] [Rank 0] step:7681/10000 train_time:354786ms step_avg:46.19ms
+[2025-09-09 16:08:00] [Rank 0] step:7701/10000 train_time:355609ms step_avg:46.18ms
+[2025-09-09 16:08:01] [Rank 0] step:7721/10000 train_time:356432ms step_avg:46.16ms
+[2025-09-09 16:08:01] [Rank 0] step:7741/10000 train_time:357252ms step_avg:46.15ms
+[2025-09-09 16:08:02] [Rank 0] step:7761/10000 train_time:358078ms step_avg:46.14ms
+[2025-09-09 16:08:04] [Rank 0] step:7781/10000 train_time:359307ms step_avg:46.18ms
+[2025-09-09 16:08:04] [Rank 0] step:7801/10000 train_time:360222ms step_avg:46.18ms
+[2025-09-09 16:08:05] [Rank 0] step:7821/10000 train_time:361048ms step_avg:46.16ms
+[2025-09-09 16:08:06] [Rank 0] step:7841/10000 train_time:361868ms step_avg:46.15ms
+[2025-09-09 16:08:07] [Rank 0] step:7861/10000 train_time:362689ms step_avg:46.14ms
+[2025-09-09 16:08:08] [Rank 0] step:7881/10000 train_time:363510ms step_avg:46.12ms
+[2025-09-09 16:08:08] [Rank 0] step:7901/10000 train_time:364331ms step_avg:46.11ms
+[2025-09-09 16:08:09] [Rank 0] step:7921/10000 train_time:365152ms step_avg:46.10ms
+[2025-09-09 16:08:10] [Rank 0] step:7941/10000 train_time:365973ms step_avg:46.09ms
+[2025-09-09 16:08:11] [Rank 0] step:7961/10000 train_time:366795ms step_avg:46.07ms
+[2025-09-09 16:08:12] [Rank 0] step:7981/10000 train_time:367616ms step_avg:46.06ms
+[2025-09-09 16:08:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 16:08:13] [Rank 0] PRINT: step:8000/10000 train_loss:0.6132 val_loss:0.6082 train_time:368440ms step_avg:46.05ms
+[2025-09-09 16:08:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 16:08:13] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 16:09:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 16:09:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 16:09:34] [Rank 0] Total Loss: 5.0543
+[2025-09-09 16:09:34] [Rank 0] Total FTA (Unweighted): 0.9931
+[2025-09-09 16:09:34] [Rank 0] Total FTA (Weighted): 0.9931
+[2025-09-09 16:09:34] [Rank 0] Group 0 Loss: 5.0342
+[2025-09-09 16:09:34] [Rank 0] Group 1 Loss: 4.8080
+[2025-09-09 16:09:34] [Rank 0] Group 2 Loss: 4.4905
+[2025-09-09 16:09:34] [Rank 0] Group 3 Loss: 4.9462
+[2025-09-09 16:09:34] [Rank 0] Group 4 Loss: 4.9506
+[2025-09-09 16:09:34] [Rank 0] Group 5 Loss: 4.9747
+[2025-09-09 16:09:34] [Rank 0] Group 6 Loss: 4.8887
+[2025-09-09 16:09:34] [Rank 0] Group 7 Loss: 4.9978
+[2025-09-09 16:09:34] [Rank 0] Group 8 Loss: 5.1494
+[2025-09-09 16:09:34] [Rank 0] Group 9 Loss: 5.0880
+[2025-09-09 16:09:34] [Rank 0] Group 10 Loss: 5.2538
+[2025-09-09 16:09:34] [Rank 0] Group 11 Loss: 5.2057
+[2025-09-09 16:09:34] [Rank 0] Group 12 Loss: 5.1741
+[2025-09-09 16:09:34] [Rank 0] Group 13 Loss: 5.3082
+[2025-09-09 16:09:34] [Rank 0] Group 14 Loss: 5.2535
+[2025-09-09 16:09:34] [Rank 0] Group 15 Loss: 5.3454
+[2025-09-09 16:09:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-09 16:09:34] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 16:09:34] [Rank 0] Group 14 FTA: 0.9900
+[2025-09-09 16:09:34] [Rank 0] Group 15 FTA: 0.9100
+[2025-09-09 16:09:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 16:09:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 16:09:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 16:09:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 16:09:36] [Rank 0] step:8001/10000 train_time:368456ms step_avg:46.05ms
+[2025-09-09 16:09:37] [Rank 0] step:8021/10000 train_time:369556ms step_avg:46.07ms
+[2025-09-09 16:09:38] [Rank 0] step:8041/10000 train_time:370383ms step_avg:46.06ms
+[2025-09-09 16:09:39] [Rank 0] step:8061/10000 train_time:371204ms step_avg:46.05ms
+[2025-09-09 16:09:39] [Rank 0] step:8081/10000 train_time:372025ms step_avg:46.04ms
+[2025-09-09 16:09:40] [Rank 0] step:8101/10000 train_time:372849ms step_avg:46.03ms
+[2025-09-09 16:09:41] [Rank 0] step:8121/10000 train_time:373669ms step_avg:46.01ms
+[2025-09-09 16:09:42] [Rank 0] step:8141/10000 train_time:374490ms step_avg:46.00ms
+[2025-09-09 16:09:43] [Rank 0] step:8161/10000 train_time:375312ms step_avg:45.99ms
+[2025-09-09 16:09:43] [Rank 0] step:8181/10000 train_time:376134ms step_avg:45.98ms
+[2025-09-09 16:09:44] [Rank 0] step:8201/10000 train_time:376955ms step_avg:45.96ms
+[2025-09-09 16:09:45] [Rank 0] step:8221/10000 train_time:377776ms step_avg:45.95ms
+[2025-09-09 16:09:46] [Rank 0] step:8241/10000 train_time:378597ms step_avg:45.94ms
+[2025-09-09 16:09:47] [Rank 0] step:8261/10000 train_time:379418ms step_avg:45.93ms
+[2025-09-09 16:09:48] [Rank 0] step:8281/10000 train_time:380240ms step_avg:45.92ms
+[2025-09-09 16:09:48] [Rank 0] step:8301/10000 train_time:381062ms step_avg:45.91ms
+[2025-09-09 16:09:49] [Rank 0] step:8321/10000 train_time:381884ms step_avg:45.89ms
+[2025-09-09 16:09:50] [Rank 0] step:8341/10000 train_time:382705ms step_avg:45.88ms
+[2025-09-09 16:09:51] [Rank 0] step:8361/10000 train_time:383527ms step_avg:45.87ms
+[2025-09-09 16:09:52] [Rank 0] step:8381/10000 train_time:384350ms step_avg:45.86ms
+[2025-09-09 16:09:52] [Rank 0] step:8401/10000 train_time:385170ms step_avg:45.85ms
+[2025-09-09 16:09:53] [Rank 0] step:8421/10000 train_time:385992ms step_avg:45.84ms
+[2025-09-09 16:09:54] [Rank 0] step:8441/10000 train_time:386813ms step_avg:45.83ms
+[2025-09-09 16:09:55] [Rank 0] step:8461/10000 train_time:387634ms step_avg:45.81ms
+[2025-09-09 16:09:56] [Rank 0] step:8481/10000 train_time:388455ms step_avg:45.80ms
+[2025-09-09 16:09:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 16:09:57] [Rank 0] PRINT: step:8500/10000 train_loss:0.6110 val_loss:0.6068 train_time:389280ms step_avg:45.80ms
+[2025-09-09 16:09:57] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 16:09:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 16:11:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 16:11:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 16:11:18] [Rank 0] Total Loss: 5.0186
+[2025-09-09 16:11:18] [Rank 0] Total FTA (Unweighted): 0.9956
+[2025-09-09 16:11:18] [Rank 0] Total FTA (Weighted): 0.9956
+[2025-09-09 16:11:18] [Rank 0] Group 0 Loss: 5.0212
+[2025-09-09 16:11:18] [Rank 0] Group 1 Loss: 4.7218
+[2025-09-09 16:11:18] [Rank 0] Group 2 Loss: 4.4930
+[2025-09-09 16:11:18] [Rank 0] Group 3 Loss: 4.9371
+[2025-09-09 16:11:18] [Rank 0] Group 4 Loss: 4.9140
+[2025-09-09 16:11:18] [Rank 0] Group 5 Loss: 4.9235
+[2025-09-09 16:11:18] [Rank 0] Group 6 Loss: 4.8668
+[2025-09-09 16:11:18] [Rank 0] Group 7 Loss: 4.9542
+[2025-09-09 16:11:18] [Rank 0] Group 8 Loss: 5.1290
+[2025-09-09 16:11:18] [Rank 0] Group 9 Loss: 5.0619
+[2025-09-09 16:11:18] [Rank 0] Group 10 Loss: 5.1957
+[2025-09-09 16:11:18] [Rank 0] Group 11 Loss: 5.1421
+[2025-09-09 16:11:18] [Rank 0] Group 12 Loss: 5.1470
+[2025-09-09 16:11:18] [Rank 0] Group 13 Loss: 5.2690
+[2025-09-09 16:11:18] [Rank 0] Group 14 Loss: 5.2140
+[2025-09-09 16:11:18] [Rank 0] Group 15 Loss: 5.3071
+[2025-09-09 16:11:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-09 16:11:18] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 16:11:18] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 16:11:18] [Rank 0] Group 15 FTA: 0.9500
+[2025-09-09 16:11:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 16:11:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 16:11:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 16:11:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 16:11:20] [Rank 0] step:8501/10000 train_time:389295ms step_avg:45.79ms
+[2025-09-09 16:11:21] [Rank 0] step:8521/10000 train_time:390118ms step_avg:45.78ms
+[2025-09-09 16:11:22] [Rank 0] step:8541/10000 train_time:390941ms step_avg:45.77ms
+[2025-09-09 16:11:22] [Rank 0] step:8561/10000 train_time:391763ms step_avg:45.76ms
+[2025-09-09 16:11:23] [Rank 0] step:8581/10000 train_time:392585ms step_avg:45.75ms
+[2025-09-09 16:11:24] [Rank 0] step:8601/10000 train_time:393409ms step_avg:45.74ms
+[2025-09-09 16:11:25] [Rank 0] step:8621/10000 train_time:394228ms step_avg:45.73ms
+[2025-09-09 16:11:26] [Rank 0] step:8641/10000 train_time:395050ms step_avg:45.72ms
+[2025-09-09 16:11:27] [Rank 0] step:8661/10000 train_time:395872ms step_avg:45.71ms
+[2025-09-09 16:11:27] [Rank 0] step:8681/10000 train_time:396694ms step_avg:45.70ms
+[2025-09-09 16:11:28] [Rank 0] step:8701/10000 train_time:397516ms step_avg:45.69ms
+[2025-09-09 16:11:29] [Rank 0] step:8721/10000 train_time:398338ms step_avg:45.68ms
+[2025-09-09 16:11:30] [Rank 0] step:8741/10000 train_time:399159ms step_avg:45.67ms
+[2025-09-09 16:11:31] [Rank 0] step:8761/10000 train_time:399982ms step_avg:45.65ms
+[2025-09-09 16:11:31] [Rank 0] step:8781/10000 train_time:400804ms step_avg:45.64ms
+[2025-09-09 16:11:32] [Rank 0] step:8801/10000 train_time:401626ms step_avg:45.63ms
+[2025-09-09 16:11:33] [Rank 0] step:8821/10000 train_time:402448ms step_avg:45.62ms
+[2025-09-09 16:11:35] [Rank 0] step:8841/10000 train_time:403976ms step_avg:45.69ms
+[2025-09-09 16:11:35] [Rank 0] step:8861/10000 train_time:404798ms step_avg:45.68ms
+[2025-09-09 16:11:36] [Rank 0] step:8881/10000 train_time:405620ms step_avg:45.67ms
+[2025-09-09 16:11:37] [Rank 0] step:8901/10000 train_time:406443ms step_avg:45.66ms
+[2025-09-09 16:11:38] [Rank 0] step:8921/10000 train_time:407266ms step_avg:45.65ms
+[2025-09-09 16:11:39] [Rank 0] step:8941/10000 train_time:408090ms step_avg:45.64ms
+[2025-09-09 16:11:40] [Rank 0] step:8961/10000 train_time:408913ms step_avg:45.63ms
+[2025-09-09 16:11:40] [Rank 0] step:8981/10000 train_time:409736ms step_avg:45.62ms
+[2025-09-09 16:11:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 16:11:42] [Rank 0] PRINT: step:9000/10000 train_loss:0.6093 val_loss:0.6058 train_time:410561ms step_avg:45.62ms
+[2025-09-09 16:11:42] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 16:11:42] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 16:13:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 16:13:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 16:13:03] [Rank 0] Total Loss: 5.0496
+[2025-09-09 16:13:03] [Rank 0] Total FTA (Unweighted): 0.9956
+[2025-09-09 16:13:03] [Rank 0] Total FTA (Weighted): 0.9956
+[2025-09-09 16:13:03] [Rank 0] Group 0 Loss: 4.9718
+[2025-09-09 16:13:03] [Rank 0] Group 1 Loss: 4.8179
+[2025-09-09 16:13:03] [Rank 0] Group 2 Loss: 4.5032
+[2025-09-09 16:13:03] [Rank 0] Group 3 Loss: 4.9468
+[2025-09-09 16:13:04] [Rank 0] Group 4 Loss: 4.9507
+[2025-09-09 16:13:04] [Rank 0] Group 5 Loss: 4.9334
+[2025-09-09 16:13:04] [Rank 0] Group 6 Loss: 4.9075
+[2025-09-09 16:13:04] [Rank 0] Group 7 Loss: 4.9799
+[2025-09-09 16:13:04] [Rank 0] Group 8 Loss: 5.1672
+[2025-09-09 16:13:04] [Rank 0] Group 9 Loss: 5.1064
+[2025-09-09 16:13:04] [Rank 0] Group 10 Loss: 5.2410
+[2025-09-09 16:13:04] [Rank 0] Group 11 Loss: 5.2010
+[2025-09-09 16:13:04] [Rank 0] Group 12 Loss: 5.1607
+[2025-09-09 16:13:04] [Rank 0] Group 13 Loss: 5.3152
+[2025-09-09 16:13:04] [Rank 0] Group 14 Loss: 5.2645
+[2025-09-09 16:13:04] [Rank 0] Group 15 Loss: 5.3256
+[2025-09-09 16:13:04] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-09 16:13:04] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 16:13:04] [Rank 0] Group 15 FTA: 0.9400
+[2025-09-09 16:13:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 16:13:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 16:13:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 16:13:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 16:13:06] [Rank 0] step:9001/10000 train_time:410577ms step_avg:45.61ms
+[2025-09-09 16:13:07] [Rank 0] step:9021/10000 train_time:411399ms step_avg:45.60ms
+[2025-09-09 16:13:07] [Rank 0] step:9041/10000 train_time:412221ms step_avg:45.59ms
+[2025-09-09 16:13:08] [Rank 0] step:9061/10000 train_time:413043ms step_avg:45.58ms
+[2025-09-09 16:13:09] [Rank 0] step:9081/10000 train_time:413864ms step_avg:45.57ms
+[2025-09-09 16:13:10] [Rank 0] step:9101/10000 train_time:414686ms step_avg:45.56ms
+[2025-09-09 16:13:11] [Rank 0] step:9121/10000 train_time:415507ms step_avg:45.56ms
+[2025-09-09 16:13:11] [Rank 0] step:9141/10000 train_time:416331ms step_avg:45.55ms
+[2025-09-09 16:13:12] [Rank 0] step:9161/10000 train_time:417152ms step_avg:45.54ms
+[2025-09-09 16:13:13] [Rank 0] step:9181/10000 train_time:417974ms step_avg:45.53ms
+[2025-09-09 16:13:14] [Rank 0] step:9201/10000 train_time:418795ms step_avg:45.52ms
+[2025-09-09 16:13:15] [Rank 0] step:9221/10000 train_time:419617ms step_avg:45.51ms
+[2025-09-09 16:13:16] [Rank 0] step:9241/10000 train_time:420441ms step_avg:45.50ms
+[2025-09-09 16:13:16] [Rank 0] step:9261/10000 train_time:421261ms step_avg:45.49ms
+[2025-09-09 16:13:17] [Rank 0] step:9281/10000 train_time:422083ms step_avg:45.48ms
+[2025-09-09 16:13:18] [Rank 0] step:9301/10000 train_time:422907ms step_avg:45.47ms
+[2025-09-09 16:13:19] [Rank 0] step:9321/10000 train_time:423731ms step_avg:45.46ms
+[2025-09-09 16:13:20] [Rank 0] step:9341/10000 train_time:425047ms step_avg:45.50ms
+[2025-09-09 16:13:21] [Rank 0] step:9361/10000 train_time:425871ms step_avg:45.49ms
+[2025-09-09 16:13:22] [Rank 0] step:9381/10000 train_time:426692ms step_avg:45.48ms
+[2025-09-09 16:13:23] [Rank 0] step:9401/10000 train_time:427515ms step_avg:45.48ms
+[2025-09-09 16:13:23] [Rank 0] step:9421/10000 train_time:428339ms step_avg:45.47ms
+[2025-09-09 16:13:24] [Rank 0] step:9441/10000 train_time:429162ms step_avg:45.46ms
+[2025-09-09 16:13:25] [Rank 0] step:9461/10000 train_time:429984ms step_avg:45.45ms
+[2025-09-09 16:13:26] [Rank 0] step:9481/10000 train_time:430806ms step_avg:45.44ms
+[2025-09-09 16:13:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 16:13:27] [Rank 0] PRINT: step:9500/10000 train_loss:0.6080 val_loss:0.6051 train_time:431632ms step_avg:45.43ms
+[2025-09-09 16:13:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 16:13:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 16:14:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 16:14:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 16:14:48] [Rank 0] Total Loss: 5.1155
+[2025-09-09 16:14:48] [Rank 0] Total FTA (Unweighted): 0.9994
+[2025-09-09 16:14:48] [Rank 0] Total FTA (Weighted): 0.9994
+[2025-09-09 16:14:48] [Rank 0] Group 0 Loss: 5.0909
+[2025-09-09 16:14:48] [Rank 0] Group 1 Loss: 4.8492
+[2025-09-09 16:14:48] [Rank 0] Group 2 Loss: 4.6004
+[2025-09-09 16:14:48] [Rank 0] Group 3 Loss: 5.0148
+[2025-09-09 16:14:48] [Rank 0] Group 4 Loss: 5.0259
+[2025-09-09 16:14:48] [Rank 0] Group 5 Loss: 4.9768
+[2025-09-09 16:14:48] [Rank 0] Group 6 Loss: 4.9476
+[2025-09-09 16:14:49] [Rank 0] Group 7 Loss: 5.0563
+[2025-09-09 16:14:49] [Rank 0] Group 8 Loss: 5.2163
+[2025-09-09 16:14:49] [Rank 0] Group 9 Loss: 5.1526
+[2025-09-09 16:14:49] [Rank 0] Group 10 Loss: 5.2921
+[2025-09-09 16:14:49] [Rank 0] Group 11 Loss: 5.2757
+[2025-09-09 16:14:49] [Rank 0] Group 12 Loss: 5.2172
+[2025-09-09 16:14:49] [Rank 0] Group 13 Loss: 5.3953
+[2025-09-09 16:14:49] [Rank 0] Group 14 Loss: 5.3295
+[2025-09-09 16:14:49] [Rank 0] Group 15 Loss: 5.4079
+[2025-09-09 16:14:49] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 16:14:49] [Rank 0] Group 15 FTA: 0.9900
+[2025-09-09 16:14:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 16:14:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 16:14:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 16:14:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 16:14:50] [Rank 0] step:9501/10000 train_time:431648ms step_avg:45.43ms
+[2025-09-09 16:14:51] [Rank 0] step:9521/10000 train_time:432474ms step_avg:45.42ms
+[2025-09-09 16:14:52] [Rank 0] step:9541/10000 train_time:433297ms step_avg:45.41ms
+[2025-09-09 16:14:52] [Rank 0] step:9561/10000 train_time:434120ms step_avg:45.41ms
+[2025-09-09 16:14:53] [Rank 0] step:9581/10000 train_time:434943ms step_avg:45.40ms
+[2025-09-09 16:14:54] [Rank 0] step:9601/10000 train_time:435765ms step_avg:45.39ms
+[2025-09-09 16:14:55] [Rank 0] step:9621/10000 train_time:436588ms step_avg:45.38ms
+[2025-09-09 16:14:56] [Rank 0] step:9641/10000 train_time:437410ms step_avg:45.37ms
+[2025-09-09 16:14:57] [Rank 0] step:9661/10000 train_time:438510ms step_avg:45.39ms
+[2025-09-09 16:14:58] [Rank 0] step:9681/10000 train_time:439331ms step_avg:45.38ms
+[2025-09-09 16:14:59] [Rank 0] step:9701/10000 train_time:440153ms step_avg:45.37ms
+[2025-09-09 16:14:59] [Rank 0] step:9721/10000 train_time:440976ms step_avg:45.36ms
+[2025-09-09 16:15:00] [Rank 0] step:9741/10000 train_time:441797ms step_avg:45.35ms
+[2025-09-09 16:15:01] [Rank 0] step:9761/10000 train_time:442619ms step_avg:45.35ms
+[2025-09-09 16:15:02] [Rank 0] step:9781/10000 train_time:443555ms step_avg:45.35ms
+[2025-09-09 16:15:03] [Rank 0] step:9801/10000 train_time:444379ms step_avg:45.34ms
+[2025-09-09 16:15:04] [Rank 0] step:9821/10000 train_time:445202ms step_avg:45.33ms
+[2025-09-09 16:15:04] [Rank 0] step:9841/10000 train_time:446024ms step_avg:45.32ms
+[2025-09-09 16:15:05] [Rank 0] step:9861/10000 train_time:446845ms step_avg:45.31ms
+[2025-09-09 16:15:06] [Rank 0] step:9881/10000 train_time:447668ms step_avg:45.31ms
+[2025-09-09 16:15:07] [Rank 0] step:9901/10000 train_time:448489ms step_avg:45.30ms
+[2025-09-09 16:15:08] [Rank 0] step:9921/10000 train_time:449311ms step_avg:45.29ms
+[2025-09-09 16:15:08] [Rank 0] step:9941/10000 train_time:450134ms step_avg:45.28ms
+[2025-09-09 16:15:09] [Rank 0] step:9961/10000 train_time:450957ms step_avg:45.27ms
+[2025-09-09 16:15:10] [Rank 0] step:9981/10000 train_time:451786ms step_avg:45.26ms
+[2025-09-09 16:15:11] [Rank 0] step:10000/10000 train_time:452566ms step_avg:45.26ms
+[2025-09-09 16:15:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-09 16:15:11] [Rank 0] PRINT: step:10000/10000 train_loss:0.6070 val_loss:0.6044 train_time:452616ms step_avg:45.26ms
+[2025-09-09 16:15:11] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-09 16:15:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-09 16:16:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-09 16:16:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-09 16:16:33] [Rank 0] Total Loss: 5.0913
+[2025-09-09 16:16:33] [Rank 0] Total FTA (Unweighted): 0.9994
+[2025-09-09 16:16:33] [Rank 0] Total FTA (Weighted): 0.9994
+[2025-09-09 16:16:33] [Rank 0] Group 0 Loss: 5.0694
+[2025-09-09 16:16:33] [Rank 0] Group 1 Loss: 4.8130
+[2025-09-09 16:16:33] [Rank 0] Group 2 Loss: 4.5807
+[2025-09-09 16:16:33] [Rank 0] Group 3 Loss: 4.9996
+[2025-09-09 16:16:33] [Rank 0] Group 4 Loss: 5.0029
+[2025-09-09 16:16:33] [Rank 0] Group 5 Loss: 4.9689
+[2025-09-09 16:16:33] [Rank 0] Group 6 Loss: 4.9323
+[2025-09-09 16:16:33] [Rank 0] Group 7 Loss: 5.0304
+[2025-09-09 16:16:33] [Rank 0] Group 8 Loss: 5.1864
+[2025-09-09 16:16:33] [Rank 0] Group 9 Loss: 5.1249
+[2025-09-09 16:16:33] [Rank 0] Group 10 Loss: 5.2630
+[2025-09-09 16:16:33] [Rank 0] Group 11 Loss: 5.2425
+[2025-09-09 16:16:33] [Rank 0] Group 12 Loss: 5.2014
+[2025-09-09 16:16:33] [Rank 0] Group 13 Loss: 5.3624
+[2025-09-09 16:16:33] [Rank 0] Group 14 Loss: 5.3035
+[2025-09-09 16:16:33] [Rank 0] Group 15 Loss: 5.3791
+[2025-09-09 16:16:33] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 13 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 14 FTA: 1.0000
+[2025-09-09 16:16:33] [Rank 0] Group 15 FTA: 0.9900
+[2025-09-09 16:16:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_loss_curves.png
+[2025-09-09 16:16:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/per_class_acc_curves.png
+[2025-09-09 16:16:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_loss_curve.png
+[2025-09-09 16:16:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_muon_gated/diff_mode/mode_7_param_gated_seed_46/total_acc_curve.png
+[2025-09-09 16:16:34] [Rank 0] step:10001/10000 train_time:452632ms step_avg:45.26ms
+[2025-09-09 16:16:34] [Rank 0] PRINT: --- Training Finished: Tue Sep 9 16:16:34 2025 ---
+[2025-09-09 16:16:34] [Rank 0] PRINT: Peak memory allocated: 3880 MiB reserved: 4808 MiB