{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9998338042213728,
  "eval_steps": 500,
  "global_step": 47,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.021273059664284527,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.7124,
      "step": 1
    },
    {
      "epoch": 0.042546119328569054,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.7074,
      "step": 2
    },
    {
      "epoch": 0.06381917899285358,
      "learning_rate": 1e-05,
      "loss": 0.7038,
      "step": 3
    },
    {
      "epoch": 0.08509223865713811,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.6777,
      "step": 4
    },
    {
      "epoch": 0.10636529832142264,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.6636,
      "step": 5
    },
    {
      "epoch": 0.12763835798570716,
      "learning_rate": 2e-05,
      "loss": 0.6466,
      "step": 6
    },
    {
      "epoch": 0.1489114176499917,
      "learning_rate": 2.3333333333333336e-05,
      "loss": 0.5964,
      "step": 7
    },
    {
      "epoch": 0.17018447731427622,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.5744,
      "step": 8
    },
    {
      "epoch": 0.19145753697856074,
      "learning_rate": 3e-05,
      "loss": 0.6283,
      "step": 9
    },
    {
      "epoch": 0.21273059664284527,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.6229,
      "step": 10
    },
    {
      "epoch": 0.2340036563071298,
      "learning_rate": 3.6666666666666666e-05,
      "loss": 0.6355,
      "step": 11
    },
    {
      "epoch": 0.2552767159714143,
      "learning_rate": 4e-05,
      "loss": 0.6621,
      "step": 12
    },
    {
      "epoch": 0.2765497756356989,
      "learning_rate": 4.3333333333333334e-05,
      "loss": 0.6167,
      "step": 13
    },
    {
      "epoch": 0.2978228352999834,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.737,
      "step": 14
    },
    {
      "epoch": 0.31909589496426793,
      "learning_rate": 5e-05,
      "loss": 0.8185,
      "step": 15
    },
    {
      "epoch": 0.34036895462855243,
      "learning_rate": 4.8437500000000005e-05,
      "loss": 0.6561,
      "step": 16
    },
    {
      "epoch": 0.361642014292837,
      "learning_rate": 4.6875e-05,
      "loss": 0.6145,
      "step": 17
    },
    {
      "epoch": 0.3829150739571215,
      "learning_rate": 4.5312500000000004e-05,
      "loss": 0.6118,
      "step": 18
    },
    {
      "epoch": 0.40418813362140604,
      "learning_rate": 4.375e-05,
      "loss": 0.5738,
      "step": 19
    },
    {
      "epoch": 0.42546119328569054,
      "learning_rate": 4.21875e-05,
      "loss": 0.5477,
      "step": 20
    },
    {
      "epoch": 0.4467342529499751,
      "learning_rate": 4.0625000000000005e-05,
      "loss": 0.5366,
      "step": 21
    },
    {
      "epoch": 0.4680073126142596,
      "learning_rate": 3.90625e-05,
      "loss": 0.5254,
      "step": 22
    },
    {
      "epoch": 0.48928037227854415,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.5327,
      "step": 23
    },
    {
      "epoch": 0.5105534319428287,
      "learning_rate": 3.59375e-05,
      "loss": 0.5231,
      "step": 24
    },
    {
      "epoch": 0.5318264916071131,
      "learning_rate": 3.4375e-05,
      "loss": 0.5227,
      "step": 25
    },
    {
      "epoch": 0.5530995512713978,
      "learning_rate": 3.2812500000000005e-05,
      "loss": 0.5264,
      "step": 26
    },
    {
      "epoch": 0.5743726109356823,
      "learning_rate": 3.125e-05,
      "loss": 0.5285,
      "step": 27
    },
    {
      "epoch": 0.5956456705999668,
      "learning_rate": 2.96875e-05,
      "loss": 0.5143,
      "step": 28
    },
    {
      "epoch": 0.6169187302642513,
      "learning_rate": 2.8125000000000003e-05,
      "loss": 0.5131,
      "step": 29
    },
    {
      "epoch": 0.6381917899285359,
      "learning_rate": 2.6562500000000002e-05,
      "loss": 0.5172,
      "step": 30
    },
    {
      "epoch": 0.6594648495928204,
      "learning_rate": 2.5e-05,
      "loss": 0.5174,
      "step": 31
    },
    {
      "epoch": 0.6807379092571049,
      "learning_rate": 2.34375e-05,
      "loss": 0.5085,
      "step": 32
    },
    {
      "epoch": 0.7020109689213894,
      "learning_rate": 2.1875e-05,
      "loss": 0.5078,
      "step": 33
    },
    {
      "epoch": 0.723284028585674,
      "learning_rate": 2.0312500000000002e-05,
      "loss": 0.5107,
      "step": 34
    },
    {
      "epoch": 0.7445570882499585,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 0.5172,
      "step": 35
    },
    {
      "epoch": 0.765830147914243,
      "learning_rate": 1.71875e-05,
      "loss": 0.5018,
      "step": 36
    },
    {
      "epoch": 0.7871032075785275,
      "learning_rate": 1.5625e-05,
      "loss": 0.505,
      "step": 37
    },
    {
      "epoch": 0.8083762672428121,
      "learning_rate": 1.4062500000000001e-05,
      "loss": 0.5068,
      "step": 38
    },
    {
      "epoch": 0.8296493269070966,
      "learning_rate": 1.25e-05,
      "loss": 0.5029,
      "step": 39
    },
    {
      "epoch": 0.8509223865713811,
      "learning_rate": 1.09375e-05,
      "loss": 0.5001,
      "step": 40
    },
    {
      "epoch": 0.8721954462356656,
      "learning_rate": 9.375000000000001e-06,
      "loss": 0.5043,
      "step": 41
    },
    {
      "epoch": 0.8934685058999502,
      "learning_rate": 7.8125e-06,
      "loss": 0.5087,
      "step": 42
    },
    {
      "epoch": 0.9147415655642347,
      "learning_rate": 6.25e-06,
      "loss": 0.5048,
      "step": 43
    },
    {
      "epoch": 0.9360146252285192,
      "learning_rate": 4.6875000000000004e-06,
      "loss": 0.512,
      "step": 44
    },
    {
      "epoch": 0.9572876848928037,
      "learning_rate": 3.125e-06,
      "loss": 0.5194,
      "step": 45
    },
    {
      "epoch": 0.9785607445570883,
      "learning_rate": 1.5625e-06,
      "loss": 0.5199,
      "step": 46
    },
    {
      "epoch": 0.9998338042213728,
      "learning_rate": 0.0,
      "loss": 0.5087,
      "step": 47
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 47,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3654543728042312e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}