{
  "model": "roberta-base",
  "dataset": "glue:cola",
  "dataset_train_split": "train",
  "dataset_dev_split": "validation",
  "tb_writer_step": 1000,
  "checkpoint_steps": -1,
  "checkpoint_every_epoch": false,
  "num_train_epochs": 5,
  "early_stopping_epochs": -1,
  "batch_size": 32,
  "max_length": 128,
  "learning_rate": 2e-05,
  "grad_accum_steps": 1,
  "warmup_proportion": 0.1,
  "config_name": "config.json",
  "weights_name": "pytorch_model.bin",
  "enable_wandb": false,
  "output_dir": "/p/qdata/jm8wx/research/text_attacks/textattack/outputs/training/roberta-base-glue:cola-2020-06-29-14:54/",
  "num_labels": 2,
  "do_regression": false,
  "best_eval_score": 0.850431447746884,
  "best_eval_score_epoch": 1,
  "epochs_since_best_eval_score": 3
}
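A minimal sketch of reading this training-args JSON back in Python, assuming it is saved as a file named `train_args.json` (a hypothetical filename) next to `config.json` and `pytorch_model.bin` in `output_dir`:

```python
import json

# Load the saved training arguments (filename is an assumption for illustration).
with open("train_args.json") as f:
    args = json.load(f)

# Inspect a few recorded values from the run.
print(args["model"])             # "roberta-base"
print(args["dataset"])           # "glue:cola"
print(args["best_eval_score"])   # 0.850431447746884 (reached at epoch 1)
```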