{
  "bits": 4,
  "group_size": 128,
  "damp_percent": 0.01,
  "desc_act": false,
  "static_groups": false,
  "sym": true,
  "true_sequential": true,
  "model_name_or_path": "Qwen2-1.5B-Instruct-4bit-GPTQ",
  "model_file_base_name": "gptq_model-4bit-128g",
  "is_marlin_format": false,
  "quant_method": "gptq"
}