This model has been quantized using GPTQModel.
- bits: 4
 - group_size: 128
 - desc_act: true
 - static_groups: false
 - sym: true
 - lm_head: false
 - damp_percent: 0.01
 - true_sequential: true
 - model_name_or_path: ""
 - model_file_base_name: "model"
 - quant_method: "gptq"
 - checkpoint_format: "gptq"
 - meta:
   - quantizer: "gptqmodel:0.9.9-dev0"
 
 
- Downloads last month: 53