Meta-Llama-3-8B-Instruct-GPTQ / quantize_config.json
{
"bits": 4,
"group_size": 128,
"damp_percent": 0.01,
"desc_act": true,
"static_groups": false,
"sym": true,
"true_sequential": true,
"model_name_or_path": null,
"model_file_base_name": null,
"is_marlin_format": false,
"quant_method": "gptq"
}
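
The settings above describe a standard GPTQ 4-bit quantization in the AutoGPTQ config format: 4 bits per weight, groups of 128 weights sharing one quantization scale, symmetric quantization ("sym": true), activation-order quantization ("desc_act": true), and sequential layer-by-layer quantization ("true_sequential": true). Below is a minimal loading sketch, assuming the auto-gptq package is installed and assuming a hypothetical repo id built from this page's model name and author; AutoGPTQ reads quantize_config.json from the checkpoint automatically, so none of these values need to be passed by hand.

    # Minimal sketch; the repo id is an assumption inferred from the page
    # header, not confirmed by the source. Requires: pip install auto-gptq
    from auto_gptq import AutoGPTQForCausalLM
    from transformers import AutoTokenizer

    model_id = "markoarnauto/Meta-Llama-3-8B-Instruct-GPTQ"  # assumed repo id

    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # from_quantized() picks up quantize_config.json, so the 4-bit,
    # group_size=128, desc_act=True settings above are applied automatically.
    model = AutoGPTQForCausalLM.from_quantized(model_id, device="cuda:0")

    # Quick smoke test: generate a short completion.
    inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=32)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))

Note that "desc_act": true (quantizing columns in order of decreasing activation magnitude) generally improves accuracy at some cost to inference speed with group_size set, and "is_marlin_format": false indicates the weights are stored in the standard GPTQ layout rather than the Marlin kernel format.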