{ "bits": 4, "compute_dtype": null, "damp_percent": 0.01, "desc_act": false, "enable_minmax_tuning": true, "enable_quanted_input": true, "group_size": 128, "iters": 1000, "lr": 0.001, "minmax_lr": 0.001, "model_file_base_name": "model", "model_name_or_path": null, "quant_method": "gptq", "scale_dtype": "fp16", "sym": false, "true_sequential": false }