OPEA /

Safetensors · qwen2_vl · 2-bit · gptq
weiweiz1 committed
Commit 392df24 · 1 Parent(s): eac2fb6

auto_round format
Files changed (1)
  1. quantization_config.json +24 -0

quantization_config.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bits": 2,
+ "group_size": 64,
+ "sym": true,
+ "data_type": "int",
+ "enable_quanted_input": true,
+ "enable_minmax_tuning": true,
+ "seqlen": 2048,
+ "batch_size": 8,
+ "scale_dtype": "torch.float16",
+ "lr": 0.0005,
+ "minmax_lr": 0.0005,
+ "gradient_accumulate_steps": 1,
+ "iters": 2000,
+ "amp": true,
+ "nsamples": 1024,
+ "low_gpu_mem_usage": true,
+ "to_quant_block_names": "model.layers",
+ "enable_norm_bias_tuning": false,
+ "dataset": "NeelNanda/pile-10k",
+ "autoround_version": "0.4.2",
+ "quant_method": "intel/auto-round",
+ "backend": "auto_round:gptq:exllamav2"
+ }
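
The file above records the full auto-round recipe used to produce this 2-bit checkpoint. As a rough guide to how these keys map onto the intel/auto-round API, here is a minimal sketch, assuming a placeholder base checkpoint and that the keyword names simply mirror the JSON keys; auto-round also ships a multimodal entry point (AutoRoundMLLM) that the maintainers may have used for this vision-language model instead.

# Sketch only: reproducing the recorded recipe with intel/auto-round (v0.4.2).
# The base model id is a placeholder; keyword arguments mirror quantization_config.json.
from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer
from auto_round import AutoRound

base_model = "Qwen/Qwen2-VL-7B-Instruct"  # placeholder, not stated in this commit
model = Qwen2VLForConditionalGeneration.from_pretrained(base_model, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(base_model)

autoround = AutoRound(
    model,
    tokenizer,
    bits=2,
    group_size=64,
    sym=True,
    iters=2000,
    nsamples=1024,
    seqlen=2048,
    batch_size=8,
    lr=0.0005,
    minmax_lr=0.0005,
    dataset="NeelNanda/pile-10k",
    low_gpu_mem_usage=True,
    to_quant_block_names="model.layers",  # quantize only the language-model blocks
)
autoround.quantize()
# Export in the auto_round format named in the commit message.
autoround.save_quantized("./qwen2-vl-int2-autoround", format="auto_round")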
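
The "backend" value "auto_round:gptq:exllamav2" records which kernel the checkpoint expects for its GPTQ-packed 2-bit weights. A minimal loading sketch, assuming auto-round and a recent transformers are installed and using a placeholder repository id:

# Sketch only: loading this repository with the recorded auto-round backend.
# The repository id below is a placeholder for this repo's full name.
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from auto_round import AutoRoundConfig  # import needed so transformers recognizes the auto-round format

repo_id = "OPEA/<this-repo>"  # placeholder
model = Qwen2VLForConditionalGeneration.from_pretrained(
    repo_id,
    torch_dtype="auto",
    device_map="auto",  # the exllamav2 GPTQ kernel named in the config targets CUDA devices
)
processor = AutoProcessor.from_pretrained(repo_id)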