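# Quantization recipe, stage "quant_stage" (assumption: an llm-compressor /
# SparseML-style recipe; the description below is read directly from the keys):
# - QuantizationModifier: int8 weights (per-tensor, static, symmetric) and
#   int8 input activations (per-token, dynamic, symmetric) on all Linear
#   modules, with lm_head excluded.
# - ConstantPruningModifier: holds the existing sparsity pattern of the
#   attention (q/k/v/o) and MLP (gate/up/down) projection weights fixed
#   from step 0 onward.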
quant_stage:
  quant_modifiers:
    QuantizationModifier:
      ignore: [lm_head]
      config_groups:
        group_0:
          weights: {num_bits: 8, type: int, strategy: tensor, dynamic: false, symmetric: true}
          input_activations: {num_bits: 8, type: int, strategy: token, dynamic: true, symmetric: true}
          targets: [Linear]
  pruning_modifiers:
    ConstantPruningModifier:
      targets: ['re:.*q_proj.weight', 're:.*k_proj.weight', 're:.*v_proj.weight', 're:.*o_proj.weight',
        're:.*gate_proj.weight', 're:.*up_proj.weight', 're:.*down_proj.weight']
      start: 0
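
# A minimal application sketch, kept in comments so the file stays valid YAML.
# Assumption: the recipe is consumed by llm-compressor's `oneshot` entrypoint;
# the model path, recipe filename, and output directory are placeholders, not
# values taken from this repository.
#
#   from llmcompressor.transformers import oneshot
#
#   oneshot(
#       model="path/to/base-model",    # hypothetical source checkpoint
#       recipe="recipe.yaml",          # this recipe, saved locally
#       output_dir="path/to/output",   # hypothetical save location
#   )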