{
"per_channel": true,
"reduce_range": true,
"per_model_config": {
"decoder_with_past_model": {
"op_types": [
"MatMul",
"Sub",
"Gather",
"Where",
"Shape",
"Concat",
"Range",
"Unsqueeze",
"Mul",
"Tanh",
"Transpose",
"Pow",
"Reshape",
"Constant",
"Div",
"Cast",
"Softmax",
"Slice",
"Add",
"Sqrt",
"ReduceMean",
"Squeeze"
],
"weight_type": "QInt8"
},
"decoder_model_merged": {
"op_types": [
"MatMul",
"Sub",
"Gather",
"Where",
"Shape",
"Concat",
"Range",
"Unsqueeze",
"Mul",
"Tanh",
"Transpose",
"Pow",
"Reshape",
"Constant",
"Div",
"If",
"Cast",
"Softmax",
"Slice",
"Add",
"Sqrt",
"ReduceMean",
"Squeeze"
],
"weight_type": "QInt8"
},
"decoder_model": {
"op_types": [
"MatMul",
"Sub",
"Gather",
"Where",
"Shape",
"Concat",
"Range",
"Unsqueeze",
"Mul",
"Tanh",
"Transpose",
"Pow",
"Reshape",
"Constant",
"Div",
"Cast",
"Softmax",
"Slice",
"Add",
"Sqrt",
"ReduceMean",
"Squeeze"
],
"weight_type": "QInt8"
}
}
}