# Source: Hugging Face Hub upload by mohitsha (HF staff) — "Upload 2 files", commit c02e4ab (verified)
from functools import partial
from optimum.amd.ryzenai import (
AutoQuantizationConfig,
RyzenAIOnnxQuantizer,
)
from optimum.exporters.onnx import main_export
from transformers import AutoFeatureExtractor
# --- Paths and model selection -------------------------------------------
export_dir = "resnet_onnx"                   # where the ONNX export is written
quantization_dir = "resnet_onnx_quantized"   # where the quantized model is saved
model_id = "microsoft/resnet-18"             # Transformers checkpoint to convert

# --- Step 1: export the checkpoint to ONNX -------------------------------
# Fixed 1x3x224x224 input with static axes (no_dynamic_axes=True) —
# presumably required by the downstream Ryzen AI compiler; confirm against
# the optimum-amd documentation.
main_export(
    model_name_or_path=model_id,
    output=export_dir,
    task="image-classification",
    no_dynamic_axes=True,
    opset=13,
    batch_size=1,
    height=224,
    width=224,
)

# --- Step 2: preprocessor matching the checkpoint ------------------------
# Loads the resize/normalize settings the model was trained with.
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
def preprocess_fn(ex, feature_extractor):
    """Map a raw dataset example to the model's expected pixel input.

    Args:
        ex: Dataset example dict with an ``"image"`` entry (a PIL-style
            image exposing ``.mode`` and ``.convert``).
        feature_extractor: Callable preprocessor (e.g. the object returned
            by ``AutoFeatureExtractor.from_pretrained``) whose result has a
            ``pixel_values`` sequence.

    Returns:
        dict with a single ``"pixel_values"`` entry holding the processed
        tensor for this one example (batch dimension stripped via ``[0]``).
    """
    image = ex["image"]
    # Normalize ANY non-RGB image (grayscale "L", palette "P", "RGBA",
    # "CMYK", ...) to 3-channel RGB. The original only handled "L", which
    # breaks on other modes present in imagenet-1k.
    if image.mode != "RGB":
        image = image.convert("RGB")
    pixel_values = feature_extractor(image).pixel_values[0]
    return {"pixel_values": pixel_values}
# --- Step 3/4: quantizer and recommended config --------------------------
# Recommended IPU quantization settings for CNN-style models.
quantization_config = AutoQuantizationConfig.ipu_cnn_config()

# Quantizer wraps the ONNX model exported above.
quantizer = RyzenAIOnnxQuantizer.from_pretrained(export_dir)

# --- Step 5: calibration data --------------------------------------------
# Stream 100 imagenet-1k training images (one example at a time,
# preprocess_batch=False) to compute quantization parameters.
train_calibration_dataset = quantizer.get_calibration_dataset(
    "imagenet-1k",
    dataset_split="train",
    num_samples=100,
    streaming=True,
    preprocess_batch=False,
    preprocess_function=partial(preprocess_fn, feature_extractor=feature_extractor),
)

# --- Step 6: quantize and save -------------------------------------------
quantizer.quantize(
    quantization_config=quantization_config,
    dataset=train_calibration_dataset,
    save_dir=quantization_dir,
)