docs / huggingface_optimum-quanto.txt
danidarko's picture
Upload 59 files
b1d4de0 verified
# File: optimum-quanto-main/bench/generation/evaluate_configurations.py
import argparse
import json
import torch
from evaluate_model import evaluate
from gen_barchart import gen_barchart
from transformers import AutoConfig
from optimum.quanto import qtype
def evaluate_model_configurations(model_id: str, metric: str, device: torch.device, batch_size: int=32, dtype: torch.dtype=torch.float16):
weights = ['int4', 'int8', 'float8']
activations = ['none', 'float8']
def short_name(qtype: qtype):
return {'none': 'f16' if dtype == torch.float16 else 'bf16', 'int4': 'i4', 'int8': 'i8', 'float8': 'f8'}[qtype]
results = {}
config_name = f"W{short_name('none')}A{short_name('none')}"
print(f'{model_id}[{config_name}]:')
results[config_name] = evaluate(model_id, metric, 'quanto', 'none', 'none', batch_size, device, dtype)
for w in weights:
for a in activations:
config_name = f'W{short_name(w)}A{short_name(a)}'
print(f'{model_id}[{config_name}]:')
results[config_name] = evaluate(model_id, metric, 'quanto', w, a, batch_size, device, dtype)
return results
def main():
parser = argparse.ArgumentParser(description='Evaluate quantized model predictions on Lambada Dataset')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--model', type=str, default='facebook/opt-350m', help='The name of the trained Model.')
parser.add_argument('--device', type=str, default=None, help='The device to use for generation.')
parser.add_argument('--metric', type=str, default='prediction', choices=['latency', 'prediction', 'perplexity'])
parser.add_argument('--batch_size', type=int, default=32, help='The batch size during evaluation.')
parser.add_argument('--dtype', type=str, help='Use the following dtype to load the model.')
parser.add_argument('--json', action='store_true', help='Dump the results to a json file.')
parser.add_argument('--png', action='store_true', help='Generate a PNG.')
args = parser.parse_args()
torch.manual_seed(args.seed)
if args.device is None:
if torch.cuda.is_available():
device = torch.device('cuda')
elif torch.backends.mps.is_available():
device = torch.device('mps')
else:
device = torch.device('cpu')
else:
device = torch.device(args.device)
if args.dtype is None:
config = AutoConfig.from_pretrained(args.model)
dtype = getattr(config, 'torch_dtype', torch.float16)
else:
dtype = torch.float16 if args.dtype == 'fp16' else torch.bfloat16
results = evaluate_model_configurations(args.model, args.metric, device, batch_size=args.batch_size, dtype=dtype)
if args.json:
model_name = args.model.split('/')[-1]
json_path = f'{model_name}-{args.metric}.json'
with open(json_path, 'w') as fp:
json.dump({model_name: results}, fp, indent=4)
if args.png:
if args.metric == 'latency':
title = f'{args.model}: Mean latency per token'
label = 'Latency (ms)'
elif args.metric == 'prediction':
title = f'{args.model}: Prediction accuracy on Lambada dataset'
label = 'Accuracy'
elif args.metric == 'perplexity':
title = f'{args.model}: Perplexity evaluated on WikiText dataset'
label = 'Perplexity'
gen_barchart(args.model, title, label, results, dtype)
if __name__ == '__main__':
main()
# File: optimum-quanto-main/bench/generation/evaluate_model.py
import argparse
import torch
from datasets import load_dataset
from metrics.latency import latency
from metrics.perplexity import perplexity
from metrics.prediction import prediction_accuracy
from setup.awq import setup as awq_setup
from setup.bnb import setup as bnb_setup
from setup.hqq import setup as hqq_setup
from setup.quanto import setup as quanto_setup
from transformers import AutoConfig
@torch.no_grad()
def calibrate(model, tokenizer, batch_size, batches):
samples = batch_size * batches
cal_dataset = load_dataset('lambada', split=['validation'])[0]
model.eval()
total = 0
for batch in cal_dataset.iter(batch_size=batch_size):
inputs = tokenizer(batch['text'], return_tensors='pt', padding=True)
input_ids = inputs.input_ids.to(model.device)
attention_mask = inputs.attention_mask.to(model.device)
model(input_ids, attention_mask=attention_mask)
total += input_ids.size(0)
if total >= samples:
break
def evaluate(model_id: str, metric: str, quantizer: str, weights: str, activations: str, batch_size: int, device: torch.device, dtype: torch.dtype=None):
if quantizer == 'quanto':
if dtype is None:
config = AutoConfig.from_pretrained(model_id)
dtype = getattr(config, 'torch_dtype', torch.float16)
(model, tokenizer) = quanto_setup(model_id, weights, activations, batch_size, device, dtype)
elif quantizer == 'awq':
(model, tokenizer) = awq_setup(model_id, weights, activations, group_size=128)
elif quantizer == 'bnb':
(model, tokenizer) = bnb_setup(model_id, weights, activations, device)
elif quantizer == 'hqq':
(model, tokenizer) = hqq_setup(model_id, weights, activations, device)
else:
raise ValueError(f'Unsupported quantizer {quantizer}')
dtype = next(model.parameters()).dtype
weights = dtype if weights == 'none' else weights
activations = dtype if activations == 'none' else activations
print(f'Evaluating {model_id} {metric} with {weights} weights and {activations} activations.')
if metric == 'latency':
return latency(model, tokenizer, device, batch_size=1, prompt_length=512, nb_tokens=512, iterations=3)
elif metric == 'prediction':
return prediction_accuracy(model, tokenizer, batch_size)
elif metric == 'perplexity':
return perplexity(model, tokenizer)
def main():
parser = argparse.ArgumentParser(description='Evaluate quantized model metrics')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--model', type=str, default='facebook/opt-350m', help='The name of the trained Model.')
parser.add_argument('--device', type=str, default=None, help='The device to use for generation.')
parser.add_argument('--metric', type=str, default='prediction', choices=['latency', 'prediction', 'perplexity'])
parser.add_argument('--quantizer', type=str, default='quanto', choices=['quanto', 'awq', 'bnb', 'hqq'])
parser.add_argument('--weights', type=str, default='none', choices=['none', 'int4', 'int8', 'float8'])
parser.add_argument('--activations', type=str, default='none', choices=['none', 'int8', 'float8'])
parser.add_argument('--batch_size', type=int, default=32, help='The batch size during evaluation.')
parser.add_argument('--dtype', type=str, default='none', choices=['none', 'fp16', 'bf16'])
args = parser.parse_args()
torch.manual_seed(args.seed)
if args.device is None:
if torch.cuda.is_available():
device = torch.device('cuda')
elif torch.backends.mps.is_available():
device = torch.device('mps')
else:
device = torch.device('cpu')
else:
device = torch.device(args.device)
dtype = {'none': None, 'fp16': torch.float16, 'bf16': torch.bfloat16}[args.dtype]
evaluate(args.model, args.metric, args.quantizer, args.weights, args.activations, args.batch_size, device, dtype)
if __name__ == '__main__':
main()
# File: optimum-quanto-main/bench/generation/gen_barchart.py
import argparse
import json
import matplotlib.pyplot as plt
import numpy as np
import torch
def save_bar_chart(title, labels, ylabel, series, save_path):
x = np.arange(len(labels))
width = 0.15
multiplier = 0
(fig, ax) = plt.subplots(layout='constrained')
fig.set_figwidth(10)
max_value = 0
for (attribute, measurement) in series.items():
max_value = max(max_value, max(measurement))
offset = width * multiplier
rects = ax.bar(x + offset, measurement, width, label=attribute)
ax.bar_label(rects, padding=5)
multiplier += 1
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xticks(x + width, labels)
ax.legend(loc='upper left', ncols=4)
ax.set_ylim(0, max_value * 1.2)
plt.savefig(save_path)
def gen_barchart(model_id, title, label, results, dtype):
dtype_str = 'f16' if dtype is torch.float16 else 'bf16'
activations = (dtype_str, 'f8')
weights = ('i4', 'i8', 'f8')
series = {}
reference = round(results[f'W{dtype_str}A{dtype_str}'], 2)
series[f'Weights {dtype_str}'] = [reference] * len(activations)
for w in weights:
name = f'Weights {w}'
series[name] = []
for a in activations:
result = results[f'W{w}A{a}']
series[name].append(round(result, 2))
model_name = model_id.replace('/', '-')
metric_name = label.replace(' ', '_').replace('(', '_').replace(')', '_')
save_bar_chart(title=title, labels=[f'Activations {a}' for a in activations], series=series, ylabel=label, save_path=f'{model_name}_{dtype_str}_{metric_name}.png')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('benchmark', type=str, help='A benchmark result file (.json).')
parser.add_argument('--title', type=str, required=True, help='The graph title.')
parser.add_argument('--label', type=str, required=True, help='The graph vertical label.')
args = parser.parse_args()
with open(args.benchmark) as f:
benchmark = json.load(f)
for (model_id, results) in benchmark.items():
gen_barchart(model_id, args.title, args.label, results)
if __name__ == '__main__':
main()
# File: optimum-quanto-main/bench/generation/metrics/latency.py
import gc
import time
import numpy as np
import torch
from tqdm.auto import tqdm
from transformers import GenerationConfig
def latency(model, tokenizer, device, batch_size=1, prompt_length=512, nb_tokens=512, iterations=10):
def synchronize(device):
if device.type == 'cuda':
torch.cuda.synchronize()
elif device.type == 'mps':
torch.mps.synchronize()
else:
torch.cpu.synchronize()
def timing_event(device):
if device.type == 'cuda':
return torch.cuda.Event(enable_timing=True)
elif device.type == 'mps':
return torch.mps.Event(enable_timing=True)
class CPUEvent:
def __init__(self):
self.time = None
def record(self):
self.time = time.time()
def elapsed_time(self, other):
assert self.time is not None
assert other.time is not None
return (other.time - self.time) * 1000
return CPUEvent()
generation_config = GenerationConfig(max_new_tokens=nb_tokens, min_new_tokens=nb_tokens, use_cache=True, pad_token_id=tokenizer.pad_token_id, num_beams=1, do_sample=False, eos_token_id=None)
if getattr(model, 'generation_config', None) is not None:
model.generation_config.eos_token_id = None
synchronize(device)
if device.type == 'cuda':
torch.cuda.reset_peak_memory_stats()
memory = get_device_memory(device)
if memory is not None:
print(f'Device memory: {memory / 2 ** 30:.4f} GB')
latencies = []
input_ids = torch.randint(1, model.config.vocab_size - 1, size=(batch_size, prompt_length)).to(device)
masks = torch.ones(batch_size, prompt_length, dtype=torch.int32).to(device)
for _ in tqdm(range(iterations)):
start_event = timing_event(device)
end_event = timing_event(device)
synchronize(device)
start_event.record()
_ = model.generate(input_ids, attention_mask=masks, generation_config=generation_config)
end_event.record()
synchronize(device)
latency_ms = start_event.elapsed_time(end_event)
latencies.append(latency_ms)
if device.type == 'cuda':
peak_memory = torch.cuda.max_memory_allocated()
print(f'Peak memory during benchmark: {peak_memory / 2 ** 30:.4f} GB')
mean_latency = np.mean(latencies) / generation_config.min_new_tokens
print(f'Average latency per token: {mean_latency} ms')
return mean_latency
def get_device_memory(device):
gc.collect()
if device.type == 'cuda':
torch.cuda.empty_cache()
return torch.cuda.memory_allocated()
elif device.type == 'mps':
torch.mps.empty_cache()
return torch.mps.current_allocated_memory()
return None
# File: optimum-quanto-main/bench/generation/metrics/perplexity.py
import sys
import numpy as np
import torch
from datasets import load_dataset
from tqdm import tqdm
class Perplexity:
def __init__(self, model, tokenizer, dataset_path='wikitext', dataset_name=None, split='test', text_column='text'):
self._model = model
self._tokenizer = tokenizer
self._dataset_path = dataset_path
self._dataset_name = dataset_name
self._split = split
self._text_column = text_column
self._text = self._prepare_data()
def _prepare_data(self):
if self._dataset_path == 'wikitext':
self._dataset_name = 'wikitext-2-raw-v1'
data = load_dataset(self._dataset_path, self._dataset_name, split=self._split)
text_list = [' \n' if s == '' else s for s in data[self._text_column]]
return ''.join(text_list)
@staticmethod
def softmax(logits):
e_x = np.exp(logits - np.max(logits))
return e_x / e_x.sum(axis=0)
def calculate_perplexity(self, n_ctx=512, n_batch=512):
self._tokenizer.model_max_length = sys.maxsize
tokens = self._tokenizer(self._text, truncation=False, return_tensors='pt').input_ids.to(self._model.device)
nll = 0.0
count = 0
curr_ppl = 0
all_perplexity = []
with tqdm(range(len(tokens[0]) // n_ctx), desc='Perplexity: - ') as progress:
for i in progress:
(nll, count) = self._process_batch(i, n_ctx, n_batch, tokens, nll, count)
curr_ppl = np.exp(nll / count)
all_perplexity.append(curr_ppl)
progress.set_description(f'Perplexity: {curr_ppl:.4f}')
return all_perplexity
def _process_batch(self, i, n_ctx, n_batch, tokens, nll, count):
start = i * n_ctx
end = start + n_ctx
num_batches = (n_ctx + n_batch - 1) // n_batch
logits = []
for j in range(num_batches):
batch_start = start + j * n_batch
batch_size = min(end - batch_start, n_batch)
token_org = tokens[0][batch_start].item()
if j == 0:
tokens[0][batch_start] = self._tokenizer.bos_token_id
batch_logits = self._compute_batch_logits(tokens, batch_start, batch_size)
tokens[0][batch_start] = token_org
logits.append(batch_logits)
for j in range(min(512, n_ctx // 2), n_ctx - 1):
tok_logits = logits[0][0][j].cpu().numpy()
prob = self.softmax(tok_logits)[tokens[0][start + j + 1]]
nll += -np.log(prob, where=prob > 0)
count += 1
return (nll, count)
def _compute_batch_logits(self, tokens, batch_start, batch_size):
with torch.no_grad():
outputs = self._model(tokens[:, batch_start:batch_start + batch_size])
return outputs.logits.detach()
def perplexity(model, tokenizer, stride: int=512):
print('Evaluating perplexity')
ppl = Perplexity(model, tokenizer)
ppl_value = np.mean(ppl.calculate_perplexity(n_ctx=stride))
return ppl_value
# File: optimum-quanto-main/bench/generation/metrics/prediction.py
import time
import torch
from datasets import load_dataset
@torch.no_grad()
def prediction_accuracy(model, tokenizer, batch_size, samples=None):
test_dataset = load_dataset('lambada', split=['test'])[0]
model.eval()
(total, hit) = (0, 0)
start = time.time()
for batch in test_dataset.iter(batch_size=batch_size):
inputs = tokenizer(batch['text'], return_tensors='pt', padding=True)
input_ids = inputs.input_ids.to(model.device)
attention_mask = inputs.attention_mask.to(model.device)
labels = input_ids[:, -1]
outputs = model(input_ids[:, :-1], attention_mask=attention_mask[:, :-1])
preds = outputs.logits[:, -1, :].argmax(dim=-1)
total += labels.size(0)
hit += (preds == labels).sum().item()
if samples is not None and total >= samples:
break
end = time.time()
acc = hit / total
print(f'{total} sequences evaluated in {end - start:.2f} s. accuracy = {acc:.2f}')
return acc
# File: optimum-quanto-main/bench/generation/setup/awq.py
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer
def prepare_inputs_for_generation(input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs):
if past_key_values is not None:
cache_length = past_length = past_key_values[0][0].shape[2]
max_cache_length = None
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length):]
elif past_length < input_ids.shape[1]:
input_ids = input_ids[:, past_length:]
if max_cache_length is not None and attention_mask is not None and (cache_length + input_ids.shape[1] > max_cache_length):
attention_mask = attention_mask[:, -max_cache_length:]
position_ids = kwargs.get('position_ids', None)
if attention_mask is not None and position_ids is None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -input_ids.shape[1]:]
if inputs_embeds is not None and past_key_values is None:
model_inputs = {'inputs_embeds': inputs_embeds}
else:
model_inputs = {'input_ids': input_ids}
model_inputs.update({'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'attention_mask': attention_mask})
return model_inputs
def setup(model_id: str, weights: str, activations: str, group_size: int=64, version='GEMV_FAST'):
if activations != 'none':
raise ValueError('Activation quantization is not supported by HQQ')
if weights != 'int4':
raise ValueError('AWQ only supports int4 weights.')
quant_config = {'zero_point': True, 'q_group_size': group_size, 'w_bit': 4, 'version': version}
model = AutoAWQForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
tokenizer.pad_token_id = tokenizer.eos_token_id
tokenizer.padding_side = 'left'
model.quantize(tokenizer, quant_config=quant_config)
quant_path = model_id.replace('/', '-') + f'_{group_size}_{version}'
model.save_quantized(quant_path)
model = AutoAWQForCausalLM.from_quantized(quant_path)
model.model.prepare_inputs_for_generation = prepare_inputs_for_generation
model.device = next(model.parameters()).device
return (model, tokenizer)
# File: optimum-quanto-main/bench/generation/setup/bnb.py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
def setup(model_id: str, weights: str, activations: str, device: torch.device):
if activations != 'none':
raise ValueError('Activation quantization is not supported by BitsAndBytes')
if weights == 'int4':
quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type='fp4')
elif weights == 'int8':
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
else:
raise ValueError('BitsAndBytes only supports int4 and int8 weights.')
dtype = torch.float32 if device.type == 'cpu' else torch.float16
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token_id = tokenizer.eos_token_id
tokenizer.padding_side = 'left'
quantization_config.bnb_4bit_compute_dtype = dtype
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype, low_cpu_mem_usage=True, quantization_config=quantization_config)
return (model, tokenizer)
# File: optimum-quanto-main/bench/generation/setup/hqq.py
import torch
from hqq.core.quantize import BaseQuantizeConfig
from hqq.engine.hf import HQQModelForCausalLM
from transformers import AutoTokenizer
def setup(model_id: str, weights: str, activations: str, device: torch.device, group_size: int=64):
if activations != 'none':
raise ValueError('Activation quantization is not supported by HQQ')
if weights == 'int4':
quant_config = BaseQuantizeConfig(nbits=4, group_size=group_size)
elif weights == 'int8':
quant_config = BaseQuantizeConfig(nbits=8, group_size=group_size)
else:
raise ValueError('HQQ only supports int4 and int8 weights.')
model = HQQModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)
model.quantize_model(quant_config=quant_config, compute_dtype=torch.float16, device=device)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
tokenizer.pad_token_id = tokenizer.eos_token_id
tokenizer.padding_side = 'left'
return (model, tokenizer)
# File: optimum-quanto-main/bench/generation/setup/quanto.py
import time
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from optimum.quanto import Calibration, freeze, qfloat8, qint4, qint8, quantize
@torch.no_grad()
def calibrate(model, tokenizer, batch_size, batches):
samples = batch_size * batches
cal_dataset = load_dataset('lambada', split=['validation'])[0]
model.eval()
total = 0
for batch in cal_dataset.iter(batch_size=batch_size):
inputs = tokenizer(batch['text'], return_tensors='pt', padding=True)
input_ids = inputs.input_ids.to(model.device)
attention_mask = inputs.attention_mask.to(model.device)
model(input_ids, attention_mask=attention_mask)
total += input_ids.size(0)
if total >= samples:
break
def setup(model_id: str, weights: str, activations: str, batch_size: int, device: torch.device, dtype: torch.dtype):
weights = keyword_to_qtype(weights)
activations = keyword_to_qtype(activations)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token_id = tokenizer.eos_token_id
tokenizer.padding_side = 'left'
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype, low_cpu_mem_usage=True).to(device)
if weights is not None or activations is not None:
print('Quantizing')
start = time.time()
quantization_root = model
if hasattr(model, 'model'):
quantization_root = model.model
quantize(quantization_root, weights=weights, activations=activations)
if activations is not None:
print('Calibrating')
with Calibration():
calibrate(model, tokenizer, batch_size, batches=4)
print('Freezing')
freeze(model)
print(f'Finished: {time.time() - start:.2f}')
return (model, tokenizer)
def keyword_to_qtype(k):
return {'none': None, 'int4': qint4, 'int8': qint8, 'float8': qfloat8}[k]
# File: optimum-quanto-main/bench/kernels/benchmark.py
import argparse
import time
from contextlib import nullcontext
import numpy as np
import torch
from tqdm.auto import tqdm
from optimum.quanto.library import disable_extensions
def get_unpack_bench(bits, device):
qmax = 2 ** bits
a = torch.randint(0, qmax, [10240, 10240], dtype=torch.uint8).to(device)
def bench_fn():
return torch.ops.quanto.unpack(a, bits)
return bench_fn
def timing(get_bench_func, device, iterations=10):
def synchronize(device):
if device.type == 'cuda':
torch.cuda.synchronize()
elif device.type == 'mps':
torch.mps.synchronize()
else:
torch.cpu.synchronize()
def timing_event(device):
if device.type == 'cuda':
return torch.cuda.Event(enable_timing=True)
elif device.type == 'mps':
return torch.mps.Event(enable_timing=True)
class CPUEvent:
def __init__(self):
self.time = None
def record(self):
self.time = time.time()
def elapsed_time(self, other):
assert self.time is not None
assert other.time is not None
return (other.time - self.time) * 1000
return CPUEvent()
synchronize(device)
bench_func = get_bench_func(device)
bench_func()
latencies = np.empty((iterations, 2))
for i in tqdm(range(iterations)):
for (j, context) in enumerate([disable_extensions(), nullcontext()]):
start_event = timing_event(device)
end_event = timing_event(device)
synchronize(device)
start_event.record()
with context:
bench_func()
end_event.record()
synchronize(device)
latencies[i, j] = start_event.elapsed_time(end_event)
return (np.mean(latencies[:, 0]), np.mean(latencies[:, 1]))
GET_BENCH_FUNCTIONS = {'unpack_2bit': lambda device: get_unpack_bench(2, device), 'unpack_4bit': lambda device: get_unpack_bench(4, device)}
def main():
parser = argparse.ArgumentParser(description='Kernel benchmark')
parser.add_argument('--kernel', type=str, default=None, help='The kernel to benchmark. None to test all of them')
parser.add_argument('--device', type=str, default=None, help='The device to use for benchmark.')
parser.add_argument('--it', type=int, default=10, help='The number of benchmark iterations')
args = parser.parse_args()
if args.device is None:
if torch.cuda.is_available():
device = torch.device('cuda')
elif torch.backends.mps.is_available():
device = torch.device('mps')
else:
device = torch.device('cpu')
else:
device = torch.device(args.device)
all_kernels = GET_BENCH_FUNCTIONS.keys()
kernels = all_kernels if args.kernel is None else [args.kernel]
for kernel in kernels:
get_bench_fn = GET_BENCH_FUNCTIONS[kernel]
(python_ms, ext_ms) = timing(get_bench_fn, device, iterations=args.it)
ratio = python_ms / ext_ms
print(f'\n{kernel}[{device.type}]: python = {python_ms:.3f} ms, ext = {ext_ms:.3f} ms, ratio = {ratio:.1f}x')
if __name__ == '__main__':
main()
# File: optimum-quanto-main/bench/kernels/benchmark_marlin_fp8.py
import argparse
from typing import Optional
import numpy as np
import torch
from optimum.quanto.tensor.weights.marlin.packed import pack_fp8_as_int32
M_SHAPES = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192]
N_SHAPES = [4096]
K_SHAPES = [4096]
def run_benchmark(m: Optional[int], n: Optional[int], k: Optional[int], n_runs: int, n_warmup: int, dtype: torch.dtype=torch.float16):
print(f'\n----------- m={m}, n={n}, k={k}')
n_tokens = m
in_features = k
out_features = n
assert m is not None
device = torch.device('cuda')
inputs = torch.rand(n_tokens, in_features, dtype=dtype, device=device)
other_shape = (in_features, out_features)
other_data = torch.rand(other_shape, dtype=dtype, device=device).to(torch.float8_e4m3fn)
other_data_int32 = pack_fp8_as_int32(other_data)
perm = torch.empty(0, dtype=torch.int, device=device)
other_data_repack = torch.ops.quanto.gptq_marlin_repack(b_q_weight=other_data_int32, perm=perm, size_k=in_features, size_n=out_features, num_bits=8)
other_scale = torch.rand(1, dtype=dtype, device=device)
other_scale = other_scale.repeat(1, out_features)
workspace = torch.zeros(out_features // 64 * 16, dtype=torch.int, device=device)
latencies_marlin_fp8 = []
latencies_torch = []
with torch.no_grad():
for i in range(n_runs):
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
torch.cuda.synchronize(device)
start_event.record()
_ = torch.ops.quanto.fp8_marlin_gemm(a=inputs, b_q_weight=other_data_repack, b_scales=other_scale, workspace=workspace, num_bits=8, size_m=n_tokens, size_n=out_features, size_k=in_features)
end_event.record()
torch.cuda.synchronize(device)
latency_ms = start_event.elapsed_time(end_event)
if i >= n_warmup:
latencies_marlin_fp8.append(latency_ms)
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
torch.cuda.synchronize(device)
start_event.record()
other = other_data.to(dtype) * other_scale
_ = torch.matmul(inputs, other)
end_event.record()
torch.cuda.synchronize(device)
latency_ms = start_event.elapsed_time(end_event)
if i >= n_warmup:
latencies_torch.append(latency_ms)
mean_latency_torch = np.mean(latencies_torch)
mean_latency_marlin_fp8 = np.mean(latencies_marlin_fp8)
print('mean_latency_torch:', mean_latency_torch)
print('mean_latency_marlin_fp8:', mean_latency_marlin_fp8)
return (mean_latency_torch, mean_latency_marlin_fp8)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Marlin FP8 kernel benchmark')
parser.add_argument('--nruns', type=int, default=20, help='The number of benchmark iterations')
parser.add_argument('--nwarmup', type=int, default=2, help='The number of warmup iterations (deducted from nruns)')
parser.add_argument('--m', type=int, help='m dimension of A=m*k', default=None)
parser.add_argument('--n', type=int, help='n dimension of B=k*n (out_features)', default=None)
parser.add_argument('--k', type=int, help='k dimension of A=m*k and B=k*n (in_features), hidden_size', default=None)
args = parser.parse_args()
if args.m is not None:
def shape_generator():
yield (args.m, args.n, args.k)
else:
def shape_generator():
for m in M_SHAPES:
for n in N_SHAPES:
for k in K_SHAPES:
yield (m, n, k)
result = 'm,n_out,k_in,torch_latency_ms,marlin_fp8_latency_ms\n'
for (m, n, k) in shape_generator():
(mean_latency_torch, mean_latency_marlin_fp8) = run_benchmark(m, n, k, args.nruns, args.nwarmup)
result += ','.join([str(m), str(n), str(k), f'{mean_latency_torch:.4f}', f'{mean_latency_marlin_fp8:.4f}']) + '\n'
print('\nResults:')
print(result)
# File: optimum-quanto-main/external/awq/pack_intweight.py
import torch
def pack_intweight(unpacked_qweight, interleave, kstride):
N = unpacked_qweight.shape[0]
K = unpacked_qweight.shape[1]
Packed_Kernel = unpacked_qweight.cpu().numpy().reshape(N, K // 32, 32)
Packed_Kernel = Packed_Kernel.reshape(N, K // 32, 4, 4, 2).transpose(0, 1, 3, 2, 4)
Packed_Kernel = Packed_Kernel.reshape(N, K // 32, 32)
Packed_Kernel = Packed_Kernel.reshape(N, K // 32, 4, 8)
Packed_Kernel = Packed_Kernel.reshape(N, K // 32, 4, 4, 2).transpose(0, 1, 2, 4, 3)
Packed_Kernel = Packed_Kernel.reshape(N, K)
Packed_Kernel = Packed_Kernel.reshape(N // interleave, interleave, K // kstride, kstride)
Packed_Kernel = Packed_Kernel.transpose(0, 2, 1, 3)
Packed_Kernel = Packed_Kernel.reshape(N // interleave, K // kstride, kstride, interleave)
Packed_Kernel = Packed_Kernel[..., 0] | Packed_Kernel[..., 1] << 4 | Packed_Kernel[..., 2] << 8 | Packed_Kernel[..., 3] << 12
Packed_Kernel = Packed_Kernel.reshape(N // interleave, K)
qweight = torch.tensor(Packed_Kernel.astype('int16')).to(unpacked_qweight.device).contiguous()
return qweight
# File: optimum-quanto-main/external/awq/packing_utils.py
import torch
AWQ_ORDER = [0, 2, 4, 6, 1, 3, 5, 7]
AWQ_REVERSE_ORDER = [0, 4, 1, 5, 2, 6, 3, 7]
def pack_awq(intweight: torch.Tensor, reorder=False):
bits = 4
pack_num = 32 // bits
qweight = torch.zeros(intweight.shape[0], intweight.shape[1] // pack_num, dtype=torch.int32, device=intweight.device)
for col in range(intweight.shape[1] // pack_num):
if reorder:
order_map = [0, 2, 4, 6, 1, 3, 5, 7]
else:
order_map = [0, 1, 2, 3, 4, 5, 6, 7]
for i in range(pack_num):
qweight_col = intweight[:, col * pack_num + order_map[i]]
qweight[:, col] |= qweight_col << i * bits
return qweight
def unpack_awq(qweight: torch.Tensor, bits: int):
shifts = torch.arange(0, 32, bits, device=qweight.device)
iweights = torch.bitwise_right_shift(qweight[:, :, None], shifts[None, None, :]).to(torch.int8)
iweights = iweights.view(iweights.shape[0], -1)
return iweights
def reverse_awq_order(iweights: torch.Tensor, bits: int):
reverse_order_tensor = torch.arange(iweights.shape[-1], dtype=torch.int32, device=iweights.device)
reverse_order_tensor = reverse_order_tensor.view(-1, 32 // bits)
reverse_order_tensor = reverse_order_tensor[:, AWQ_REVERSE_ORDER]
reverse_order_tensor = reverse_order_tensor.view(-1)
iweights = iweights[:, reverse_order_tensor]
return iweights
def pack_exllama(iweights: torch.Tensor, izeros: torch.Tensor, bits: int):
shifts = torch.arange(0, 32, bits, device=iweights.device)
iweights = iweights.view(iweights.shape[0] // (32 // bits), 32 // bits, -1)
qweight = torch.bitwise_left_shift(iweights, shifts[None, :, None]).sum(dim=1).to(torch.int32)
izeros = izeros.view(-1, izeros.shape[1] // (32 // bits), 32 // bits)
qzeros = torch.bitwise_left_shift(izeros, shifts[None, None, :]).sum(dim=-1).to(torch.int32)
return (qweight, qzeros)
def unpack_reorder_pack(qweight, qzeros, bits):
(iweight, izeros) = unpack_awq(qweight, qzeros, bits)
(iweight, izeros) = reverse_awq_order(iweight, izeros, bits)
iweight = torch.bitwise_and(iweight, 2 ** bits - 1)
izeros = torch.bitwise_and(izeros, 2 ** bits - 1)
izeros = izeros - 1
(qweight, qzeros) = pack_exllama(iweight, izeros, bits)
return (qweight, qzeros)
def dequantize_gemm(qweight, qzeros, scales, bits, group_size):
(iweight, izeros) = unpack_awq(qweight, qzeros, bits)
(iweight, izeros) = reverse_awq_order(iweight, izeros, bits)
iweight = torch.bitwise_and(iweight, 2 ** bits - 1)
izeros = torch.bitwise_and(izeros, 2 ** bits - 1)
scales = scales.repeat_interleave(group_size, dim=0)
izeros = izeros.repeat_interleave(group_size, dim=0)
iweight = (iweight - izeros) * scales
return iweight
# File: optimum-quanto-main/external/smoothquant/smoothquant.py
import argparse
import functools
import os
import torch
import torch.nn as nn
from datasets import load_dataset
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.models.bloom.modeling_bloom import BloomBlock
from transformers.models.opt.modeling_opt import OPTDecoderLayer
from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaRMSNorm
from transformers.models.mistral.modeling_mistral import MistralDecoderLayer, MistralRMSNorm
def get_act_scales(model, tokenizer, dataset, num_samples=512, seq_len=512):
model.eval()
device = next(model.parameters()).device
act_scales = {}
def stat_tensor(name, tensor):
hidden_dim = tensor.shape[-1]
tensor = tensor.view(-1, hidden_dim).abs().detach()
comming_max = torch.max(tensor, dim=0)[0].float().cpu()
if name in act_scales:
act_scales[name] = torch.max(act_scales[name], comming_max)
else:
act_scales[name] = comming_max
def stat_input_hook(m, x, y, name):
if isinstance(x, tuple):
x = x[0]
stat_tensor(name, x)
hooks = []
for (name, m) in model.named_modules():
if isinstance(m, nn.Linear):
hooks.append(m.register_forward_hook(functools.partial(stat_input_hook, name=name)))
for i in tqdm(range(num_samples)):
input_ids = tokenizer(dataset[i]['text'], return_tensors='pt', max_length=seq_len, truncation=True).input_ids.to(device)
model(input_ids)
for h in hooks:
h.remove()
return act_scales
@torch.no_grad()
def smooth_ln_fcs(ln, fcs, act_scales, alpha=0.5):
if not isinstance(fcs, list):
fcs = [fcs]
assert isinstance(ln, (nn.LayerNorm, LlamaRMSNorm, MistralRMSNorm))
for fc in fcs:
assert isinstance(fc, nn.Linear)
assert ln.weight.numel() == fc.in_features == act_scales.numel()
(device, dtype) = (fcs[0].weight.device, fcs[0].weight.dtype)
act_scales = act_scales.to(device=device, dtype=dtype)
weight_scales = torch.cat([fc.weight.abs().max(dim=0, keepdim=True)[0] for fc in fcs], dim=0)
weight_scales = weight_scales.max(dim=0)[0].clamp(min=1e-05)
scales = (act_scales.pow(alpha) / weight_scales.pow(1 - alpha)).clamp(min=1e-05).to(device).to(dtype)
ln.weight.div_(scales)
if getattr(ln, 'bias', None) is not None:
ln.bias.div_(scales)
for fc in fcs:
fc.weight.mul_(scales.view(1, -1))
@torch.no_grad()
def smooth_lm(model, scales, alpha=0.5):
for (name, module) in model.named_modules():
if isinstance(module, OPTDecoderLayer):
attn_ln = module.self_attn_layer_norm
qkv = [module.self_attn.q_proj, module.self_attn.k_proj, module.self_attn.v_proj]
qkv_input_scales = scales[name + '.self_attn.q_proj']
smooth_ln_fcs(attn_ln, qkv, qkv_input_scales, alpha)
ffn_ln = module.final_layer_norm
fc1 = module.fc1
fc1_input_scales = scales[name + '.fc1']
smooth_ln_fcs(ffn_ln, fc1, fc1_input_scales, alpha)
elif isinstance(module, BloomBlock):
attn_ln = module.input_layernorm
qkv = module.self_attention.query_key_value
qkv_input_scales = scales[name + '.self_attention.query_key_value']
smooth_ln_fcs(attn_ln, qkv, qkv_input_scales, alpha)
ffn_ln = module.post_attention_layernorm
fc1 = module.mlp.dense_h_to_4h
fc1_input_scales = scales[name + '.mlp.dense_h_to_4h']
smooth_ln_fcs(ffn_ln, fc1, fc1_input_scales, alpha)
elif isinstance(module, (LlamaDecoderLayer, MistralDecoderLayer)):
attn_ln = module.input_layernorm
qkv = [module.self_attn.q_proj, module.self_attn.k_proj, module.self_attn.v_proj]
qkv_input_scales = scales[name + '.self_attn.q_proj']
smooth_ln_fcs(attn_ln, qkv, qkv_input_scales, alpha)
ffn_ln = module.post_attention_layernorm
fc = [module.mlp.gate_proj, module.mlp.up_proj]
fc_input_scales = scales[name + '.mlp.gate_proj']
smooth_ln_fcs(ffn_ln, fc, fc_input_scales, alpha)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='facebook/opt-125m', help='model name')
parser.add_argument('--save-path', type=str, default=None, help='smoothed model model save path')
parser.add_argument('--num-samples', type=int, default=512)
parser.add_argument('--seq-len', type=int, default=512)
parser.add_argument('--device', type=str, default=None, help='The device to use for generation.')
args = parser.parse_args()
if args.device is None:
if torch.cuda.is_available():
device = torch.device('cuda')
elif torch.backends.mps.is_available():
device = torch.device('mps')
else:
device = torch.device('cpu')
else:
device = torch.device(args.device)
dataset = load_dataset('lambada', split=f'validation[:{args.num_samples}]').shuffle()
tokenizer = AutoTokenizer.from_pretrained(args.model, model_max_length=args.seq_len)
model = AutoModelForCausalLM.from_pretrained(args.model, torch_dtype='auto').to(device)
act_scales = get_act_scales(model, tokenizer, dataset, args.num_samples, args.seq_len)
smooth_lm(model, act_scales, 0.5)
save_path = args.save_path
if save_path is None:
save_path = os.path.join('smoothed_models', args.model)
model.save_pretrained(save_path)
tokenizer.save_pretrained(save_path)
if __name__ == '__main__':
main()
# File: optimum-quanto-main/optimum/quanto/calibrate.py
from typing import Optional
import torch
from torch.nn.modules.module import register_module_forward_hook, register_module_forward_pre_hook
from torch.overrides import TorchFunctionMode
from .nn import QModuleMixin
from .tensor import ActivationQBytesTensor, QTensor, axis_to_dim, dtype_info, qint8, qtype
__all__ = ['Calibration', 'absmax_scale']
def _updated_scale(scale, new_scale, momentum):
if torch.all(scale == 1):
return new_scale
return momentum * scale + new_scale * (1.0 - momentum)
def absmax_scale(base: torch.Tensor, qtype: qtype=qint8, axis: Optional[int]=None) -> torch.Tensor:
base = torch.abs(base)
if axis is None:
qranges = torch.max(base)
else:
dim = axis_to_dim(base, axis)
qranges = torch.amax(base, dim=dim, keepdim=True)
info = dtype_info(qtype.dtype)
return qranges / info.max
class Calibration(TorchFunctionMode):
def __init__(self, *args, momentum: float=0.9, streamline=True, debug=False, **kwargs):
super().__init__(*args, **kwargs)
self.momentum = momentum
self.streamline = streamline
if streamline:
self.modules_qactivations = {}
self.streamline_hooks = {}
self.debug = debug
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs if kwargs is not None else {}
qinput = QTensor in types
output = func(*args, **kwargs)
if self.streamline and qinput:
for (i, arg) in enumerate(args):
module = getattr(arg, 'src_module', None)
if module is not None:
if isinstance(output, ActivationQBytesTensor):
self.modules_qactivations[module] = True
elif isinstance(output, torch.Tensor):
qactivations_required = self.modules_qactivations.get(module, False)
self.modules_qactivations[module] = qactivations_required
return output
def __enter__(self):
super().__enter__()
self.pre_handle = register_module_forward_pre_hook(self.calibrate_input)
self.post_handle = register_module_forward_hook(self.calibrate_output)
def __exit__(self, exc_type, exc_val, exc_tb):
super().__exit__(exc_type, exc_val, exc_tb)
self.pre_handle.remove()
self.post_handle.remove()
if self.streamline:
for handle in self.streamline_hooks.values():
handle.remove()
def calibrate_input(self, module: torch.nn.Module, input, momentum: float=0.9):
if isinstance(module, QModuleMixin) and module.activation_qtype is not None:
input = input[0]
if isinstance(input, ActivationQBytesTensor):
module.input_scale = torch.max(input._scale)
else:
input_scale = absmax_scale(input, module.activation_qtype)
module.input_scale = _updated_scale(module.input_scale, input_scale, momentum)
if self.streamline and module not in self.streamline_hooks:
self.streamline_hooks[module] = module.register_forward_hook(self.tag_outputs)
return input
def calibrate_output(self, module: torch.nn.Module, input: torch.Tensor, output: torch.Tensor):
if isinstance(module, QModuleMixin) and module.activation_qtype is not None:
output_scale = absmax_scale(output, module.activation_qtype, axis=None)
module.output_scale = _updated_scale(module.output_scale, output_scale, self.momentum)
return output
else:
if self.streamline:
for (name, child) in module.named_children():
if isinstance(child, QModuleMixin) and child.activation_qtype is not None:
qactivations_required = self.modules_qactivations.get(child, False)
if not qactivations_required:
child.disable_output_quantization()
if self.debug:
for (name, child) in module.named_children():
if isinstance(child, QModuleMixin):
classname = child.__class__.__name__
trace = f'{name}({classname}) activations are'
if child.activation_qtype is None:
trace += ' not quantized.'
else:
trace += f' quantized to {child.activation_qtype} with scale {child.output_scale}.'
print(trace)
def tag_outputs(self, module: torch.nn.Module, input: torch.Tensor, output: torch.Tensor):
output.src_module = module
# File: optimum-quanto-main/optimum/quanto/library/extensions/cpp/__init__.py
import os
import torch
from ..extension import Extension
__all__ = []
ext = Extension('quanto_cpp', root_dir=os.path.dirname(__file__), sources=['unpack.cpp', 'pybind_module.cpp'], extra_cflags=['-O3'])
@torch.library.impl('quanto_ext::unpack', ['CPU'])
def unpack_cpp(t: torch.Tensor, bits: int):
return ext.lib.unpack(t, bits)
# File: optimum-quanto-main/optimum/quanto/library/extensions/cuda/__init__.py
import os
import torch
from ..extension import Extension
__all__ = []
def get_max_cuda_arch():
capability_list = []
supported_sm = [int(arch.split('_')[1]) for arch in torch.cuda.get_arch_list() if 'sm_' in arch]
if supported_sm:
max_supported_sm = max(((sm // 10, sm % 10) for sm in supported_sm))
for i in range(torch.cuda.device_count()):
capability = torch.cuda.get_device_capability(i)
capability = min(max_supported_sm, capability)
if capability not in capability_list:
capability_list.append(capability)
max_capability = max(sorted(capability_list)) if len(capability_list) > 0 else (0, 0)
return f'{max_capability[0]}{max_capability[1]}0'
extra_cflags = ['-g', '-O3', '-fopenmp', '-lgomp', '-std=c++17', '-DENABLE_BF16']
extra_cuda_cflags = ['-O3', '-std=c++17', '-DENABLE_BF16', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_BFLOAT16_OPERATORS__', '-U__CUDA_NO_BFLOAT16_CONVERSIONS__', '-U__CUDA_NO_BFLOAT162_OPERATORS__', '-U__CUDA_NO_BFLOAT162_CONVERSIONS__', '--expt-relaxed-constexpr', '--expt-extended-lambda', '--use_fast_math', '--threads=8']
quanto_cuda_arch = get_max_cuda_arch()
extra_cuda_cflags += [f'-DQUANTO_CUDA_ARCH={quanto_cuda_arch}']
module_path = os.path.dirname(__file__)
sources = ['unpack.cu', 'awq/v2/gemm_cuda.cu', 'awq/v2/gemv_cuda.cu', 'marlin/fp8_marlin.cu', 'marlin/gptq_marlin_repack.cu', 'pybind_module.cpp']
ext = Extension('quanto_cuda', root_dir=os.path.dirname(__file__), sources=sources, extra_cflags=extra_cflags, extra_cuda_cflags=extra_cuda_cflags)
@torch.library.impl('quanto_ext::unpack', ['CUDA'])
def unpack_cuda(t: torch.Tensor, bits: int):
return ext.lib.unpack(t, bits)
torch.library.define('quanto::gemm', '(Tensor input, Tensor other, Tensor other_scale, Tensor other_shift, int rows, int out_cols, int in_cols, int bits, int group_size) -> Tensor')
@torch.library.impl('quanto::gemm', ['CUDA'])
def gemm_cuda(input: torch.Tensor, other: torch.Tensor, scales: torch.Tensor, shift: torch.Tensor, rows: int, out_cols: int, in_cols: int, bits: int, group_size: int):
assert out_cols >= 128
assert input.dtype == torch.float16
assert input.numel() == rows * in_cols
assert other.dtype == torch.int16
assert scales.dtype == torch.float16
assert scales.shape[-1] == out_cols
assert shift.dtype == torch.float16
assert shift.shape[-1] == out_cols
assert bits == 4
assert group_size == 128
if rows < 8:
return ext.lib.awq_v2_gemv_f16i4(input, other, scales, shift, rows, out_cols, in_cols, group_size)
return ext.lib.awq_v2_gemm_f16i4(input, other, scales, shift)
@torch.library.custom_op('quanto::fp8_marlin_gemm', mutates_args=(), device_types=['cuda'])
def fp8_marlin_gemm(a: torch.Tensor, b_q_weight: torch.Tensor, b_scales: torch.Tensor, workspace: torch.Tensor, num_bits: int, size_m: int, size_n: int, size_k: int) -> torch.Tensor:
assert b_scales.dtype == torch.float16 or b_scales.dtype == torch.bfloat16
assert b_q_weight.dim() == 2
assert b_q_weight.dtype == torch.int32
return ext.lib.fp8_marlin_gemm(a, b_q_weight, b_scales, workspace, num_bits, size_m, size_n, size_k)
@torch.library.custom_op('quanto::gptq_marlin_repack', mutates_args=(), device_types=['cuda'])
def gptq_marlin_repack(b_q_weight: torch.Tensor, perm: torch.Tensor, size_k: int, size_n: int, num_bits: int) -> torch.Tensor:
assert b_q_weight.dim() == 2
assert b_q_weight.dtype == torch.int32
return ext.lib.gptq_marlin_repack(b_q_weight, perm, size_k, size_n, num_bits)
# File: optimum-quanto-main/optimum/quanto/library/extensions/extension.py
import os
import shutil
import warnings
from typing import List
import torch
from torch.utils.cpp_extension import load
class Extension(object):
def __init__(self, name: str, root_dir: str, sources: List[str], extra_cflags: List[str]=None, extra_cuda_cflags: List[str]=None):
self.name = name
self.sources = [f'{root_dir}/{source}' for source in sources]
self.extra_cflags = extra_cflags
self.extra_cuda_cflags = extra_cuda_cflags
self.build_directory = os.path.join(root_dir, 'build')
self._lib = None
@property
def lib(self):
if self._lib is None:
version_file = os.path.join(self.build_directory, 'pytorch_version.txt')
if os.path.exists(version_file):
with open(version_file, 'r') as f:
pytorch_build_version = f.read().rstrip()
if pytorch_build_version != torch.__version__:
shutil.rmtree(self.build_directory)
warnings.warn(f'{self.name} was compiled with pytorch {pytorch_build_version}, but {torch.__version__} is installed: it will be recompiled.')
os.makedirs(self.build_directory, exist_ok=True)
self._lib = load(name=self.name, sources=self.sources, extra_cflags=self.extra_cflags, extra_cuda_cflags=self.extra_cuda_cflags, build_directory=self.build_directory)
if not os.path.exists(version_file):
with open(version_file, 'w') as f:
f.write(torch.__version__)
return self._lib
# File: optimum-quanto-main/optimum/quanto/library/extensions/mps/__init__.py
import os
import torch
from ..extension import Extension
__all__ = []
ext = Extension('quanto_mps', root_dir=os.path.dirname(__file__), sources=['unpack.mm', 'pybind_module.cpp'], extra_cflags=['-std=c++17'])
@torch.library.impl('quanto_ext::unpack', 'MPS')
def unpack_mps(t: torch.Tensor, bits: int):
return ext.lib.unpack(t, bits)
# File: optimum-quanto-main/optimum/quanto/library/ops.py
import warnings
from contextlib import contextmanager
import torch
_ext_enabled = True
@contextmanager
def disable_extensions():
try:
global _ext_enabled
_ext_enabled = False
yield
finally:
_ext_enabled = True
def define(name, schema):
for libname in ['quanto', 'quanto_py', 'quanto_ext']:
torch.library.define(f'{libname}::{name}', schema)
@torch.library.impl(f'quanto::{name}', 'default')
def impl(*args, **kwargs):
if _ext_enabled:
try:
return getattr(torch.ops.quanto_ext, name)(*args, **kwargs)
except Exception as e:
if isinstance(e, NotImplementedError):
message = f'No optimized kernel found for quanto::{name}.'
else:
message = f'An exception was raised while calling the optimized kernel for quanto::{name}: {e}'
warnings.warn(message + ' Falling back to default implementation.')
return getattr(torch.ops.quanto_py, name)(*args, **kwargs)
define('unpack', '(Tensor self, int bits) -> Tensor')
# File: optimum-quanto-main/optimum/quanto/library/python/unpack.py
import torch
@torch.library.impl('quanto_py::unpack', 'default')
def unpack(packed: torch.Tensor, bits: int) -> torch.Tensor:
unpacked = []
values_per_item = 8 // bits
def rshift(t: torch.Tensor, bits: int):
if t.device.type == 'mps':
return t // 2 ** bits
return t >> bits
for i in range(values_per_item):
mask = 2 ** (bits * (i + 1)) - 1
unpacked.append(rshift(packed & mask, bits * i))
return torch.cat(unpacked).to(torch.uint8)
# File: optimum-quanto-main/optimum/quanto/library/qbytes_mm.py
import torch
from packaging import version
__all__ = []
torch.library.define('quanto::qbytes_mm', '(Tensor A, Tensor B, Tensor scales) -> Tensor')
def qbytes_mm(activations: torch.Tensor, weights: torch.Tensor, output_scales: torch.Tensor) -> torch.Tensor:
activations = activations.to(output_scales.dtype)
if weights.dtype.is_floating_point:
weights = weights.to(output_scales.dtype)
weights = output_scales * weights
return torch.matmul(activations, weights.t())
def qbytes_int_mm(activations: torch.Tensor, weights: torch.Tensor, output_scales: torch.Tensor) -> torch.Tensor:
in_features = activations.shape[-1]
out_features = weights.shape[0]
weights = weights.t()
if activations.ndim == 2:
out_data = torch._int_mm(activations, weights)
else:
output_shape = activations.shape[:-1] + (out_features,)
out_data = torch._int_mm(activations.view(-1, in_features), weights)
out_data = out_data.view(output_shape)
fp32_output = out_data.to(torch.float32) * output_scales.t()
return fp32_output.to(output_scales.dtype)
def qbytes_int8pack_mm(activations: torch.Tensor, weights: torch.Tensor, output_scales: torch.Tensor) -> torch.Tensor:
output_scales = output_scales.flatten()
if activations.ndim == 2:
return torch._weight_int8pack_mm(activations, weights, output_scales)
else:
in_features = activations.shape[-1]
out_features = weights.shape[0]
output_shape = activations.shape[:-1] + (out_features,)
out_data = torch._weight_int8pack_mm(activations.view(-1, in_features), weights, output_scales)
return out_data.view(output_shape)
@torch.library.impl('quanto::qbytes_mm', 'default')
def qbytes_mm_impl_default(activations: torch.Tensor, weights: torch.Tensor, output_scales: torch.Tensor) -> torch.Tensor:
return qbytes_mm(activations, weights, output_scales)
@torch.library.impl('quanto::qbytes_mm', 'CUDA')
def qbytes_mm_impl_cuda(activations: torch.Tensor, weights: torch.Tensor, output_scales: torch.Tensor) -> torch.Tensor:
assert activations.ndim in (2, 3)
in_features = activations.shape[-1]
tokens = activations.shape[0] if activations.ndim == 2 else activations.shape[0] * activations.shape[1]
out_features = weights.shape[0]
if activations.dtype == torch.int8 and weights.dtype == torch.int8 and (tokens > 16) and (tokens % 8 == 0) and (in_features % 8 == 0) and (out_features % 8 == 0):
return qbytes_int_mm(activations, weights, output_scales)
return qbytes_mm(activations, weights, output_scales)
@torch.library.impl('quanto::qbytes_mm', 'CPU')
def qbytes_mm_impl_cpu(activations: torch.Tensor, weights: torch.Tensor, output_scales: torch.Tensor) -> torch.Tensor:
if version.parse(torch.__version__).release > version.parse('2.4.0').release and activations.dtype == torch.int8 and (weights.dtype == torch.int8):
return qbytes_int_mm(activations, weights, output_scales)
in_features = activations.shape[-1]
if activations.dtype == torch.bfloat16 and weights.dtype == torch.int8 and (in_features % 4 == 0):
if type(activations) is not torch.Tensor:
activations = activations.dequantize()
return qbytes_int8pack_mm(activations, weights, output_scales)
return qbytes_mm(activations, weights, output_scales)
@torch.library.impl('quanto_py::qbytes_mm', 'MPS')
def qbytes_mm_impl_mps(activations: torch.Tensor, weights: torch.Tensor, output_scales: torch.Tensor) -> torch.Tensor:
in_features = activations.shape[-1]
out_features = weights.shape[0]
if version.parse(torch.__version__).release >= version.parse('2.4.0').release and activations.dtype == torch.bfloat16 and (weights.dtype == torch.int8) and (in_features % 32 == 0) and (out_features % 32 == 0):
if type(activations) is not torch.Tensor:
activations = activations.dequantize()
return qbytes_int8pack_mm(activations, weights, output_scales)
return qbytes_mm(activations, weights, output_scales)
# File: optimum-quanto-main/optimum/quanto/library/quantize.py
from typing import Union
import torch
from ..tensor import dtype_info, group
@torch.library.custom_op('quanto::quantize_symmetric', mutates_args=())
def quantize_symmetric(base: torch.Tensor, dtype: torch.dtype, axis: Union[int, None], scale: torch.Tensor) -> torch.Tensor:
if axis is None:
if scale.ndim > 0:
raise ValueError('Scale must be a scalar when quantizing per-tensor')
else:
if base.ndim == 1:
raise ValueError('1D Tensors cannot be quantized per-axis')
if axis == base.ndim - 1:
axis = -1
if axis not in (0, -1):
raise ValueError('Quantization is only supported along the first or last axis.')
if base.shape[axis] == 1:
raise ValueError(f'Cannot quantize Tensor of shape {base.shape} along axis {axis} of size 1')
if torch.squeeze(scale).ndim > 1:
raise ValueError('Quantizing along multiple axis is not supported')
if scale.ndim != base.ndim:
raise ValueError('When quantizing per-axis, the scale must be broadcastable to the base (Tip: try to add missing dims of length zero).')
data = base / scale
if not dtype.is_floating_point:
data = torch.round(data)
info = dtype_info(dtype)
return torch.clamp(data, min=info.min, max=info.max).to(dtype)
@torch.library.custom_op('quanto::quantize_affine', mutates_args=())
def quantize_affine(base: torch.Tensor, bits: int, axis: int, group_size: Union[int, None], scale: torch.Tensor, shift: torch.Tensor) -> torch.Tensor:
if axis not in (0, -1):
raise ValueError('QBitsTensor axis parameter must be 0 (first axis) or -1 (last axis)')
if group_size is not None:
base = group(base, axis=axis, group_size=group_size)
if shift.dtype.is_floating_point:
data = torch.round((base + shift) / scale)
else:
data = torch.round(base / scale) + shift
return torch.clamp(data, min=0, max=2 ** bits - 1).to(torch.uint8)
# File: optimum-quanto-main/optimum/quanto/models/__init__.py
import importlib
import os
from collections.abc import Mapping
from typing import Any, Dict, List, Optional, Union
def is_transformers_available() -> bool:
return importlib.util.find_spec('transformers') is not None
def is_diffusers_available() -> bool:
return importlib.util.find_spec('diffusers') is not None
if is_transformers_available():
from .transformers_models import *
if is_diffusers_available():
from .diffusers_models import *
# File: optimum-quanto-main/optimum/quanto/models/diffusers_models.py
import json
import os
from pathlib import Path
from typing import Any, List, Optional, Union
from huggingface_hub import ModelHubMixin, snapshot_download
from ..quantize import Optimizer, freeze, qtype, quantization_map, quantize, requantize
from . import is_diffusers_available
__all__ = ['QuantizedDiffusersModel', 'QuantizedPixArtTransformer2DModel']
if not is_diffusers_available():
raise ImportError(f'{__all__} require the diffusers library')
from diffusers import PixArtTransformer2DModel
from diffusers.models.model_loading_utils import load_state_dict
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import CONFIG_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFETENSORS_WEIGHTS_NAME, _get_checkpoint_shard_files, is_accelerate_available
from .shared_dict import ShardedStateDict
class QuantizedDiffusersModel(ModelHubMixin):
BASE_NAME = 'quanto'
base_class = None
def __init__(self, model: ModelMixin):
if not isinstance(model, ModelMixin) or len(quantization_map(model)) == 0:
raise ValueError('The source model must be a quantized diffusers model.')
self._wrapped = model
def __getattr__(self, name: str) -> Any:
try:
return super().__getattr__(name)
except AttributeError:
wrapped = self.__dict__['_wrapped']
return getattr(wrapped, name)
def forward(self, *args, **kwargs):
return self._wrapped.forward(*args, **kwargs)
def __call__(self, *args, **kwargs):
return self._wrapped.forward(*args, **kwargs)
@staticmethod
def _qmap_name():
return f'{QuantizedDiffusersModel.BASE_NAME}_qmap.json'
@classmethod
def quantize(cls, model: ModelMixin, weights: Optional[Union[str, qtype]]=None, activations: Optional[Union[str, qtype]]=None, optimizer: Optional[Optimizer]=None, include: Optional[Union[str, List[str]]]=None, exclude: Optional[Union[str, List[str]]]=None):
if not isinstance(model, ModelMixin):
raise ValueError('The source model must be a diffusers model.')
quantize(model, weights=weights, activations=activations, optimizer=optimizer, include=include, exclude=exclude)
freeze(model)
return cls(model)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
if cls.base_class is None:
raise ValueError('The `base_class` attribute needs to be configured.')
if not is_accelerate_available():
raise ValueError('Reloading a quantized diffusers model requires the accelerate library.')
from accelerate import init_empty_weights
if os.path.isdir(pretrained_model_name_or_path):
working_dir = pretrained_model_name_or_path
else:
working_dir = snapshot_download(pretrained_model_name_or_path, **kwargs)
qmap_path = os.path.join(working_dir, cls._qmap_name())
if not os.path.exists(qmap_path):
raise ValueError(f'No quantization map found in {pretrained_model_name_or_path}: is this a quantized model ?')
model_config_path = os.path.join(working_dir, CONFIG_NAME)
if not os.path.exists(model_config_path):
raise ValueError(f'{CONFIG_NAME} not found in {pretrained_model_name_or_path}.')
with open(qmap_path, 'r', encoding='utf-8') as f:
qmap = json.load(f)
with open(model_config_path, 'r', encoding='utf-8') as f:
original_model_cls_name = json.load(f)['_class_name']
configured_cls_name = cls.base_class.__name__
if configured_cls_name != original_model_cls_name:
raise ValueError(f'Configured base class ({configured_cls_name}) differs from what was derived from the provided configuration ({original_model_cls_name}).')
config = cls.base_class.load_config(pretrained_model_name_or_path, **kwargs)
with init_empty_weights():
model = cls.base_class.from_config(config)
checkpoint_file = os.path.join(working_dir, SAFE_WEIGHTS_INDEX_NAME)
if os.path.exists(checkpoint_file):
(_, sharded_metadata) = _get_checkpoint_shard_files(working_dir, checkpoint_file)
state_dict = ShardedStateDict(working_dir, sharded_metadata['weight_map'])
else:
checkpoint_file = os.path.join(working_dir, SAFETENSORS_WEIGHTS_NAME)
if not os.path.exists(checkpoint_file):
raise ValueError(f'No safetensor weights found in {pretrained_model_name_or_path}.')
state_dict = load_state_dict(checkpoint_file)
requantize(model, state_dict=state_dict, quantization_map=qmap)
model.eval()
return cls(model)
def _save_pretrained(self, save_directory: Path) -> None:
self._wrapped.save_pretrained(save_directory)
qmap_name = os.path.join(save_directory, self._qmap_name())
qmap = quantization_map(self._wrapped)
with open(qmap_name, 'w', encoding='utf8') as f:
json.dump(qmap, f, indent=4)
class QuantizedPixArtTransformer2DModel(QuantizedDiffusersModel):
base_class = PixArtTransformer2DModel
# File: optimum-quanto-main/optimum/quanto/models/shared_dict.py
import os
from collections.abc import Mapping
from typing import Any, Dict
from safetensors import safe_open
class ShardedStateDict(Mapping):
def __init__(self, base_dir: str, tensor_index: Dict[str, str]):
self._base_dir = base_dir
self._index = tensor_index
self._handles = {}
def __iter__(self):
yield from self._index
def __len__(self):
return self._index.__len__()
def __getitem__(self, key: Any) -> Any:
filename = self._index.__getitem__(key)
if filename not in self._handles:
f = safe_open(os.path.join(self._base_dir, filename), framework='pytorch')
self._handles[filename] = f
f = self._handles[filename]
return f.get_tensor(key)
def __contains__(self, key: object) -> bool:
return self._index.__contains__(key)
def keys(self):
return self._index.keys()
# File: optimum-quanto-main/optimum/quanto/models/transformers_models.py
import json
import os
from pathlib import Path
from typing import Any, List, Optional, Union
from huggingface_hub import ModelHubMixin, snapshot_download
from ..nn import QModuleMixin
from ..quantize import Optimizer, freeze, qtype, quantization_map, quantize, requantize
from . import is_transformers_available
from .shared_dict import ShardedStateDict
__all__ = ['QuantizedTransformersModel', 'QuantizedModelForCausalLM']
if not is_transformers_available():
raise ImportError(f'{__all__} require the transformers library')
from transformers import AutoConfig, AutoModelForCausalLM, PreTrainedModel
from transformers.modeling_utils import get_checkpoint_shard_files, load_state_dict
from transformers.utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, is_accelerate_available
class QuantizedTransformersModel(ModelHubMixin):
BASE_NAME = 'quanto'
auto_class = None
def __init__(self, model: PreTrainedModel):
if not isinstance(model, PreTrainedModel) or len(quantization_map(model)) == 0:
raise ValueError('The source model must be a quantized transformers model.')
self._wrapped = model
def __getattr__(self, name: str) -> Any:
try:
return super().__getattr__(name)
except AttributeError:
wrapped = self.__dict__['_wrapped']
return getattr(wrapped, name)
def forward(self, *args, **kwargs):
return self._wrapped.forward(*args, **kwargs)
@staticmethod
def _qmap_name():
return f'{QuantizedTransformersModel.BASE_NAME}_qmap.json'
@classmethod
def quantize(cls, model: PreTrainedModel, weights: Optional[Union[str, qtype]]=None, activations: Optional[Union[str, qtype]]=None, optimizer: Optional[Optimizer]=None, include: Optional[Union[str, List[str]]]=None, exclude: Optional[Union[str, List[str]]]=None):
if not isinstance(model, PreTrainedModel):
raise ValueError('The source model must be a transformers model.')
quantize(model, weights=weights, activations=activations, optimizer=optimizer, include=include, exclude=exclude)
freeze(model)
return cls(model)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
if cls.auto_class is None:
raise ValueError('Quantized models cannot be reloaded using {cls}: use a specialized quantized class such as QuantizedModelForCausalLM instead.')
if not is_accelerate_available():
raise ValueError('Reloading a quantized transformers model requires the accelerate library.')
from accelerate import init_empty_weights
if os.path.isdir(pretrained_model_name_or_path):
working_dir = pretrained_model_name_or_path
else:
working_dir = snapshot_download(pretrained_model_name_or_path, **kwargs)
qmap_path = os.path.join(working_dir, cls._qmap_name())
if not os.path.exists(qmap_path):
raise ValueError(f'No quantization map found in {pretrained_model_name_or_path}: is this a quantized model ?')
with open(qmap_path, 'r', encoding='utf-8') as f:
qmap = json.load(f)
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
with init_empty_weights():
model = cls.auto_class.from_config(config)
checkpoint_file = os.path.join(working_dir, SAFE_WEIGHTS_INDEX_NAME)
if os.path.exists(checkpoint_file):
(checkpoint_file, sharded_metadata) = get_checkpoint_shard_files(working_dir, checkpoint_file)
state_dict = ShardedStateDict(working_dir, sharded_metadata['weight_map'])
else:
checkpoint_file = os.path.join(working_dir, SAFE_WEIGHTS_NAME)
if not os.path.exists(checkpoint_file):
raise ValueError(f'No safetensor weights found in {pretrained_model_name_or_path}.')
state_dict = load_state_dict(checkpoint_file)
requantize(model, state_dict=state_dict, quantization_map=qmap)
if getattr(model.config, 'tie_word_embeddings', True):
model.tie_weights()
model.eval()
return cls(model)
def _save_pretrained(self, save_directory: Path) -> None:
model = self._wrapped
if getattr(model.config, 'tie_word_embeddings', True):
if isinstance(model.get_input_embeddings(), QModuleMixin) or isinstance(model.get_output_embeddings(), QModuleMixin):
model.config.tie_word_embeddings = False
self._wrapped.save_pretrained(save_directory, safe_serialization=True)
qmap_name = os.path.join(save_directory, self._qmap_name())
qmap = quantization_map(self._wrapped)
with open(qmap_name, 'w', encoding='utf8') as f:
json.dump(qmap, f, indent=4)
class QuantizedModelForCausalLM(QuantizedTransformersModel):
auto_class = AutoModelForCausalLM
# File: optimum-quanto-main/optimum/quanto/nn/qconv2d.py
from typing import Optional
import torch
from ..tensor import Optimizer, qtype
from .qmodule import QModuleMixin, register_qmodule
__all__ = ['QConv2d']
@register_qmodule(torch.nn.Conv2d)
class QConv2d(QModuleMixin, torch.nn.Conv2d):
@classmethod
def qcreate(cls, module, weights: qtype, activations: Optional[qtype]=None, optimizer: Optional[Optimizer]=None, device: Optional[torch.device]=None):
return cls(in_channels=module.in_channels, out_channels=module.out_channels, kernel_size=module.kernel_size, stride=module.stride, padding=module.padding, dilation=module.dilation, groups=module.groups, bias=module.bias is not None, padding_mode=module.padding_mode, dtype=module.weight.dtype, device=device, weights=weights, activations=activations, optimizer=optimizer)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return self._conv_forward(input, self.qweight, self.bias)
# File: optimum-quanto-main/optimum/quanto/nn/qlayernorm.py
from typing import Optional
import torch
from ..tensor import Optimizer, qtype
from .qmodule import QModuleMixin, register_qmodule
__all__ = ['QLayerNorm']
@register_qmodule(torch.nn.LayerNorm)
class QLayerNorm(QModuleMixin, torch.nn.LayerNorm):
@classmethod
def qcreate(cls, module, weights: Optional[qtype]=None, activations: Optional[qtype]=None, optimizer: Optional[Optimizer]=None, device: Optional[torch.device]=None):
if activations is None:
return None
return cls(module.normalized_shape, module.eps, module.elementwise_affine, module.bias is not None, dtype=module.weight.dtype, device=device, weights=None, activations=activations, optimizer=None)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)
# File: optimum-quanto-main/optimum/quanto/nn/qlinear.py
from typing import Optional
import torch
from ..tensor import Optimizer, qtype
from .qmodule import QModuleMixin, register_qmodule
__all__ = ['QLinear']
@register_qmodule(torch.nn.Linear)
class QLinear(QModuleMixin, torch.nn.Linear):
@classmethod
def qcreate(cls, module, weights: qtype, activations: Optional[qtype]=None, optimizer: Optional[Optimizer]=None, device: Optional[torch.device]=None):
return cls(module.in_features, module.out_features, module.bias is not None, dtype=module.weight.dtype, device=device, weights=weights, activations=activations, optimizer=optimizer, quantize_input=True)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.linear(input, self.qweight, bias=self.bias)
# File: optimum-quanto-main/optimum/quanto/nn/qmodule.py
from abc import ABC
from typing import Optional, Union
import torch
from ..tensor import ActivationQBytesTensor, Optimizer, QBitsTensor, QTensor, WeightQBytesTensor, qint2, qint4, qtype, qtypes, quantize_activation, quantize_weight
__all__ = ['QModuleMixin', 'register_qmodule', 'quantize_module']
_QMODULE_TABLE = {}
def register_qmodule(module_cls):
def wrapper(cls):
_QMODULE_TABLE[module_cls] = cls
return cls
return wrapper
def quantize_module(module, weights: Optional[Union[qtype, str]]=None, activations: Optional[Union[qtype, str]]=None, optimizer: Optional[Optimizer]=None):
for cls in _QMODULE_TABLE:
if isinstance(module, cls):
qcls = _QMODULE_TABLE[cls]
return qcls.from_module(module, weights=weights, activations=activations, optimizer=optimizer)
return None
class QModuleMixin(ABC):
def __init__(self, *args, weights: Optional[Union[qtype, str]]=None, activations: Optional[Union[qtype, str]]=None, optimizer: Optional[Optimizer]=None, quantize_input: Optional[bool]=False, device: Optional[torch.device]=None, **kwargs):
mro = self.__class__.__mro__
if torch.nn.Module not in mro:
raise TypeError('Quantized modules must inherit from a torch.nn.Module class')
if mro.index(__class__) > mro.index(torch.nn.Module):
raise TypeError('QModuleMixin must be placed before any torch.nn.Module class in quantized module inheritance.')
super().__init__(*args, device=device, **kwargs)
if weights is not None and (not isinstance(weights, qtype)):
weights = qtypes[weights]
if activations is not None and (not isinstance(activations, qtype)):
activations = qtypes[activations]
self.weight_qtype = weights
self.weight_group_size = None
if self.weight_qtype in (qint2, qint4):
out_features = self.weight.shape[0]
in_features = self.weight.numel() // out_features
group_size = 128
if in_features > group_size:
while in_features % group_size != 0 and group_size > 32:
group_size -= 32
if in_features % group_size == 0:
self.weight_group_size = group_size
self.activation_qtype = activations
self._quantize_hooks = {}
if activations is not None:
if quantize_input:
self._quantize_hooks['input'] = self.register_forward_pre_hook(self.quantize_input)
self._quantize_hooks['output'] = self.register_forward_hook(self.quantize_output)
self.optimizer = optimizer
self.register_buffer('input_scale', torch.ones((), dtype=self.weight.dtype, device=device))
self.register_buffer('output_scale', torch.ones((), dtype=self.weight.dtype, device=device))
def disable_output_quantization(self):
if 'output' in self._quantize_hooks:
self._quantize_hooks['output'].remove()
def _save_to_state_dict(self, destination, prefix, keep_vars):
if self.weight_qtype is None or not self.frozen:
destination[prefix + 'weight'] = self.weight if keep_vars else self.weight.detach()
else:
self.weight.save_to_state_dict(destination, prefix + 'weight.', keep_vars)
if self.bias is not None:
destination[prefix + 'bias'] = self.bias if keep_vars else self.bias.detach()
destination[prefix + 'input_scale'] = self.input_scale if keep_vars else self.input_scale.detach()
destination[prefix + 'output_scale'] = self.output_scale if keep_vars else self.output_scale.detach()
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
weight_name = prefix + 'weight'
if self.weight_qtype is not None and weight_name not in state_dict:
weight_prefix = weight_name + '.'
if self.weight_qtype.bits == 8:
deserialized_weight = WeightQBytesTensor.load_from_state_dict(state_dict, weight_prefix, qtype=self.weight_qtype, axis=0, size=self.weight.size(), stride=self.weight.stride(), activation_qtype=self.activation_qtype, missing_keys=missing_keys)
else:
deserialized_weight = QBitsTensor.load_from_state_dict(state_dict, weight_prefix, qtype=self.weight_qtype, axis=0, group_size=self.weight_group_size, size=self.weight.size(), stride=self.weight.stride(), missing_keys=missing_keys)
if deserialized_weight is not None:
deserialized_weight = deserialized_weight.optimize()
assign_to_params_buffers = local_metadata.get('assign_to_params_buffers', False)
if assign_to_params_buffers and deserialized_weight is not None:
self.weight = torch.nn.Parameter(deserialized_weight)
elif deserialized_weight is not None:
if type(self.weight.data) is not type(deserialized_weight):
self.weight = torch.nn.Parameter(deserialized_weight.to(self.weight.device))
else:
self.weight = torch.nn.Parameter(deserialized_weight.to(self.weight.device))
super()._load_from_state_dict(state_dict, prefix, local_metadata, False, missing_keys, unexpected_keys, error_msgs)
@classmethod
def from_module(cls, module: torch.nn.Module, weights: Optional[qtype]=None, activations: Optional[qtype]=None, optimizer: Optional[Optimizer]=None):
qmodule = cls.qcreate(module, weights, activations, optimizer, device='meta')
if qmodule is None:
return None
qmodule = qmodule.to_empty(device=module.weight.device)
qmodule.input_scale = torch.ones_like(qmodule.input_scale)
qmodule.output_scale = torch.ones_like(qmodule.output_scale)
with torch.no_grad():
qmodule.weight = module.weight
if module.bias is not None:
qmodule.bias = module.bias
return qmodule.to(module.weight.device)
@classmethod
def qcreate(cls, module: torch.nn.Module, weights: Optional[qtype], activations: Optional[qtype]=None, optimizer: Optional[Optimizer]=None, device: Optional[torch.device]=None):
raise NotImplementedError
@property
def qweight(self):
if self.weight_qtype is None:
return None
if isinstance(self.weight, QTensor):
return self.weight
return quantize_weight(self.weight, qtype=self.weight_qtype, axis=0, group_size=self.weight_group_size, optimizer=self.optimizer, activation_qtype=self.activation_qtype)
def qforward(self, input: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
def quantize_input(self, module: torch.nn.Module, input: torch.Tensor) -> torch.Tensor:
input = input[0]
if isinstance(input, ActivationQBytesTensor):
if input.qtype != self.activation_qtype:
raise ValueError(f'Models with heterogeneous quantized activations are not supported: expected {self.activation_qtype.name} input but got {input.qtype.name} instead.')
else:
input = quantize_activation(input, qtype=self.activation_qtype, scale=self.input_scale)
return input
def quantize_output(self, module: torch.nn.Module, input: torch.Tensor, output: torch.Tensor) -> torch.Tensor:
return quantize_activation(output, qtype=self.activation_qtype, scale=self.output_scale)
def freeze(self):
qweight = self.qweight
if qweight is not None:
self.weight = torch.nn.Parameter(qweight)
@property
def frozen(self):
return isinstance(self.weight, QTensor)
# File: optimum-quanto-main/optimum/quanto/quantize.py
from fnmatch import fnmatch
from typing import Any, Dict, List, Optional, Union
import torch
from .nn import QModuleMixin, quantize_module
from .tensor import Optimizer, qtype
__all__ = ['quantize', 'freeze', 'requantize', 'quantization_map']
def set_module_by_name(parent_module, name, child_module):
module_names = name.split('.')
if len(module_names) == 1:
setattr(parent_module, name, child_module)
else:
parent_module_name = name[:name.rindex('.')]
parent_module = parent_module.get_submodule(parent_module_name)
setattr(parent_module, module_names[-1], child_module)
def _quantize_submodule(model: torch.nn.Module, name: str, module: torch.nn.Module, weights: Optional[Union[str, qtype]]=None, activations: Optional[Union[str, qtype]]=None, optimizer: Optional[Optimizer]=None):
qmodule = quantize_module(module, weights=weights, activations=activations, optimizer=optimizer)
if qmodule is not None:
set_module_by_name(model, name, qmodule)
qmodule.name = name
for (name, param) in module.named_parameters():
setattr(module, name, None)
del param
def quantize(model: torch.nn.Module, weights: Optional[Union[str, qtype]]=None, activations: Optional[Union[str, qtype]]=None, optimizer: Optional[Optimizer]=None, include: Optional[Union[str, List[str]]]=None, exclude: Optional[Union[str, List[str]]]=None):
if include is not None:
include = [include] if isinstance(include, str) else include
if exclude is not None:
exclude = [exclude] if isinstance(exclude, str) else exclude
for (name, m) in model.named_modules():
if include is not None and (not any((fnmatch(name, pattern) for pattern in include))):
continue
if exclude is not None and any((fnmatch(name, pattern) for pattern in exclude)):
continue
_quantize_submodule(model, name, m, weights=weights, activations=activations, optimizer=optimizer)
def requantize(model: torch.nn.Module, state_dict: Dict[str, Any], quantization_map: Dict[str, Dict[str, str]], device: torch.device=None):
if device is None:
device = next(model.parameters()).device
if device.type == 'meta':
device = torch.device('cpu')
for (name, m) in model.named_modules():
qconfig = quantization_map.get(name, None)
if qconfig is not None:
weights = qconfig['weights']
if weights == 'none':
weights = None
activations = qconfig['activations']
if activations == 'none':
activations = None
_quantize_submodule(model, name, m, weights=weights, activations=activations)
for (name, m) in model.named_modules():
def move_tensor(t, device):
if t.device.type == 'meta':
return torch.empty_like(t, device=device)
return t.to(device)
for (name, param) in m.named_parameters(recurse=False):
setattr(m, name, torch.nn.Parameter(move_tensor(param, 'cpu')))
for (name, param) in m.named_buffers(recurse=False):
setattr(m, name, move_tensor(param, 'cpu'))
model.to(device)
model.load_state_dict(state_dict, strict=False)
def freeze(model):
for (name, m) in model.named_modules():
if isinstance(m, QModuleMixin):
m.freeze()
def quantization_map(model: torch.nn.Module) -> Dict[str, Dict[str, str]]:
config = {}
for (name, m) in model.named_modules():
if isinstance(m, QModuleMixin):
config[name] = {'weights': m.weight_qtype.name, 'activations': 'none' if m.activation_qtype is None else m.activation_qtype.name}
return config
# File: optimum-quanto-main/optimum/quanto/subpackage/commands/base.py
from optimum.commands import BaseOptimumCLICommand, CommandInfo
from optimum.commands.optimum_cli import optimum_cli_subcommand
from .quantize import QuantizeCommand
__all__ = ['QuantoCommand']
@optimum_cli_subcommand()
class QuantoCommand(BaseOptimumCLICommand):
COMMAND = CommandInfo(name='quanto', help='Hugging Face models quantization tools')
SUBCOMMANDS = (CommandInfo(name='quantize', help='Quantize Hugging Face models.', subcommand_class=QuantizeCommand),)
# File: optimum-quanto-main/optimum/quanto/subpackage/commands/quantize.py
""""""
from typing import TYPE_CHECKING
import torch
from optimum.commands import BaseOptimumCLICommand
from optimum.exporters import TasksManager
from ...models import QuantizedTransformersModel
if TYPE_CHECKING:
from argparse import ArgumentParser
SUPPORTED_LIBRARIES = ['transformers']
def parse_quantize_args(parser: 'ArgumentParser'):
required_group = parser.add_argument_group('Required arguments')
required_group.add_argument('output', type=str, help='The path to save the quantized model.')
required_group.add_argument('-m', '--model', type=str, required=True, help='Hugging Face Hub model id or path to a local model.')
required_group.add_argument('--weights', type=str, default='int8', choices=['int2', 'int4', 'int8', 'float8'], help='The Hugging Face library to use to load the model.')
optional_group = parser.add_argument_group('Optional arguments')
optional_group.add_argument('--revision', type=str, default=None, help='The Hugging Face model revision.')
optional_group.add_argument('--trust_remote_code', action='store_true', default=False, help='Trust remote code when loading the model.')
optional_group.add_argument('--library', type=str, default=None, choices=SUPPORTED_LIBRARIES, help='The Hugging Face library to use to load the model.')
optional_group.add_argument('--task', type=str, default=None, help='The model task (useful for models supporting multiple tasks).')
optional_group.add_argument('--torch_dtype', type=str, default='auto', choices=['auto', 'fp16', 'bf16'], help='The torch dtype to use when loading the model weights.')
optional_group.add_argument('--device', type=str, default='cpu', help='The device to use when loading the model.')
class QuantizeCommand(BaseOptimumCLICommand):
@staticmethod
def parse_args(parser: 'ArgumentParser'):
return parse_quantize_args(parser)
def run(self):
model_name_or_path = self.args.model
library_name = self.args.library
if library_name is None:
library_name = TasksManager.infer_library_from_model(model_name_or_path)
if library_name not in SUPPORTED_LIBRARIES:
raise ValueError(f'{library_name} models are not supported by this CLI, but can be quantized using the python API directly.')
task = self.args.task
if task is None:
task = TasksManager.infer_task_from_model(model_name_or_path)
torch_dtype = self.args.torch_dtype
if torch_dtype != 'auto':
torch_dtype = torch.float16 if self.args.torch_dtype == 'fp16' else torch.bfloat16
model = TasksManager.get_model_from_task(task, model_name_or_path, revision=self.args.revision, trust_remote_code=self.args.trust_remote_code, framework='pt', torch_dtype=torch_dtype, device=torch.device(self.args.device), library_name=library_name, low_cpu_mem_usage=True)
weights = f'q{self.args.weights}'
qmodel = QuantizedTransformersModel.quantize(model, weights=weights)
qmodel.save_pretrained(self.args.output)
# File: optimum-quanto-main/optimum/quanto/tensor/activations/qbytes.py
import ast
import torch
from torch.autograd import Function
from ..qbytes import QBytesTensor
from ..qtensor import qfallback
from ..qtype import qtype, qtypes
__all__ = ['ActivationQBytesTensor']
class ActivationQBytesQuantizer(Function):
@staticmethod
def forward(ctx, base: torch.Tensor, qtype: qtype, scale: torch.Tensor) -> torch.Tensor:
if qtype.bits != 8:
raise ValueError('QBytesTensor can only be of 8-bit qtype')
size = base.size()
stride = base.stride()
data = torch.ops.quanto.quantize_symmetric(base, dtype=qtype.dtype, axis=None, scale=scale)
return ActivationQBytesTensor(qtype, size, stride, data, scale)
@staticmethod
def backward(ctx, gO):
return (gO, None, None, None, None, None)
class ActivationQBytesTensor(QBytesTensor):
@staticmethod
def __new__(cls, qtype, size, stride, data, scale, requires_grad=False):
assert data.device == scale.device
return torch.Tensor._make_wrapper_subclass(cls, size, strides=stride, dtype=scale.dtype, device=data.device, requires_grad=requires_grad)
def __init__(self, qtype, size, stride, data, scale, requires_grad=False):
super().__init__(qtype, None, size, stride, data, scale, requires_grad)
@classmethod
def quantize(cls, base: torch.Tensor, qtype: qtype, scale: torch.Tensor) -> torch.Tensor:
return ActivationQBytesQuantizer.apply(base, qtype, scale)
def __tensor_flatten__(self):
inner_tensors = ['_data', '_scale']
meta = {'qtype': self._qtype.name, 'size': str(list(self.size())), 'stride': str(list(self.stride()))}
return (inner_tensors, meta)
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert len(inner_tensors) == 2
assert len(meta) == 3
(data, scale) = (inner_tensors['_data'], inner_tensors['_scale'])
qtype = qtypes[meta['qtype']]
size = ast.literal_eval(meta['size'])
stride = ast.literal_eval(meta['stride'])
return ActivationQBytesTensor(qtype, size, stride, data, scale)
@classmethod
def __torch_dispatch__(cls, op, types, args, kwargs=None):
from .qbytes_ops import get_qbytestensor_op_dispatch
kwargs = kwargs or {}
op = op.overloadpacket
qdispatch = get_qbytestensor_op_dispatch(op)
if qdispatch is not None:
return qdispatch(*args, **kwargs)
return qfallback(op, *args, **kwargs)
# File: optimum-quanto-main/optimum/quanto/tensor/activations/qbytes_ops.py
import numbers
from functools import partial
from typing import Callable, List
import torch
from ..core import dtype_info
from ..qtensor import QTensor, qfallback
from ..qtype import qint8
from .qbytes import ActivationQBytesTensor
from .quantization import quantize_activation
__all__ = ['get_qbytestensor_op_dispatch', 'register_qbytestensor_op']
_QBYTESTENSOR_OP_TABLE = {}
def register_qbytestensor_op(aten_ops: List[Callable]):
def wrapper(op):
for aten_op in aten_ops:
_QBYTESTENSOR_OP_TABLE[aten_op] = partial(op, aten_op)
return wrapper
def get_qbytestensor_op_dispatch(aten_op):
return _QBYTESTENSOR_OP_TABLE.get(aten_op, None)
def is_scalar(t):
return isinstance(t, numbers.Number) or (type(t) is torch.Tensor and len(t.shape) == 0)
@register_qbytestensor_op([torch.ops.aten._to_copy, torch.ops.aten.to])
def _to_copy(op, t, dtype=None, **kwargs):
out_data = op(t._data, dtype=t._data.dtype, **kwargs)
out_scale = op(t._scale, dtype=dtype, **kwargs)
return ActivationQBytesTensor(t.qtype, t.size(), t.stride(), out_data, out_scale)
@register_qbytestensor_op([torch.ops.aten.detach])
def detach(op, t):
out_data = op(t._data)
out_scale = op(t._scale)
return ActivationQBytesTensor(t.qtype, t.size(), t.stride(), out_data, out_scale)
@register_qbytestensor_op([torch.ops.aten.cat])
def cat(op, inputs, dim=0):
if len(inputs) == 2:
(t1, t2) = inputs
if isinstance(t1, ActivationQBytesTensor) and isinstance(t2, ActivationQBytesTensor) and torch.equal(t1._scale, t2._scale) and (t1.qtype == t2.qtype):
if t1.qtype.is_floating_point or t2.qtype.is_floating_point:
return qfallback(op, inputs, dim)
out_data = op([t1._data, t2._data], dim)
return ActivationQBytesTensor(t1.qtype, out_data.size(), out_data.stride(), out_data, t1._scale)
return qfallback(op, inputs, dim)
@register_qbytestensor_op([torch.ops.aten.lt])
def lt(op, input, other):
if isinstance(input, ActivationQBytesTensor) and isinstance(other, ActivationQBytesTensor) and torch.equal(input._scale, other._scale):
return op(input._data, other._data)
return qfallback(op, input, other)
@register_qbytestensor_op([torch.ops.aten.clone])
def clone(op, t, memory_format=torch.preserve_format):
data_shape = t._data.shape
out_data = t._data.reshape(t.shape)
out_data = op(t._data, memory_format=memory_format)
out_stride = out_data.stride()
out_data = out_data.reshape(data_shape)
out_scale = op(t._scale, memory_format=memory_format)
return ActivationQBytesTensor(t.qtype, t.size(), out_stride, out_data, out_scale)
@register_qbytestensor_op([torch.ops.aten.copy_])
def copy_(op, dest, src):
assert dest.qtype == src.qtype
dest._data = op(dest._data, src._data)
dest._scale = op(dest._scale, src._scale)
return dest
@register_qbytestensor_op([torch.ops.aten.div])
def div(op, input, other):
if not is_scalar(other):
return op(input.dequantize(), other)
return ActivationQBytesTensor(input.qtype, input.size(), input.stride(), input._data, op(input._scale, other))
@register_qbytestensor_op([torch.ops.aten.neg])
def neg(op, input, *args, **kwargs):
if input.qtype.is_floating_point:
return op(input.dequantize(), *args, **kwargs)
out_data = op(input._data, *args, **kwargs)
return ActivationQBytesTensor(input.qtype, input.size(), input.stride(), out_data, input._scale)
@register_qbytestensor_op([torch.ops.aten.expand, torch.ops.aten.permute, torch.ops.aten.select, torch.ops.aten.slice, torch.ops.aten.unsqueeze])
def unary_type_agnostic_op(op, input, *args, **kwargs):
if input.axis is not None:
return op(input.dequantize(), *args, **kwargs)
out_data = op(input._data, *args, **kwargs)
return ActivationQBytesTensor(input.qtype, out_data.size(), out_data.stride(), out_data, input._scale)
@register_qbytestensor_op([torch.ops.aten.is_same_size])
def is_same_size(op, input, other):
a = input._data if isinstance(input, ActivationQBytesTensor) else input
b = other._data if isinstance(other, ActivationQBytesTensor) else other
return op(a, b)
def cannot_mm(t: QTensor):
return t.axis is not None and t.size() != t._data.size()
@register_qbytestensor_op([torch.ops.aten.bmm])
def bmm(op, input, other):
if not isinstance(input, ActivationQBytesTensor):
return op(input, other.dequantize())
if not isinstance(other, QTensor) or input.axis is not None:
return op(input.dequantize(), other)
if input.qtype != qint8 or other.qtype != qint8 or cannot_mm(other):
return qfallback(op, input, other)
out_data = op(input._data.to(torch.float32), other._data.to(torch.float32))
out_scale = (input._scale * other._scale).to(torch.float32)
return (out_data * out_scale).to(input._scale.dtype)
@register_qbytestensor_op([torch.ops.aten.mul])
def mul(op, input, other):
if is_scalar(input):
return ActivationQBytesTensor(other.qtype, other.size(), other.stride(), other._data, input * other._scale)
if is_scalar(other):
return ActivationQBytesTensor(input.qtype, input.size(), input.stride(), input._data, other * input._scale)
return qfallback(op, input, other)
@register_qbytestensor_op([torch.ops.aten.relu])
def relu(op, input):
if input.qtype.is_floating_point:
return qfallback(op, input)
out_data = op(input._data)
return ActivationQBytesTensor(input.qtype, input.size(), input.stride(), out_data, input._scale)
@register_qbytestensor_op([torch.ops.aten._softmax])
def _softmax(op, input, dim, half_to_float):
float_data = op(input.dequantize(), dim, half_to_float)
out_scale = torch.tensor(1 / dtype_info(input.qtype.dtype).max, dtype=input._scale.dtype).to(input.device)
return quantize_activation(float_data, qtype=input.qtype, scale=out_scale)
@register_qbytestensor_op([torch.ops.aten.stack])
def stack(op, inputs, dim=0):
if len(inputs) == 2:
(t1, t2) = inputs
if isinstance(t1, ActivationQBytesTensor) and isinstance(t2, ActivationQBytesTensor) and (t1.axis is None) and (t2.axis is None) and torch.equal(t1._scale, t2._scale) and (t1.qtype == t2.qtype):
out_data = op([t1._data, t2._data], dim)
return ActivationQBytesTensor(t1.qtype, out_data.size(), out_data.stride(), out_data, t1._scale)
return qfallback(inputs, dim)
@register_qbytestensor_op([torch.ops.aten.split])
def split(op, input, *args, **kwargs):
if input.axis is not None:
return qfallback(op, input, *args, **kwargs)
out_datas = op(input._data, *args, **kwargs)
return [ActivationQBytesTensor(input.qtype, input.size(), input.stride(), out_data, input._scale) for out_data in out_datas]
@register_qbytestensor_op([torch.ops.aten.transpose])
def transpose(op, input, *args):
out_data = op(input._data, *args)
out_size = out_data.size()
out_stride = out_data.stride()
out_scale = input._scale
return ActivationQBytesTensor(input.qtype, out_size, out_stride, out_data, out_scale)
@register_qbytestensor_op([torch.ops.aten.t])
def transpose2d(op, input):
out_data = op(input._data)
out_scale = input._scale
(dim0, dim1) = input.size()
out_size = torch.Size([dim1, dim0])
out_stride = input.stride()[::-1]
return ActivationQBytesTensor(input.qtype, out_size, out_stride, out_data, out_scale)
@register_qbytestensor_op([torch.ops.aten.view, torch.ops.aten._unsafe_view])
def view(op, input, *shape):
if input.axis is None:
out_data = op(input._data, *shape)
return ActivationQBytesTensor(input.qtype, out_data.size(), out_data.stride(), out_data, input._scale)
return qfallback(op, input, *shape)
@register_qbytestensor_op([torch.ops.aten.where])
def where(op, condition, input, other):
if isinstance(condition, QTensor) or isinstance(other, QTensor):
raise NotImplementedError
float_data = op(condition, input.dequantize(), other)
if input.axis is None:
return quantize_activation(float_data, qtype=input.qtype, scale=input._scale)
return float_data
# File: optimum-quanto-main/optimum/quanto/tensor/activations/quantization.py
import torch
from ..qtype import qtype
from .qbytes import ActivationQBytesTensor
__all__ = ['quantize_activation']
def quantize_activation(t: torch.Tensor, qtype: qtype, scale: torch.Tensor):
if scale.numel() != 1:
raise ValueError('Parameter scale must be a scalar because activations can only be quantized per-tensor')
return ActivationQBytesTensor.quantize(t, qtype, scale)
# File: optimum-quanto-main/optimum/quanto/tensor/core.py
import torch
__all__ = ['axis_to_dim', 'dtype_info']
def dtype_info(dtype):
info = torch.finfo if dtype.is_floating_point else torch.iinfo
return info(dtype)
def axis_to_dim(t, axis):
dim = list(range(t.ndim))
if axis == -1:
dim = dim[:-1]
else:
dim.remove(axis)
return dim
# File: optimum-quanto-main/optimum/quanto/tensor/function.py
import torch
__all__ = ['QuantizedLinearFunction']
class QuantizedLinearFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, other, bias=None):
ctx.save_for_backward(input, other)
output = torch.matmul(input, other.t())
if bias is not None:
output = output + bias
return output
def backward(ctx, gO):
input_gO = other_gO = bias_gO = None
(input, other) = ctx.saved_tensors
(out_features, in_features) = other.shape
if ctx.needs_input_grad[0]:
input_gO = torch.matmul(gO, other)
if ctx.needs_input_grad[1]:
other_gO = torch.matmul(gO.view(-1, out_features).t(), input.view(-1, in_features))
if ctx.needs_input_grad[2]:
dim = tuple(range(gO.ndim - 1))
bias_gO = gO.sum(dim)
return (input_gO, other_gO, bias_gO)
# File: optimum-quanto-main/optimum/quanto/tensor/optimizers/absmax_optimizer.py
from typing import Optional, Tuple, Union
import torch
from .symmetric_optimizer import SymmetricOptimizer
__all__ = ['AbsmaxOptimizer']
class AbsmaxOptimizer(SymmetricOptimizer):
def optimize(self, base: torch.Tensor, qmax: float, axis: Optional[int]=None) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
base = torch.abs(base)
if axis is None:
rmax = torch.max(base)
else:
dim = list(range(1, base.ndim)) if axis == 0 else list(range(0, base.ndim - 1))
rmax = torch.amax(torch.abs(base), dim=dim, keepdim=True)
return rmax / qmax
# File: optimum-quanto-main/optimum/quanto/tensor/optimizers/affine_optimizer.py
from typing import Optional, Tuple
import torch
from ..qbits import group
from .optimizer import Optimizer
__all__ = ['AffineOptimizer']
class AffineOptimizer(Optimizer):
def __call__(self, base: torch.Tensor, bits: int, axis: int, group_size: Optional[int]=None) -> Tuple[torch.Tensor, torch.Tensor]:
if axis not in [0, -1]:
raise ValueError('axis parameter must be 0 (first axis) or -1 (last axis)')
if group_size is not None:
base = group(base, axis, group_size)
(scale, shift) = self.optimize(base, bits, axis)
assert scale.dtype == base.dtype
assert shift.dtype == base.dtype
return (scale, shift)
def optimize(self, base: torch.Tensor, bits: int, axis: int) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
# File: optimum-quanto-main/optimum/quanto/tensor/optimizers/hqq_optimizer.py
from typing import Optional, Tuple, Union
import torch
from ..qbits import QBitsTensor
from ..qtype import qint2, qint4
from .max_optimizer import MaxOptimizer
__all__ = ['HqqOptimizer']
def shrink_lp_op(x: torch.Tensor, beta: float, lp_norm: float) -> torch.Tensor:
if lp_norm == 1:
return torch.sign(x) * torch.nn.functional.relu(torch.abs(x) - 1.0 / beta)
else:
return torch.sign(x) * torch.nn.functional.relu(torch.abs(x) - 1.0 / beta * torch.pow(torch.abs(x), lp_norm - 1))
class HqqOptimizer(MaxOptimizer):
def __init__(self, lp_norm: Optional[float]=0.7, beta: Optional[int]=10.0, kappa: Optional[float]=1.01, iters: Optional[int]=20, verbose: Optional[bool]=False) -> None:
self.lp_norm = lp_norm
self.beta = beta
self.kappa = kappa
self.iters = iters
self.verbose = verbose
def optimize(self, base: torch.Tensor, bits: int, axis: int) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
(scale, shift) = super().optimize(base, bits, axis)
best_error = None
beta = self.beta
qtype = qint2 if bits == 2 else qint4
base_q = QBitsTensor.quantize(base, qtype, axis, None, scale, shift)
for i in range(self.iters):
error = base - base_q
if best_error is None:
best_error = float(torch.abs(base - base_q).mean())
if self.verbose:
print(f'Start error: {best_error:.6f}')
e = shrink_lp_op(error, beta, self.lp_norm)
mean_axis = 0 if axis == -1 else -1
hqq_shift = torch.mean(base_q._data * scale - (base - e), axis=mean_axis, keepdim=True)
base_q = QBitsTensor.quantize(base, qtype, axis, None, scale, hqq_shift)
mean_error = float(torch.abs(base - base_q).mean())
if self.verbose:
print(f'HQQ error at it #{i}: {mean_error:.6f}')
if mean_error < best_error:
best_error = mean_error
shift = hqq_shift
beta *= self.kappa
else:
break
return (scale, shift)
# File: optimum-quanto-main/optimum/quanto/tensor/optimizers/max_optimizer.py
from typing import Tuple, Union
import torch
from .affine_optimizer import AffineOptimizer
__all__ = ['MaxOptimizer']
class MaxOptimizer(AffineOptimizer):
def optimize(self, base: torch.Tensor, bits: int, axis: int) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
dim = list(range(1, base.ndim)) if axis == 0 else list(range(0, base.ndim - 1))
rmin = torch.amin(base, dim=dim, keepdim=True)
rmax = torch.amax(base, dim=dim, keepdim=True)
qmin = -2 ** (bits - 1)
qmax = 2 ** (bits - 1) - 1
scale = (rmax - rmin) / (qmax - qmin)
shift = -rmin
return (scale, shift)
# File: optimum-quanto-main/optimum/quanto/tensor/optimizers/symmetric_optimizer.py
from typing import Optional
import torch
from .optimizer import Optimizer
__all__ = ['SymmetricOptimizer']
class SymmetricOptimizer(Optimizer):
def __call__(self, base: torch.Tensor, qmax: float, axis: Optional[int]=None) -> torch.Tensor:
if axis not in [None, 0, -1]:
raise ValueError('axis parameter must be None, 0 (first axis) or -1 (last axis)')
if qmax <= 0.0:
raise ValueError('qmax must be set to the maximum positive value that can be represented by the quantized type.')
scale = self.optimize(base, qmax, axis)
assert scale.dtype == base.dtype
return scale
def optimize(self, base: torch.Tensor, qmax: float, axis: Optional[int]=None) -> torch.Tensor:
raise NotImplementedError
# File: optimum-quanto-main/optimum/quanto/tensor/qbits/awq/packed.py
import ast
from copy import copy
from enum import Enum
import numpy as np
import torch
from torch.utils import _pytree as pytree
__all__ = ['AWQPackedTensor', 'AWQPacking']
AWQ_ORDER = [0, 2, 4, 6, 1, 3, 5, 7]
AWQ_REVERSE_ORDER = [0, 4, 1, 5, 2, 6, 3, 7]
def pack(unpacked: torch.Tensor, reorder=False):
bits = 4
pack_num = 32 // bits
packed = torch.zeros(unpacked.shape[0], unpacked.shape[1] // pack_num, dtype=torch.int32, device=unpacked.device)
for col in range(unpacked.shape[1] // pack_num):
if reorder:
order_map = AWQ_ORDER
else:
order_map = [0, 1, 2, 3, 4, 5, 6, 7]
for i in range(pack_num):
packed_col = unpacked[:, col * pack_num + order_map[i]].to(torch.int32)
packed[:, col] |= packed_col << i * bits
return packed
def reverse_awq_order(t: torch.Tensor):
bits = 4
reverse_order_tensor = torch.arange(t.shape[-1], dtype=torch.int32, device=t.device)
reverse_order_tensor = reverse_order_tensor.view(-1, 32 // bits)
reverse_order_tensor = reverse_order_tensor[:, AWQ_REVERSE_ORDER]
reverse_order_tensor = reverse_order_tensor.view(-1)
t = t[:, reverse_order_tensor]
return t
def unpack(packed: torch.Tensor, reorder=False):
bits = 4
shifts = torch.arange(0, 32, bits, device=packed.device)
unpacked = torch.bitwise_right_shift(packed[:, :, None], shifts[None, None, :]).to(torch.int8)
unpacked = unpacked.view(unpacked.shape[0], -1)
if reorder:
unpacked = reverse_awq_order(unpacked)
unpacked = torch.bitwise_and(unpacked, 2 ** bits - 1)
return unpacked
def pack_v2(unpacked: torch.Tensor) -> torch.Tensor:
assert unpacked.device.type == 'cuda'
assert unpacked.ndim == 2
(N, K) = unpacked.shape
I = 4
S = 64
packed = unpacked.reshape(N, K // 32, 4, 4, 2).permute(0, 1, 3, 2, 4)
packed = packed.permute(0, 1, 2, 4, 3)
packed = packed.reshape(N, K)
packed = packed.reshape(N // I, I, K // S, S)
packed = packed.permute(0, 2, 1, 3)
packed = packed.reshape(N // I, K // S, S, I)
packed = packed.to(torch.int32)
packed = packed[..., 0] | packed[..., 1] << 4 | packed[..., 2] << 8 | packed[..., 3] << 12
packed = packed.reshape(N // I, K)
return packed.to(torch.int16).contiguous()
def unpack_v2(packed):
assert packed.device.type == 'cuda'
assert packed.ndim == 2
I = 4
S = 64
(N_div_I, K) = packed.shape
N = N_div_I * I
unpacked = packed.reshape(N // I, K // S, S, 1)
unpacked = unpacked.cpu().numpy().astype(np.uint16)
unpacked = torch.cat([torch.tensor((unpacked & 15).astype(np.uint8)).to(packed.device), torch.tensor(((unpacked & 240) >> 4).astype(np.uint8)).to(packed.device), torch.tensor(((unpacked & 3840) >> 8).astype(np.uint8)).to(packed.device), torch.tensor(((unpacked & 61440) >> 12).astype(np.uint8)).to(packed.device)], axis=-1)
unpacked = unpacked.reshape(N // I, K // S, I, S)
unpacked = unpacked.permute(0, 2, 1, 3)
unpacked = unpacked.reshape(N, K)
unpacked = unpacked.reshape(N, K // 32, 4, 2, 4).permute(0, 1, 2, 4, 3)
unpacked = unpacked.permute(0, 1, 3, 2, 4)
unpacked = unpacked.reshape(N, K)
return unpacked
class AWQPacking(Enum):
V1 = 1
V2 = 2
class AWQPackedTensor(torch.Tensor):
@staticmethod
def __new__(cls, data, packing, reorder, size, stride, requires_grad=False):
assert data.device.type == 'cuda'
assert data.dtype == torch.int32 if packing == AWQPacking.V1 else torch.int16
assert requires_grad is False
return torch.Tensor._make_wrapper_subclass(cls, size, strides=stride, dtype=torch.uint8, device=data.device, requires_grad=requires_grad)
def __init__(self, data, packing, reorder, size, stride, requires_grad=False):
self._data = data
self._packing = packing
self._reorder = reorder
def __repr__(self):
return f'AWQPackedTensor({self._data}, packing={self._packing}, reorder={self._reorder})'
@classmethod
def pack(cls, t, packing=AWQPacking.V1, reorder=False):
if packing == AWQPacking.V1:
data = pack(t, reorder=reorder)
else:
data = pack_v2(t)
return AWQPackedTensor(data, packing, reorder, t.size(), t.stride())
def unpack(self):
if self._packing == AWQPacking.V1:
return unpack(self._data, self._reorder)
return unpack_v2(self._data)
@property
def dtype(self):
return torch.uint8
def __tensor_flatten__(self):
inner_tensors = ['_data']
meta = {'packing': str(self._packing), 'reorder': str(self._reorder), 'size': str(list(self.size())), 'stride': str(self.stride())}
return (inner_tensors, meta)
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert len(inner_tensors) == 1
assert len(meta) == 4
data = inner_tensors['_data']
packing = ast.literal_eval(meta['packing'])
reorder = ast.literal_eval(meta['reorder'])
size = ast.literal_eval(meta['size'])
stride = ast.literal_eval(meta['stride'])
return AWQPackedTensor(data, packing, reorder, size, stride)
__torch_function__ = torch._C._disabled_torch_function_impl
@classmethod
def __torch_dispatch__(cls, op, types, args, kwargs=None):
if op.overloadpacket is torch.ops.aten.detach:
t = args[0]
data = op(t._data)
return AWQPackedTensor(data, t._packing, t._reorder, t.size(), t.stride())
elif op.overloadpacket in (torch.ops.aten._to_copy, torch.ops.aten.to):
t = args[0]
dtype = kwargs.get('dtype', torch.uint8)
if dtype != torch.uint8:
raise ValueError(f'AWQPackedTensor are torch.uint8 only and cannot be moved to {dtype}.')
device = kwargs.get('device', t.device)
if device.type == 'cuda':
data_kwargs = copy(kwargs)
data_kwargs['dtype'] = t._data.dtype
data = op(t._data, **data_kwargs)
return AWQPackedTensor(data, t._packing, t._reorder, t.size(), t.stride())
(args, kwargs) = pytree.tree_map_only(AWQPackedTensor, lambda x: x.unpack(), (args, kwargs or {}))
return op(*args, **kwargs)
def numpy(self):
return self.unpack().cpu().numpy()
# File: optimum-quanto-main/optimum/quanto/tensor/qbits/awq/qbits.py
import ast
import torch
from torch.autograd import Function
from ...function import QuantizedLinearFunction
from ...qtype import qtypes
from ..group import group, ungroup
from ..qbits import QBitsTensor
from .packed import AWQPackedTensor, AWQPacking
__all__ = ['AWQBitsTensor']
class AWQBitsDequantizer(Function):
@staticmethod
def forward(ctx, t):
unpacked = t._data.unpack()
scale = t._scale
shift = t._shift
unpacked = group(unpacked, axis=0, group_size=t._group_size)
n_scales = scale.numel()
scale = scale.t().reshape((n_scales, 1))
shift = shift.t().reshape((n_scales, 1))
dqt = scale * unpacked + shift
return ungroup(dqt, axis=t.axis, orig_shape=t.shape)
@staticmethod
def backward(ctx, gO):
return gO
class AWQBitsLinearFunction(QuantizedLinearFunction):
@staticmethod
def forward(ctx, input, other, bias):
ctx.save_for_backward(input, other)
if type(input) is not torch.Tensor:
input = input.dequantize()
(out_features, in_features) = other.shape
rows = input.numel() // in_features
output = torch.ops.quanto.gemm(input, other._data._data, other._scale, other._shift, rows=rows, out_cols=out_features, in_cols=in_features, bits=4, group_size=other._group_size)
if bias is not None:
output = output + bias
return output
class AWQBitsTensor(QBitsTensor):
@staticmethod
def __new__(cls, qtype, axis, group_size, size, stride, data, scale, shift, requires_grad=False):
assert data.device.type == 'cuda'
assert data.device == scale.device
assert data.device == shift.device
return torch.Tensor._make_wrapper_subclass(cls, size, strides=stride, dtype=scale.dtype, device=data.device, requires_grad=requires_grad)
def __init__(self, qtype, axis, group_size, size, stride, data, scale, shift, requires_grad=False):
assert axis == 0
if not isinstance(data, AWQPackedTensor):
assert type(data) is torch.Tensor
ungrouped = ungroup(data, axis=0, orig_shape=size)
data = AWQPackedTensor.pack(ungrouped, packing=AWQPacking.V2)
(out_features, in_features) = size
scale = scale.reshape(out_features, in_features // group_size).t().contiguous()
shift = shift.reshape(out_features, in_features // group_size).t()
if not shift.dtype.is_floating_point:
shift = scale * shift
shift = -shift.contiguous()
super().__init__(qtype, axis, group_size, size, stride, data, scale, shift)
def dequantize(self):
return AWQBitsDequantizer.apply(self)
def qbits_tensor(self):
data = group(self._data.unpack(), axis=self.axis, group_size=self._group_size)
n_scales = self._scale.numel()
scale = self._scale.t().reshape((n_scales, 1))
shift = -self._shift.t().reshape((n_scales, 1))
return QBitsTensor(self._qtype, self._axis, self._group_size, self.size(), self.stride(), data, scale, shift)
def __tensor_flatten__(self):
inner_tensors = ['_data', '_scale', '_shift']
meta = {'qtype': self._qtype.name, 'axis': str(self._axis), 'group_size': str(self._group_size), 'size': str(list(self.size())), 'stride': str(list(self.stride()))}
return (inner_tensors, meta)
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert len(inner_tensors) == 3
assert len(meta) == 5
(data, scale, shift) = (inner_tensors['_data'], inner_tensors['_scale'], inner_tensors['_shift'])
qtype = qtypes[meta['qtype']]
axis = ast.literal_eval(meta['axis'])
group_size = ast.literal_eval(meta['group_size'])
size = ast.literal_eval(meta['size'])
stride = ast.literal_eval(meta['stride'])
return AWQBitsTensor(qtype, axis, group_size, size, stride, data, scale, shift)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
if func is torch.nn.functional.linear:
def qlinear(input, other, bias=None):
return AWQBitsLinearFunction.apply(input, other, bias)
return qlinear(*args, **kwargs)
with torch._C.DisableTorchFunctionSubclass():
return func(*args, **kwargs)
# File: optimum-quanto-main/optimum/quanto/tensor/qbits/group.py
import math
from typing import List
import torch
__all__ = ['group', 'ungroup', 'grouped_shape']
def grouped_shape(shape: List, axis: int, group_size: int) -> List:
if axis not in (0, -1):
raise ValueError('Axis must be 0 or -1 for group-wise quantization')
n_groups = math.prod(shape) // group_size
return (n_groups, group_size) if axis == 0 else (group_size, n_groups)
def group(base: torch.Tensor, axis: int, group_size: int):
if axis not in (0, -1):
raise ValueError('Axis must be 0 or -1 for group-wise quantization')
axis_dim = base.shape[axis]
axis_numel = base.numel() // axis_dim
if group_size > axis_numel or axis_numel % group_size != 0:
raise ValueError(f'Group size ({group_size}) must be a divisor of ({axis_numel})')
axis_groups = axis_numel // group_size
if axis == 0:
return base.reshape([-1, group_size])
grouped = base.reshape((axis_groups, group_size, axis_dim))
grouped = grouped.permute(1, 2, 0)
return grouped.reshape(group_size, axis_dim * axis_groups)
def ungroup(grouped: torch.Tensor, axis: int, orig_shape: torch.Size):
if grouped.shape == orig_shape:
return grouped
if axis == 0:
return grouped.reshape(orig_shape)
group_size = grouped.shape[0] if axis == -1 else grouped.shape[-1]
axis_dim = orig_shape[axis]
axis_groups = grouped.numel() // axis_dim // group_size
ungrouped = grouped.reshape(group_size, axis_dim, axis_groups)
ungrouped = ungrouped.permute(2, 0, 1)
return ungrouped.reshape(orig_shape)
# File: optimum-quanto-main/optimum/quanto/tensor/qbits/packed.py
import ast
import torch
from torch.utils import _pytree as pytree
__all__ = ['PackedTensor']
def pack_weights(intweights: torch.Tensor, bits: int) -> torch.Tensor:
original_shape = intweights.shape
values_per_item = 8 // bits
row_dim = (original_shape[0] + values_per_item - 1) // values_per_item
if len(original_shape) == 1:
packed_tensor_shape = (row_dim,)
else:
packed_tensor_shape = (row_dim, *original_shape[1:])
packed = torch.zeros(packed_tensor_shape, device=intweights.device, dtype=torch.uint8)
unpacked = intweights.to(torch.uint8)
def lshift(t: torch.Tensor, bits: int):
if t.device.type == 'mps':
return t * 2 ** bits
return t << bits
it = min(values_per_item, original_shape[0] // row_dim + 1)
for i in range(it):
start = i * row_dim
end = min(start + row_dim, original_shape[0])
packed[:end - start] |= lshift(unpacked[start:end], bits * i)
return packed
class PackedTensor(torch.Tensor):
@staticmethod
def __new__(cls, data, bits, size, stride, requires_grad=False):
assert data.dtype == torch.uint8
assert requires_grad is False
return torch.Tensor._make_wrapper_subclass(cls, size, strides=stride, dtype=torch.uint8, device=data.device, requires_grad=requires_grad)
def __init__(self, data, bits, size, stride, requires_grad=False):
self._bits = bits
self._data = data
def __repr__(self):
autograd_info = f', grad_fn={self.grad_fn}' if self.grad_fn else ', requires_grad=True' if self.requires_grad else ''
return f'PackedTensor({self._data}, bits={self._bits}, public_dtype={self.dtype}{autograd_info})'
@classmethod
def pack(cls, t, bits=4):
assert bits in (2, 4)
assert t.dtype == torch.uint8
data = pack_weights(t, bits)
return PackedTensor(data, bits, t.size(), t.stride())
def unpack(self):
unpacked_data = torch.ops.quanto.unpack(self._data, self._bits)
return unpacked_data[:self.shape[0]]
@property
def bits(self):
return self._bits
@property
def dtype(self):
return torch.uint8
@staticmethod
def load_from_state_dict(state_dict, prefix, bits, size, stride, missing_keys):
if prefix + '_data' not in state_dict:
missing_keys.append(prefix + '_data')
return
inner_tensors_dict = {'_data': state_dict.pop(prefix + '_data')}
meta = [name.replace(prefix, '') for name in state_dict.keys() if name.startswith(prefix)]
meta = {'bits': str(bits), 'size': str(list(size)), 'stride': str(stride)}
return PackedTensor.__tensor_unflatten__(inner_tensors_dict, meta, None, None)
def __tensor_flatten__(self):
inner_tensors = ['_data']
meta = {'bits': str(self._bits), 'size': str(list(self.size())), 'stride': str(self.stride())}
return (inner_tensors, meta)
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert len(inner_tensors) == 1
assert len(meta) == 3
data = inner_tensors['_data']
bits = ast.literal_eval(meta['bits'])
size = ast.literal_eval(meta['size'])
stride = ast.literal_eval(meta['stride'])
return PackedTensor(data, bits, size, stride)
__torch_function__ = torch._C._disabled_torch_function_impl
@classmethod
def __torch_dispatch__(cls, op, types, args, kwargs=None):
if op.overloadpacket is torch.ops.aten.detach:
t = args[0]
data = op(t._data)
return PackedTensor(data, t._bits, t.size(), t.stride())
elif op.overloadpacket in (torch.ops.aten._to_copy, torch.ops.aten.to):
t = args[0]
dtype = kwargs.get('dtype', torch.uint8)
if dtype != torch.uint8:
raise ValueError(f'PackedTensor are torch.uint8 only and cannot be moved to {dtype}.')
data = op(t._data, **kwargs)
return PackedTensor(data, t._bits, t.size(), t.stride())
(args, kwargs) = pytree.tree_map_only(PackedTensor, lambda x: x.unpack(), (args, kwargs or {}))
return op(*args, **kwargs)
def numpy(self):
return self.unpack().cpu().numpy()
# File: optimum-quanto-main/optimum/quanto/tensor/qbits/qbits.py
import ast
import torch
from packaging import version
from torch.autograd import Function
from ..function import QuantizedLinearFunction
from ..qtensor import QTensor, qfallback
from ..qtype import qint2, qint4, qtype, qtypes
from .group import grouped_shape, ungroup
from .packed import PackedTensor
__all__ = ['QBitsTensor']
class QBitsQuantizer(Function):
@staticmethod
def forward(ctx, base: torch.Tensor, qtype: qtype, axis: int, group_size: int, scale: torch.Tensor, shift: torch.Tensor):
if qtype not in (qint2, qint4):
raise ValueError('QBitsTensor can only be of qint2 or qint4 qtype')
if axis not in (0, -1):
raise ValueError('QBitsTensor axis parameter must be 0 (first axis) or -1 (last axis)')
size = base.size()
stride = base.stride()
data = torch.ops.quanto.quantize_affine(base, bits=qtype.bits, axis=axis, group_size=group_size, scale=scale, shift=shift)
return QBitsTensor.create(qtype, axis, group_size, size, stride, data, scale, shift)
@staticmethod
def backward(ctx, gO):
return (gO, None, None, None, None, None)
class QBitsDequantizer(Function):
@staticmethod
def forward(ctx, t):
data = t._data.unpack()
shift = t._shift
if not shift.dtype.is_floating_point:
data = data.to(torch.int8) - shift.to(torch.int8)
if t.qtype.is_floating_point:
dqt = t._scale * data.to(t._scale.dtype)
else:
dqt = t._scale * data
if shift.dtype.is_floating_point:
dqt -= shift
if t.axis is None:
return dqt
return ungroup(dqt, axis=t.axis, orig_shape=t.shape)
@staticmethod
def backward(ctx, gO):
return gO
class QBitsTensor(QTensor):
@staticmethod
def create(qtype, axis, group_size, size, stride, data, scale, shift, requires_grad=False):
from .awq import AWQBitsTensor
from .tinygemm import TinyGemmQBitsTensor
if qtype == qint4 and size[0] >= 128 and (scale.dtype == torch.float16) and (axis == 0) and (group_size == 128) and (len(size) == 2) and (data.device.type == 'cuda') and (torch.cuda.get_device_capability(data.device)[0] >= 8):
if type(data) is PackedTensor:
data = data.unpack()
return AWQBitsTensor(qtype, axis, group_size, size, stride, data, scale, shift, requires_grad)
if qtype == qint4 and scale.dtype == torch.bfloat16 and (axis == 0) and (group_size == 128) and (len(size) == 2):
if data.device.type == 'cpu' or (data.device.type == 'cuda' and version.parse(torch.version.cuda).release >= (12, 1) and (torch.cuda.get_device_capability(data.device)[0] >= 8)):
if type(data) is PackedTensor:
data = data.unpack()
return TinyGemmQBitsTensor(qtype, axis, group_size, size, stride, data, (scale, shift), requires_grad)
return QBitsTensor(qtype, axis, group_size, size, stride, data, scale, shift, requires_grad)
@staticmethod
def __new__(cls, qtype, axis, group_size, size, stride, data, scale, shift, requires_grad=False):
assert data.device == scale.device
assert data.device == shift.device
return torch.Tensor._make_wrapper_subclass(cls, size, strides=stride, dtype=scale.dtype, device=data.device, requires_grad=requires_grad)
def __init__(self, qtype, axis, group_size, size, stride, data, scale, shift, requires_grad=False):
super().__init__(qtype, axis)
if type(data) is torch.Tensor:
data = PackedTensor.pack(data, qtype.bits)
self._data = data
self._scale = scale
self._shift = shift
self._group_size = group_size
def __repr__(self):
return f'{type(self).__name__}({self._data}, scale={self._scale}, shift={self._shift}, dtype={self.dtype})'
@classmethod
def quantize(cls, base: torch.Tensor, qtype: qtype, axis: int, group_size: int, scale: torch.Tensor, shift: torch.Tensor):
return QBitsQuantizer.apply(base, qtype, axis, group_size, scale, shift)
def dequantize(self):
return QBitsDequantizer.apply(self)
@staticmethod
def load_from_state_dict(state_dict, prefix, qtype, axis, group_size, size, stride, missing_keys):
if group_size is None:
data_size = size
data_stride = stride
else:
data_size = grouped_shape(size, axis, group_size)
assert len(data_size) == 2
data_stride = (data_size[1], 1)
inner_tensors_dict = {'_data': PackedTensor.load_from_state_dict(state_dict, prefix + '_data.', qtype.bits, data_size, data_stride, missing_keys=missing_keys)}
missing = inner_tensors_dict['_data'] is None
for name in ['_scale', '_shift']:
if prefix + name not in state_dict:
missing_keys.append(prefix + name)
missing = True
else:
inner_tensors_dict[name] = state_dict.pop(prefix + name)
if missing:
return None
meta = {'qtype': qtype.name, 'axis': str(axis), 'group_size': str(group_size), 'size': str(list(size)), 'stride': str(list(stride))}
return QBitsTensor.__tensor_unflatten__(inner_tensors_dict, meta, None, None)
def optimize(self):
if type(self) is not QBitsTensor:
return self
data = self._data.unpack()
return QBitsTensor.create(self.qtype, self.axis, self._group_size, self.size(), self.stride(), data, self._scale, self._shift, self.requires_grad)
def save_to_state_dict(self, destination, prefix, keep_vars):
if type(self) is QBitsTensor:
super().save_to_state_dict(destination, prefix, keep_vars)
else:
self.qbits_tensor().save_to_state_dict(destination, prefix, keep_vars)
def qbits_tensor(self):
raise NotImplementedError
def __tensor_flatten__(self):
inner_tensors = ['_data', '_scale', '_shift']
meta = {'qtype': self._qtype.name, 'axis': str(self._axis), 'group_size': str(self._group_size), 'size': str(list(self.size())), 'stride': str(list(self.stride()))}
return (inner_tensors, meta)
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert len(inner_tensors) == 3
assert len(meta) == 5
(data, scale, shift) = (inner_tensors['_data'], inner_tensors['_scale'], inner_tensors['_shift'])
qtype = qtypes[meta['qtype']]
axis = ast.literal_eval(meta['axis'])
group_size = ast.literal_eval(meta['group_size'])
size = ast.literal_eval(meta['size'])
stride = ast.literal_eval(meta['stride'])
return QBitsTensor(qtype, axis, group_size, size, stride, data, scale, shift)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
if func is torch.nn.functional.linear:
def qlinear(input, other, bias=None):
return QuantizedLinearFunction.apply(input, other, bias)
return qlinear(*args, **kwargs)
elif func is torch.equal:
(input, other) = args
return input.equal(other)
with torch._C.DisableTorchFunctionSubclass():
return func(*args, **kwargs)
@classmethod
def __torch_dispatch__(cls, op, types, args, kwargs=None):
from .qbits_ops import get_qbitstensor_op_dispatch
op = op.overloadpacket
qdispatch = get_qbitstensor_op_dispatch(op)
if qdispatch is not None:
return qdispatch(*args, **kwargs)
return qfallback(op, *args, **kwargs)
# File: optimum-quanto-main/optimum/quanto/tensor/qbits/qbits_ops.py
from functools import partial
from typing import Callable, List
import torch
from .qbits import QBitsTensor
__all__ = ['get_qbitstensor_op_dispatch', 'register_qbitstensor_op']
_QBITSTENSOR_OP_TABLE = {}
def register_qbitstensor_op(aten_ops: List[Callable]):
def wrapper(op):
for aten_op in aten_ops:
_QBITSTENSOR_OP_TABLE[aten_op] = partial(op, aten_op)
return wrapper
def get_qbitstensor_op_dispatch(aten_op):
return _QBITSTENSOR_OP_TABLE.get(aten_op, None)
@register_qbitstensor_op([torch.ops.aten._to_copy])
def _to_copy(op, t, dtype=None, device=None, **kwargs):
if dtype is not None and dtype != t.dtype:
raise ValueError('The dtype of a QBitsTensor cannot be changed')
if type(t) is not QBitsTensor and t.device.type != device.type:
t = t.qbits_tensor()
scale = op(t._scale, dtype=dtype, device=device, **kwargs)
data = op(t._data, device=device, **kwargs)
shift = op(t._shift, device=device, **kwargs)
return QBitsTensor.create(t._qtype, t._axis, t._group_size, t.size(), t.stride(), data, scale, shift)
@register_qbitstensor_op([torch.ops.aten.detach])
def detach(op, t):
(inner_tensor_names, meta) = t.__tensor_flatten__()
detached_tensors = {}
for inner_name in inner_tensor_names:
detached_tensors[inner_name] = op(getattr(t, inner_name))
return t.__class__.__tensor_unflatten__(detached_tensors, meta, t.size(), t.stride())
# File: optimum-quanto-main/optimum/quanto/tensor/qbits/tinygemm/packed.py
import ast
from copy import copy
import torch
from packaging import version
from torch.utils import _pytree as pytree
__all__ = ['TinyGemmPackedTensor']
class TinyGemmPackedTensor(torch.Tensor):
@staticmethod
def __new__(cls, data, size, stride, requires_grad=False):
assert data.dtype == torch.int32
assert requires_grad is False
return torch.Tensor._make_wrapper_subclass(cls, size, strides=stride, dtype=torch.uint8, device=data.device, requires_grad=requires_grad)
def __init__(self, data, size, stride, requires_grad=False):
self._data = data
def __repr__(self):
return f'TinyGemmPackedTensor({self._data})'
@classmethod
def pack(cls, t):
inner_ktiles = 2
t = t.to(torch.int32).contiguous()
if version.parse(torch.__version__).release >= version.parse('2.5.0').release:
t_uint8 = (t[:, ::2] << 4 | t[:, 1::2]).to(torch.uint8)
data = torch._convert_weight_to_int4pack(t_uint8, innerKTiles=inner_ktiles)
else:
data = torch._convert_weight_to_int4pack(t, innerKTiles=inner_ktiles)
return TinyGemmPackedTensor(data, t.size(), t.stride())
def unpack(self):
(out_features, in_features) = self.size()
group_size = 32
scale_and_shift_shape = (in_features // group_size, out_features, 2)
id_scale_and_shift = torch.ones(scale_and_shift_shape, dtype=torch.bfloat16, device=self.device)
id_scale_and_shift[:, :, 1] = 8
identity = torch.eye(in_features, dtype=torch.bfloat16, device=self.device)
unpacked_data = torch._weight_int4pack_mm(identity, self._data, group_size, id_scale_and_shift)
return unpacked_data.t().to(torch.uint8)
@property
def dtype(self):
return torch.uint8
def __tensor_flatten__(self):
inner_tensors = ['_data']
meta = {'size': str(list(self.size())), 'stride': str(self.stride())}
return (inner_tensors, meta)
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert len(inner_tensors) == 1
assert len(meta) == 2
data = inner_tensors['_data']
size = ast.literal_eval(meta['size'])
stride = ast.literal_eval(meta['stride'])
return TinyGemmPackedTensor(data, size, stride)
__torch_function__ = torch._C._disabled_torch_function_impl
@classmethod
def __torch_dispatch__(cls, op, types, args, kwargs=None):
if op.overloadpacket is torch.ops.aten.detach:
t = args[0]
data = op(t._data)
return TinyGemmPackedTensor(data, t.size(), t.stride())
elif op.overloadpacket in (torch.ops.aten._to_copy, torch.ops.aten.to):
t = args[0]
dtype = kwargs.get('dtype', torch.uint8)
if dtype != torch.uint8:
raise ValueError(f'TinyGemmPackedTensor are torch.uint8 only and cannot be moved to {dtype}.')
data_kwargs = copy(kwargs)
data_kwargs['dtype'] = t._data.dtype
if kwargs.get('device', t.device).type != t.device.type:
unpacked = t.unpack()
unpacked = op(unpacked, **data_kwargs)
return TinyGemmPackedTensor.pack(unpacked)
data = op(t._data, **data_kwargs)
return TinyGemmPackedTensor(data, t.size(), t.stride())
(args, kwargs) = pytree.tree_map_only(TinyGemmPackedTensor, lambda x: x.unpack(), (args, kwargs or {}))
return op(*args, **kwargs)
def numpy(self):
return self.unpack().cpu().numpy()
# File: optimum-quanto-main/optimum/quanto/tensor/qbits/tinygemm/qbits.py
import ast
import torch
from torch.autograd import Function
from ...function import QuantizedLinearFunction
from ...qtype import qtypes
from ..group import group, ungroup
from ..qbits import QBitsTensor
from .packed import TinyGemmPackedTensor
__all__ = ['TinyGemmQBitsTensor']
class TinyGemmQBitsDequantizer(Function):
@staticmethod
def forward(ctx, t):
qbt = t.qbits_tensor()
return qbt.dequantize()
@staticmethod
def backward(ctx, gO):
return gO
class TinyGemmQBitsLinearFunction(QuantizedLinearFunction):
@staticmethod
def forward(ctx, input, other, bias):
ctx.save_for_backward(input, other)
if type(input) is not torch.Tensor:
input = input.dequantize()
in_features = input.shape[-1]
out_features = other.shape[0]
output_shape = input.shape[:-1] + (out_features,)
output = torch._weight_int4pack_mm(input.view(-1, in_features), other._data._data, other._group_size, other._scale_shift)
output = output.view(output_shape)
if bias is not None:
output = output + bias
return output
class TinyGemmQBitsTensor(QBitsTensor):
@staticmethod
def __new__(cls, qtype, axis, group_size, size, stride, data, scale_shift, requires_grad=False):
if isinstance(scale_shift, torch.Tensor):
dtype = scale_shift.dtype
assert data.device == scale_shift.device
else:
assert isinstance(scale_shift, (tuple, list))
(scale, shift) = scale_shift
dtype = scale.dtype
assert shift.dtype == dtype
assert data.device == scale.device
assert data.device == shift.device
return torch.Tensor._make_wrapper_subclass(cls, size, strides=stride, dtype=dtype, device=data.device, requires_grad=requires_grad)
def __init__(self, qtype, axis, group_size, size, stride, data, scale_shift, requires_grad=False):
assert axis == 0
if not isinstance(data, TinyGemmPackedTensor):
assert type(data) is torch.Tensor
assert isinstance(scale_shift, (tuple, list))
ungrouped = ungroup(data, axis=0, orig_shape=size)
self._data = TinyGemmPackedTensor.pack(ungrouped)
(out_features, in_features) = size
(scale, shift) = scale_shift
scale = scale.reshape(out_features, in_features // group_size, 1)
shift = shift.reshape(out_features, in_features // group_size, 1)
if not shift.dtype.is_floating_point:
shift = scale * shift
min_range = -shift
half_qrange = 2 ** (qtype.bits - 1) * scale
shift = min_range + half_qrange
self._scale_shift = torch.cat([scale, shift], 2).transpose(0, 1).contiguous()
else:
self._data = data
self._scale_shift = scale_shift
self._qtype = qtype
self._axis = axis
self._group_size = group_size
def dequantize(self):
return TinyGemmQBitsDequantizer.apply(self)
def qbits_tensor(self):
data = group(self._data.unpack(), axis=self.axis, group_size=self._group_size)
n_scales = self._scale_shift.numel() // 2
scale = self._scale_shift[:, :, 0].t().reshape((n_scales, 1))
shift = self._scale_shift[:, :, 1].t().reshape((n_scales, 1))
half_qrange = 2 ** (self.qtype.bits - 1) * scale
shift = half_qrange - shift
return QBitsTensor(self._qtype, self._axis, self._group_size, self.size(), self.stride(), data, scale, shift)
def __tensor_flatten__(self):
inner_tensors = ['_data', '_scale_shift']
meta = {'qtype': self._qtype.name, 'axis': str(self._axis), 'group_size': str(self._group_size), 'size': str(list(self.size())), 'stride': str(list(self.stride()))}
return (inner_tensors, meta)
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert len(inner_tensors) == 2
assert len(meta) == 5
(data, scale_shift) = (inner_tensors['_data'], inner_tensors['_scale_shift'])
qtype = qtypes[meta['qtype']]
axis = ast.literal_eval(meta['axis'])
group_size = ast.literal_eval(meta['group_size'])
size = ast.literal_eval(meta['size'])
stride = ast.literal_eval(meta['stride'])
return TinyGemmQBitsTensor(qtype, axis, group_size, size, stride, data, scale_shift)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
if func is torch.nn.functional.linear:
def qlinear(input, other, bias=None):
return TinyGemmQBitsLinearFunction.apply(input, other, bias)
return qlinear(*args, **kwargs)
with torch._C.DisableTorchFunctionSubclass():
return func(*args, **kwargs)
# File: optimum-quanto-main/optimum/quanto/tensor/qbytes.py
from torch.autograd import Function
from .qtensor import QTensor
__all__ = ['QBytesTensor']
class QBytesDequantizer(Function):
@staticmethod
def forward(ctx, t):
if t.qtype.is_floating_point:
dqt = t._scale * t._data.to(t._scale.dtype)
else:
dqt = t._scale * t._data
return dqt
@staticmethod
def backward(ctx, gO):
return gO
class QBytesTensor(QTensor):
def __init__(self, qtype, axis, size, stride, data, scale, requires_grad=False):
super().__init__(qtype, axis)
self._data = data
self._scale = scale
def __repr__(self):
return f'{self.__class__}({self._data}, scale={self._scale}, dtype={self.dtype})'
def dequantize(self):
return QBytesDequantizer.apply(self)
# File: optimum-quanto-main/optimum/quanto/tensor/qtensor.py
import torch
from torch.utils import _pytree as pytree
__all__ = ['QTensor', 'qfallback']
def qfallback(callable, *args, **kwargs):
(args, kwargs) = pytree.tree_map_only(QTensor, lambda x: x.dequantize(), (args, kwargs or {}))
return callable(*args, **kwargs)
class QTensor(torch.Tensor):
def __init__(self, qtype, axis):
self._qtype = qtype
self._axis = axis
def dequantize(self):
raise NotImplementedError
def save_to_state_dict(self, destination, prefix, keep_vars):
def serialize_tensor_subclass(t, destination, prefix, keep_vars):
(inner_tensors, meta) = t.__tensor_flatten__()
for name in inner_tensors:
inner_tensor = getattr(t, name)
if type(inner_tensor) is torch.Tensor:
destination[prefix + name] = inner_tensor if keep_vars else inner_tensor.detach()
else:
serialize_tensor_subclass(inner_tensor, destination, prefix + name + '.', keep_vars)
serialize_tensor_subclass(self, destination, prefix, keep_vars)
@property
def axis(self):
return self._axis
@property
def qtype(self):
return self._qtype
def numpy(self):
return self.dequantize().cpu().numpy()
def equal(self, other):
if type(self) is not type(other):
return False
(self_tensors, self_meta) = self.__tensor_flatten__()
(_, other_meta) = other.__tensor_flatten__()
for (name, value) in self_meta.items():
if other_meta[name] != value:
return False
for name in self_tensors:
self_t = getattr(self, name)
other_t = getattr(other, name)
if self_t.device.type == 'cpu' and self_t.dtype in (torch.float8_e4m3fn, torch.float8_e5m2):
if self_t.dtype != other_t.dtype:
return False
if not torch.equal(self_t.to(torch.float32), other_t.to(torch.float32)):
return False
elif not torch.equal(self_t, other_t):
return False
return True
# File: optimum-quanto-main/optimum/quanto/tensor/qtype.py
from dataclasses import dataclass
import torch
@dataclass
class qtype:
name: str
is_floating_point: bool
bits: int
dtype: torch.dtype
qmin: float
qmax: float
def __str__(self):
return f'quanto.{self.name}'
def __hash__(self):
return hash(str(self))
def qint(bits):
qmin = -2 ** (bits - 1)
qmax = 2 ** (bits - 1) - 1
return qtype(f'qint{bits}', is_floating_point=False, bits=bits, dtype=torch.int8, qmin=qmin, qmax=qmax)
qint2 = qint(2)
qint4 = qint(4)
qint8 = qint(8)
def qfloat(dtype: torch.dtype):
finfo = torch.finfo(dtype)
qmin = finfo.min
qmax = finfo.max
return qtype(f'q{finfo.dtype}', is_floating_point=True, bits=8, dtype=dtype, qmin=qmin, qmax=qmax)
qfloat8_e4m3fn = qfloat(torch.float8_e4m3fn)
qfloat8_e5m2 = qfloat(torch.float8_e5m2)
qfloat8 = qfloat8_e4m3fn
qtypes = {name: q for (name, q) in locals().items() if isinstance(q, qtype)}
__all__ = ['qtype', 'qtypes'] + [str(name) for name in qtypes.keys()]
# File: optimum-quanto-main/optimum/quanto/tensor/weights/marlin/marlin.py
import ast
import torch
from ...function import QuantizedLinearFunction
from ...qtype import qfloat8_e4m3fn, qtypes
from ..qbytes import WeightQBytesTensor
from .packed import MarlinF8PackedTensor, get_scale_perms
class MarlinF8QBytesLinearFunction(QuantizedLinearFunction):
@staticmethod
def forward(ctx, input, other, bias=None):
ctx.save_for_backward(input, other)
input_shape = input.shape
if input.ndim > 2:
input = input.view(-1, input_shape[-1])
output = torch.ops.quanto.fp8_marlin_gemm(input, b_q_weight=other._data._data, b_scales=other._scale, workspace=other._workspace, num_bits=8, size_m=input.shape[0], size_n=other._scale.shape[1], size_k=input.shape[1])
if len(input_shape) > 2:
output = output.reshape(input_shape[:-1] + (other._scale.shape[1],))
return output
class MarlinF8QBytesTensor(WeightQBytesTensor):
@staticmethod
def __new__(cls, qtype, axis, size, stride, data, scale, requires_grad=False):
assert data.device.type == 'cuda'
assert data.device == scale.device
return torch.Tensor._make_wrapper_subclass(cls, size, strides=stride, dtype=scale.dtype, device=data.device, requires_grad=requires_grad)
def __init__(self, qtype, axis, size, stride, data, scale, requires_grad=False):
assert axis == 0
assert data.ndim == 2
out_features = size[0]
self._workspace = torch.zeros(out_features // 64 * 16, dtype=torch.int, device=data.device)
if data.dtype != torch.int32:
assert scale.shape == (out_features, 1)
scale_perm_single = get_scale_perms()
scale = scale.reshape((-1, len(scale_perm_single)))[:, scale_perm_single]
scale = scale.reshape(-1, out_features).contiguous()
data_packed = MarlinF8PackedTensor.pack(data)
else:
data_packed = data
super().__init__(qtype, axis, size, stride, data_packed, scale, activation_qtype=qfloat8_e4m3fn, requires_grad=requires_grad)
def dequantize(self):
float8_data = self._data.unpack()
scale_perm_single = get_scale_perms()
scale_perm_single_rev = torch.empty_like(scale_perm_single)
scale_perm_single_rev[scale_perm_single] = torch.arange(len(scale_perm_single))
scale_reordered = self._scale.reshape((-1, len(scale_perm_single_rev)))[:, scale_perm_single_rev]
scale_reordered = scale_reordered.reshape(-1, self._scale.shape[1]).contiguous()
return float8_data.to(scale_reordered.dtype) * scale_reordered.T
def __repr__(self):
return f'MarlinF8QBytesTensor({self._data}, scale={self._scale}, dtype={self.dtype})'
def weight_qbytes_tensor(self):
data = self._data.unpack()
scale_perm_single = get_scale_perms()
scale_perm_single_rev = torch.empty_like(scale_perm_single)
scale_perm_single_rev[scale_perm_single] = torch.arange(len(scale_perm_single))
scale_reordered = self._scale.reshape((-1, len(scale_perm_single_rev)))[:, scale_perm_single_rev]
scale_reordered = scale_reordered.reshape(-1, self._scale.shape[1]).t().contiguous()
return WeightQBytesTensor(self._qtype, self._axis, self.size(), self.stride(), data, scale_reordered, self.activation_qtype)
def __tensor_flatten__(self):
inner_tensors = ['_data', '_scale']
meta = {'qtype': self._qtype.name, 'axis': str(self._axis), 'size': str(list(self.size())), 'stride': str(list(self.stride()))}
return (inner_tensors, meta)
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert len(inner_tensors) == 2
assert len(meta) == 4
(data, scale) = (inner_tensors['_data'], inner_tensors['_scale'])
qtype = qtypes[meta['qtype']]
axis = ast.literal_eval(meta['axis'])
size = ast.literal_eval(meta['size'])
stride = ast.literal_eval(meta['stride'])
return MarlinF8QBytesTensor(qtype, axis, size, stride, data, scale)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
if func is torch.nn.functional.linear:
def qlinear(input, other, bias=None):
return MarlinF8QBytesLinearFunction.apply(input, other, bias)
return qlinear(*args, **kwargs)
elif func is torch.equal:
(input, other) = args
return input.equal(other)
with torch._C.DisableTorchFunctionSubclass():
return func(*args, **kwargs)
# File: optimum-quanto-main/optimum/quanto/tensor/weights/marlin/packed.py
import ast
from copy import copy
import torch
from torch.utils import _pytree as pytree
def pack_fp8_as_int32(fp8_tensor: torch.Tensor) -> torch.Tensor:
assert fp8_tensor.dtype == torch.float8_e4m3fn
if fp8_tensor.shape[0] % 4 != 0:
raise ValueError(f'Leading tensor dimension is not divisable by 4: {fp8_tensor.shape[0]}')
reshaped = fp8_tensor.reshape(-1, 4, *fp8_tensor.shape[1:])
byte_tensor = reshaped.view(torch.uint8)
packed = torch.zeros(fp8_tensor.shape[0] // 4, fp8_tensor.shape[1], dtype=torch.int32, device=fp8_tensor.device)
for i in range(4):
packed.bitwise_or_(byte_tensor[:, i].to(torch.int32) << i * 8)
return packed
def unpack_int32_to_fp8(int32_tensor: torch.Tensor) -> torch.Tensor:
bits = 8
unpacked = []
for i in range(4):
mask = 2 ** (bits * (i + 1)) - 1
tmp = (int32_tensor & mask) >> bits * i
tmp = tmp.to(torch.uint8)
unpacked.append(tmp)
unpacked = torch.cat(unpacked).view(torch.float8_e4m3fn)
return unpacked
def get_scale_perms() -> torch.Tensor:
scale_perm_single = []
for i in range(4):
scale_perm_single.extend([2 * i + j for j in [0, 1, 8, 9, 16, 17, 24, 25]])
return torch.tensor(scale_perm_single, dtype=torch.int64)
def get_row_permutation(n_rows: int) -> torch.Tensor:
modulo = n_rows // 4 * 16 - 8
b = n_rows // 2
rows_idx = [i * 16 % modulo for i in range(b)]
rows_idx[-1] = rows_idx[-2] + 16 if b > 2 else 8
rows_idx = torch.tensor(rows_idx)
rows_idx = torch.cat((rows_idx, rows_idx + 1))
rows_idx = torch.tile(rows_idx[:, None], (1, 4))
rows_idx = rows_idx + torch.tensor([[0, 2, 4, 6]])
rows_idx = rows_idx.reshape(-1)
rows_idx_rev = torch.empty_like(rows_idx)
rows_idx_rev[rows_idx] = torch.arange(len(rows_idx))
return rows_idx_rev
def get_column_permutation(n_col: int) -> torch.Tensor:
tile_size = 256
n_blocks = n_col // tile_size
a = torch.arange(tile_size)
rest = a % 8
frac = a // 8
original_index = 32 * rest + frac
original_index = torch.arange(n_blocks)[:, None] * 256 + original_index
original_index = original_index.reshape(-1)
original_index = original_index.reshape(4 * n_blocks, 64)
tmp1 = torch.arange(4)
tmp1 = tmp1.repeat(n_blocks, 1).T.reshape(-1)
tmp2 = torch.arange(n_blocks) * 4
tmp2 = tmp2.repeat(4)
remap_col_index = tmp1 + tmp2
original_index = original_index[remap_col_index]
original_index = original_index.reshape(-1)
return original_index
class MarlinF8PackedTensor(torch.Tensor):
def __new__(cls, data, size, stride, requires_grad=False):
assert data.device.type == 'cuda'
assert data.dtype == torch.int32
assert requires_grad is False
return torch.Tensor._make_wrapper_subclass(cls, size, strides=stride, dtype=torch.int32, device=data.device, requires_grad=requires_grad)
def __init__(self, data, size, stride, requires_grad=False):
self._data = data
def __repr__(self):
return f'MarlinF8PackedTensor({self._data})'
@classmethod
def pack(cls, tensor: torch.Tensor):
(out_features, in_features) = tensor.shape
data_int32 = pack_fp8_as_int32(tensor.T)
perm = torch.empty(0, dtype=torch.int, device=tensor.device)
data_int32 = torch.ops.quanto.gptq_marlin_repack(b_q_weight=data_int32, perm=perm, size_k=in_features, size_n=out_features, num_bits=8)
return cls(data_int32, size=tensor.size(), stride=tensor.stride())
def unpack(self) -> torch.Tensor:
float8_data = unpack_int32_to_fp8(self._data)
uint8_data = float8_data.view(torch.uint8)
(n_rows, n_col) = uint8_data.shape
column_map = get_column_permutation(n_col=n_col)
uint8_data = uint8_data.T.contiguous()
uint8_data = uint8_data[column_map]
uint8_data = uint8_data.T.contiguous()
uint8_data = uint8_data.reshape(uint8_data.shape[0] * 4, -1)
row_map = get_row_permutation(n_rows=n_rows)
uint8_data = uint8_data[row_map]
float8_data = uint8_data.view(torch.float8_e4m3fn)
float8_data = float8_data.T
return float8_data
@property
def dtype(self):
return torch.int32
def __tensor_flatten__(self):
inner_tensors = ['_data']
meta = {'size': str(list(self.size())), 'stride': str(self.stride())}
return (inner_tensors, meta)
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert len(inner_tensors) == 1
assert len(meta) == 2
data = inner_tensors['_data']
size = ast.literal_eval(meta['size'])
stride = ast.literal_eval(meta['stride'])
return MarlinF8PackedTensor(data, size, stride)
__torch_function__ = torch._C._disabled_torch_function_impl
@classmethod
def __torch_dispatch__(cls, op, types, args, kwargs=None):
if op.overloadpacket is torch.ops.aten.detach:
t = args[0]
data = op(t._data)
return cls(data, t.size(), t.stride())
elif op.overloadpacket in (torch.ops.aten._to_copy, torch.ops.aten.to):
t = args[0]
dtype = kwargs.get('dtype', torch.uint8)
if dtype != torch.float8_e4m3fn:
raise ValueError(f'MarlinF8PackedTensor are torch.int32 only and cannot be moved to {dtype}.')
device = kwargs.get('device', t.device)
if device.type == 'cuda':
data_kwargs = copy(kwargs)
data_kwargs['dtype'] = t._data.dtype
data = op(t._data, **data_kwargs)
return cls(data, t.size(), t.stride())
else:
(args, kwargs) = pytree.tree_map_only(cls, lambda x: x.unpack(), (args, kwargs or {}))
return op(*args, **kwargs)
# File: optimum-quanto-main/optimum/quanto/tensor/weights/qbytes.py
import ast
from typing import Optional
import torch
from torch.autograd import Function
from ..function import QuantizedLinearFunction
from ..qbytes import QBytesTensor
from ..qtensor import qfallback
from ..qtype import qtype, qtypes
__all__ = ['WeightQBytesTensor']
class WeightQBytesQuantizer(Function):
@staticmethod
def forward(ctx, base: torch.Tensor, qtype: qtype, axis: int, scale: torch.Tensor, activation_qtype: Optional[qtype]) -> torch.Tensor:
if qtype.bits != 8:
raise ValueError('QBytesTensor can only be of 8-bit qtype')
data = torch.ops.quanto.quantize_symmetric(base, dtype=qtype.dtype, axis=axis, scale=scale)
return WeightQBytesTensor.create(qtype, axis, size=base.size(), stride=base.stride(), data=data, scale=scale, activation_qtype=activation_qtype)
@staticmethod
def backward(ctx, gO):
return (gO, None, None, None, None, None)
class WeightQBytesLinearFunction(QuantizedLinearFunction):
@staticmethod
def forward(ctx, input, other, bias=None):
ctx.save_for_backward(input, other)
if isinstance(input, QBytesTensor):
output = torch.ops.quanto.qbytes_mm(input._data, other._data, input._scale * other._scale)
else:
output = torch.ops.quanto.qbytes_mm(input, other._data, other._scale)
if bias is not None:
output = output + bias
return output
class WeightQBytesTensor(QBytesTensor):
@staticmethod
def create(qtype, axis, size, stride, data, scale, activation_qtype: Optional[qtype]=None, requires_grad=False):
from .marlin import MarlinF8QBytesTensor
if qtype == qtypes['qfloat8_e4m3fn'] and activation_qtype is None and (scale.dtype in [torch.float16, torch.bfloat16]) and (len(size) == 2) and (data.device.type == 'cuda') and (axis == 0) and (torch.cuda.get_device_capability(data.device)[0] >= 8):
if data.dtype == torch.int32 or (data.shape[0] % 64 == 0 and data.shape[1] % 16 == 0):
return MarlinF8QBytesTensor(qtype, axis, size, stride, data, scale, requires_grad)
return WeightQBytesTensor(qtype, axis, size, stride, data, scale, activation_qtype, requires_grad)
@staticmethod
def __new__(cls, qtype, axis, size, stride, data, scale, activation_qtype, requires_grad=False):
assert data.device == scale.device
return torch.Tensor._make_wrapper_subclass(cls, size, strides=stride, dtype=scale.dtype, device=data.device, requires_grad=requires_grad)
def __init__(self, qtype, axis, size, stride, data, scale, activation_qtype, requires_grad=False):
super().__init__(qtype, axis, size, stride, data, scale, requires_grad=requires_grad)
self.activation_qtype = activation_qtype
@classmethod
def quantize(cls, base: torch.Tensor, qtype: qtype, axis: int, scale: torch.Tensor, activation_qtype: Optional[qtype]=None) -> torch.Tensor:
return WeightQBytesQuantizer.apply(base, qtype, axis, scale, activation_qtype)
@staticmethod
def load_from_state_dict(state_dict, prefix, qtype, axis, size, stride, activation_qtype, missing_keys):
inner_tensors_dict = {}
missing = False
for name in ['_data', '_scale']:
if prefix + name not in state_dict:
missing_keys.append(prefix + name)
missing = True
else:
inner_tensors_dict[name] = state_dict.pop(prefix + name)
if missing:
return None
meta = {'qtype': qtype.name, 'axis': str(axis), 'size': str(list(size)), 'stride': str(list(stride)), 'activation_qtype': 'none' if activation_qtype is None else activation_qtype.name}
return WeightQBytesTensor.__tensor_unflatten__(inner_tensors_dict, meta, None, None)
def optimize(self):
if type(self) is not WeightQBytesTensor:
return self
return WeightQBytesTensor.create(self.qtype, self.axis, self.size(), self.stride(), self._data, self._scale, self.activation_qtype, self.requires_grad)
def save_to_state_dict(self, destination, prefix, keep_vars):
if type(self) is WeightQBytesTensor:
super().save_to_state_dict(destination, prefix, keep_vars)
else:
self.weight_qbytes_tensor().save_to_state_dict(destination, prefix, keep_vars)
def weight_qbytes_tensor(self):
raise NotImplementedError
def __tensor_flatten__(self):
inner_tensors = ['_data', '_scale']
meta = {'qtype': self._qtype.name, 'axis': str(self._axis), 'size': str(list(self.size())), 'stride': str(list(self.stride())), 'activation_qtype': 'none' if self.activation_qtype is None else self.activation_qtype.name}
return (inner_tensors, meta)
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert len(inner_tensors) == 2
assert len(meta) == 5
(data, scale) = (inner_tensors['_data'], inner_tensors['_scale'])
qtype = qtypes[meta['qtype']]
axis = ast.literal_eval(meta['axis'])
size = ast.literal_eval(meta['size'])
stride = ast.literal_eval(meta['stride'])
activation_qtype = None if meta['activation_qtype'] == 'none' else qtypes[meta['activation_qtype']]
return WeightQBytesTensor(qtype, axis, size, stride, data, scale, activation_qtype)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
if func is torch.nn.functional.linear:
def qlinear(input, other, bias=None):
return WeightQBytesLinearFunction.apply(input, other, bias)
return qlinear(*args, **kwargs)
elif func is torch.equal:
(input, other) = args
return input.equal(other)
with torch._C.DisableTorchFunctionSubclass():
return func(*args, **kwargs)
@classmethod
def __torch_dispatch__(cls, op, types, args, kwargs=None):
op = op.overloadpacket
if op is torch.ops.aten.detach:
t = args[0]
(inner_tensor_names, meta) = t.__tensor_flatten__()
detached_tensors = {}
for inner_name in inner_tensor_names:
detached_tensors[inner_name] = op(getattr(t, inner_name))
return cls.__tensor_unflatten__(detached_tensors, meta, t.size(), t.stride())
elif op in [torch.ops.aten._to_copy, torch.ops.aten.to]:
t = args[0]
dtype = kwargs.pop('dtype', t.dtype)
device = kwargs.pop('device', t.device)
if dtype != t.dtype:
raise ValueError('The dtype of a weights Tensor cannot be changed')
if type(t) is not WeightQBytesTensor and t.device.type != device.type:
t = t.weight_qbytes_tensor()
out_data = op(t._data, device=device, **kwargs)
out_scale = op(t._scale, device=device, **kwargs)
return WeightQBytesTensor.create(t.qtype, t.axis, t.size(), t.stride(), out_data, out_scale, activation_qtype=t.activation_qtype, requires_grad=t.requires_grad)
elif op is torch.ops.aten.t and cls is WeightQBytesTensor:
t = args[0]
out_data = op(t._data)
out_scale = t._scale
out_axis = t.axis
(dim0, dim1) = t.size()
out_size = torch.Size([dim1, dim0])
out_stride = t.stride()[::-1]
if t.axis is not None:
out_scale = op(out_scale)
out_axis = 0 if out_axis == -1 else -1
return WeightQBytesTensor(t.qtype, out_axis, out_size, out_stride, out_data, out_scale, t.activation_qtype)
kwargs = kwargs or {}
return qfallback(op, *args, **kwargs)
# File: optimum-quanto-main/optimum/quanto/tensor/weights/quantization.py
from typing import Optional
import torch
from ..optimizers import AbsmaxOptimizer, AffineOptimizer, MaxOptimizer, Optimizer, SymmetricOptimizer
from ..qbits import QBitsTensor
from ..qtype import qtype
from .qbytes import WeightQBytesTensor
__all__ = ['quantize_weight']
default_affine_optimizer = MaxOptimizer()
default_symmetric_optimizer = AbsmaxOptimizer()
def quantize_weight(t: torch.Tensor, qtype: qtype, axis: int, group_size: Optional[int]=None, optimizer: Optional[Optimizer]=None, zeropoint: bool=False, activation_qtype: Optional[qtype]=None):
if axis not in (0, -1):
raise ValueError('axis parameter must be 0 (first axis) or -1 (last axis)')
if qtype.bits == 8:
if optimizer is None:
optimizer = default_symmetric_optimizer
elif not isinstance(optimizer, SymmetricOptimizer):
raise ValueError('A SymmetricOptimizer is expected')
if group_size is not None:
raise ValueError('group_size cannot be specified for 8-bit qtypes.')
if axis is not None and t.shape[axis] == 1:
axis = None
scale = optimizer(t, qtype.qmax, axis)
return WeightQBytesTensor.quantize(t, qtype, axis, scale, activation_qtype)
if optimizer is None:
optimizer = default_affine_optimizer
elif not isinstance(optimizer, AffineOptimizer):
raise ValueError('An AffineOptimizer is expected')
(scale, shift) = optimizer(t, qtype.bits, axis, group_size)
if zeropoint:
shift = torch.clamp(torch.round(shift / scale), 0, 2 ** qtype.bits - 1).to(torch.uint8)
return QBitsTensor.quantize(t, qtype, axis, group_size, scale, shift)