docs / huggingface_optimum-nvidia.txt
# File: optimum-nvidia-main/src/optimum/commands/env.py
import platform
import subprocess
import huggingface_hub
from tensorrt import __version__ as trt_version
from tensorrt_llm import __version__ as trtllm_version
from transformers import __version__ as transformers_version
from transformers.utils import is_torch_available
from optimum.commands import BaseOptimumCLICommand, CommandInfo
from optimum.version import __version__ as optimum_version
class EnvironmentCommand(BaseOptimumCLICommand):
COMMAND = CommandInfo(name='env', help='Get information about the environment used.')
@staticmethod
def print_apt_pkgs():
apt = subprocess.Popen(['apt', 'list', '--installed'], stdout=subprocess.PIPE)
grep = subprocess.Popen(['grep', 'cuda'], stdin=apt.stdout, stdout=subprocess.PIPE)
pkgs_list = list(grep.stdout)
for pkg in pkgs_list:
print(pkg.decode('utf-8').split('\n')[0])
def run(self):
pt_version = 'not installed'
if is_torch_available():
import torch
pt_version = torch.__version__
platform_info = {'Platform': platform.platform(), 'Python version': platform.python_version()}
info = {'`tensorrt` version': trt_version, '`tensorrt-llm` version': trtllm_version, '`optimum` version': optimum_version, '`transformers` version': transformers_version, '`huggingface_hub` version': huggingface_hub.__version__, '`torch` version': f'{pt_version}'}
print('\nCopy-and-paste the text below in your GitHub issue:\n')
print('\nPlatform:\n')
print(self.format_dict(platform_info))
print('\nPython packages:\n')
print(self.format_dict(info))
print('\nCUDA system packages:\n')
self.print_apt_pkgs()
# File: optimum-nvidia-main/src/optimum/nvidia/compression/modelopt.py
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Iterable, Optional, Protocol, Union, runtime_checkable
import modelopt.torch.quantization as mtq
import modelopt.torch.sparsity as mts
import torch
from modelopt.torch.export import export_tensorrt_llm_checkpoint
from transformers.quantizers import HfQuantizer
from transformers.utils.quantization_config import QuantizationConfigMixin
from optimum.nvidia.compression import CompressionRecipe
if TYPE_CHECKING:
from modelopt.torch.quantization import QuantizeConfig
from transformers import PreTrainedModel as TransformersPreTrainedModel
from optimum.nvidia.export import Workspace
@runtime_checkable
class IntoModelOptQuantizeConfig(Protocol):
def into_modelopt_qconfig(self) -> 'QuantizeConfig':
...
class ModelOptConfig(QuantizationConfigMixin):
def __init__(self, qconfig: Union['QuantizeConfig', 'IntoModelOptQuantizeConfig'], sparsity: Optional[Union[mts.mode.SparseGPTConfig, mts.mode.SparseMagnitudeConfig]]=None):
self._qconfig = qconfig.into_modelopt_qconfig() if isinstance(qconfig, IntoModelOptQuantizeConfig) else qconfig
self._sparsity = sparsity
@property
def quant_method(self):
return self._qconfig.algorithm
@property
def qconfig(self) -> 'QuantizeConfig':
return self._qconfig
@property
def sparsity(self) -> Optional[Union[mts.mode.SparseGPTConfig, mts.mode.SparseMagnitudeConfig]]:
return self._sparsity
class ModelOptRecipe(CompressionRecipe[ModelOptConfig], ABC):
@property
@abstractmethod
def config(self) -> ModelOptConfig:
raise NotImplementedError()
@property
@abstractmethod
def dataset(self) -> Iterable:
raise NotImplementedError()
class ModelOptQuantizer(HfQuantizer):
def __init__(self, recipe: ModelOptRecipe):
super().__init__(recipe.config)
self._recipe = recipe
def _looper(self, model: 'TransformersPreTrainedModel'):
for sample in self._recipe.dataset:
_ = model(**sample)
def _process_model_before_weight_loading(self, model, **kwargs):
return model
def _process_model_after_weight_loading(self, model, **kwargs):
if 'workspace' not in kwargs:
raise KeyError('workspace not provided but required to generate quantized model representation')
workspace: 'Workspace' = kwargs.pop('workspace')
with torch.inference_mode():
if (sconfig := self._recipe.config.sparsity):
device = model.device
model = mts.sparsify(model.cpu(), sconfig, {'data_loader': self._recipe.dataset, 'collect_func': lambda x: x})
model = mts.export(model)
model.to(device)
qmodel = mtq.quantize(model, vars(self._recipe.config.qconfig), forward_loop=self._looper)
export_tensorrt_llm_checkpoint(qmodel, decoder_type=model.config.model_type, dtype=model.dtype, export_dir=workspace.checkpoints_path, inference_tensor_parallel=1, inference_pipeline_parallel=1, use_nfs_workspace=False, naive_fp8_quantization=False)
return qmodel
@property
def is_serializable(self):
return True
@property
def is_trainable(self):
return True
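# A minimal sketch of how a concrete recipe could be plugged into `ModelOptQuantizer`.
# `build_float8_qconfig` is a hypothetical helper standing in for whatever produces a modelopt
# `QuantizeConfig`; the calibration samples are toy values and only the plumbing is illustrative.
class ExampleFloat8Recipe(ModelOptRecipe):
    def __init__(self, calibration_samples: Iterable):
        self._samples = list(calibration_samples)
    @property
    def config(self) -> ModelOptConfig:
        # build_float8_qconfig() is assumed to return a modelopt QuantizeConfig (not defined here).
        return ModelOptConfig(qconfig=build_float8_qconfig())
    @property
    def dataset(self) -> Iterable:
        # Each sample must be a dict of model inputs, consumed by `_looper` above.
        return self._samples
# quantizer = ModelOptQuantizer(ExampleFloat8Recipe(samples))
# quantizer.postprocess_model(hf_model, workspace=workspace)  # writes a TRT-LLM checkpoint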
# File: optimum-nvidia-main/src/optimum/nvidia/errors.py
from typing import Optional
from optimum.nvidia.utils.nvml import SM_FP8_SUPPORTED
class OptimumNvidiaException(Exception):
def __init__(self, msg: str, operation: Optional[str]=None):
if operation:
super().__init__(f'[{operation}] {msg}.')
else:
super().__init__(f'{msg}')
class UnsupportedModelException(OptimumNvidiaException):
def __init__(self, model_type: str):
super().__init__(f'Model of type {model_type} is not supported. Please open an issue at https://github.com/huggingface/optimum-nvidia/issues')
class UnsupportedHardwareFeature(OptimumNvidiaException):
def __init__(self, msg, feature: str):
super().__init__(msg, feature)
@classmethod
def float8(cls) -> 'UnsupportedHardwareFeature':
return Float8NotSupported()
class Float8NotSupported(UnsupportedHardwareFeature):
def __init__(self):
super().__init__(f'float8 is not supported on your device. Please use a device with compute capabilities {SM_FP8_SUPPORTED}', 'float8')
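# Illustrative only: how these exceptions are meant to surface. The boolean flag is a stand-in
# for a real capability check (see optimum.nvidia.utils.nvml for the actual SM queries).
def require_float8_support(device_supports_float8: bool) -> None:
    if not device_supports_float8:
        raise UnsupportedHardwareFeature.float8()
# try:
#     require_float8_support(False)
# except OptimumNvidiaException as error:
#     print(error)  # "[float8] float8 is not supported on your device. ..."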
# File: optimum-nvidia-main/src/optimum/nvidia/export/cli.py
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from argparse import ArgumentParser
def common_trtllm_export_args(parser: 'ArgumentParser'):
parser.add_argument('model', type=str, help='Model to export.')
required_group = parser.add_argument_group('Required arguments')
required_group.add_argument('--max-input-length', type=int, default=-1, help='Maximum prompt length, in number of tokens. The maximum number of tokens that can be generated is <max-output-length> - <max-input-length>.')
required_group.add_argument('--max-output-length', type=int, default=-1, help='Maximum sequence length, in number of tokens, the model supports.')
optional_group = parser.add_argument_group('Optional arguments')
optional_group.add_argument('-d', '--dtype', type=str, default='auto', help="Computational data type used for the model. Defaults to 'auto', matching the model's data type.")
optional_group.add_argument('--max-batch-size', type=int, default=1, help='Maximum number of concurrent requests the model can process. Defaults to 1.')
optional_group.add_argument('--max-beams-width', type=int, default=1, help='Maximum number of sampling paths ("beams") to evaluate when decoding a new token. Defaults to 1.')
optional_group.add_argument('-q', '--quantization', type=str, help='Path to a quantization recipe file.')
optional_group.add_argument('--destination', type=str, default=None, help='Folder where the resulting exported engines will be stored. Defaults to the Hugging Face Hub cache.')
optional_group.add_argument('--push-to-hub', type=str, help='Repository to push generated engines to.')
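# Small sketch of the parser these arguments populate; the model id and values are placeholders.
def example_parse_export_args():
    from argparse import ArgumentParser
    parser = ArgumentParser('trtllm-export-example')
    common_trtllm_export_args(parser)
    return parser.parse_args(['some-org/some-model', '--max-input-length', '512', '--max-output-length', '1024', '--max-batch-size', '4'])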
# File: optimum-nvidia-main/src/optimum/nvidia/export/config.py
from dataclasses import dataclass
from logging import getLogger
from os import PathLike
from typing import TYPE_CHECKING, Optional, Union
from warnings import warn
from tensorrt_llm import BuildConfig
from tensorrt_llm import Mapping as ShardingInfo
from tensorrt_llm.bindings import QuantMode
from tensorrt_llm.plugin import PluginConfig
from tensorrt_llm.plugin.plugin import ContextFMHAType
from transformers import AutoConfig
from optimum.nvidia.lang import DataType
from optimum.utils import NormalizedConfig
if TYPE_CHECKING:
from transformers import PretrainedConfig
INFER_NUM_LOCAL_GPUS = -1
LOGGER = getLogger()
@dataclass
class ExportConfig:
dtype: str
max_input_len: int
max_output_len: int
max_batch_size: int
max_beam_width: int = 1
max_num_tokens: int = -1
enabled_chunked_context: bool = False
sharding: Optional[ShardingInfo] = None
optimization_level: int = 3
def __post_init__(self):
if self.max_batch_size < 1:
raise ValueError(f'max_batch_size should be >= 1, got {self.max_batch_size}')
@staticmethod
def from_pretrained(model_id_or_path: Union[str, PathLike], max_batch_size: int=1) -> 'ExportConfig':
return ExportConfig.from_config(AutoConfig.from_pretrained(model_id_or_path), max_batch_size)
@staticmethod
def from_config(config: Union[NormalizedConfig, 'PretrainedConfig'], max_batch_size: int=1) -> 'ExportConfig':
if not isinstance(config, NormalizedConfig):
config = NormalizedConfig(config)
dtype = DataType.from_torch(config.torch_dtype).value
max_input_len = config.max_position_embeddings
max_output_len = config.max_position_embeddings
econfig = ExportConfig(dtype=dtype, max_input_len=max_input_len, max_output_len=max_output_len, max_batch_size=max_batch_size)
econfig.with_sharding()
econfig.validate()
return econfig
def validate(self) -> 'ExportConfig':
if self.optimization_level < 0:
raise ValueError(f'optimization_level should be >= 0, got {self.optimization_level}')
if self.max_num_tokens == -1:
if self.enabled_chunked_context:
self.max_num_tokens = 128
warn(f'max_num_tokens set to {self.max_num_tokens} with chunked context enabled might not be optimal.')
else:
self.max_num_tokens = 2 * self.max_input_len
LOGGER.debug(f'Inferred max_num_tokens={self.max_num_tokens}')
return self
@property
def plugin_config(self) -> 'PluginConfig':
config = PluginConfig()
config.gemm_plugin = 'auto'
config.gpt_attention_plugin = 'auto'
config.set_context_fmha(ContextFMHAType.enabled)
if self.sharding.world_size > 1:
config.lookup_plugin = 'auto'
config.set_nccl_plugin()
if DataType(self.dtype) == DataType.FLOAT8:
config.gemm_swiglu_plugin = True
return config
def to_builder_config(self, qmode: Optional['QuantMode']=None, plugin_config: Optional[PluginConfig]=None) -> 'BuildConfig':
self.validate()
plugin_config = plugin_config or self.plugin_config
if qmode:
plugin_config.use_fp8_context_fmha = qmode.has_fp8_qdq() or qmode.has_fp8_kv_cache()
if qmode.is_weight_only():
plugin_config.weight_only_groupwise_quant_matmul_plugin = 'auto'
weight_sparsity = False
else:
weight_sparsity = False
return BuildConfig(max_input_len=self.max_input_len, max_seq_len=self.max_output_len, max_batch_size=self.max_batch_size, max_beam_width=self.max_beam_width, max_num_tokens=self.max_num_tokens, builder_opt=self.optimization_level, plugin_config=plugin_config, use_fused_mlp=True, weight_sparsity=weight_sparsity)
def with_sharding(self, tp: int=1, pp: int=1, gpus_per_node: int=8, sharding: Optional[ShardingInfo]=None) -> 'ExportConfig':
self.sharding = sharding or ShardingInfo(tp_size=tp, pp_size=pp, world_size=tp * pp, gpus_per_node=gpus_per_node)
return self
def auto_parallel(config: 'ExportConfig', world_size: int=INFER_NUM_LOCAL_GPUS) -> 'ExportConfig':
if world_size < 1:
from optimum.nvidia.utils.nvml import get_device_count
world_size = get_device_count()
LOGGER.info(f'Found {world_size} GPUs on the system')
if world_size == 0:
raise ValueError('No GPU found')
elif world_size == 1:
return config.with_sharding(tp=1, pp=1, gpus_per_node=world_size)
else:
LOGGER.info(f'Creating auto-parallelization strategy on {world_size}-GPUs')
LOGGER.warning('Auto-parallelization strategy is currently in beta and might not be optimal')
if world_size == 2:
return config.with_sharding(tp=2, pp=1, gpus_per_node=world_size)
elif world_size == 4:
return config.with_sharding(tp=2, pp=2, gpus_per_node=world_size)
elif world_size == 8:
return config.with_sharding(tp=4, pp=2, gpus_per_node=world_size)
else:
raise ValueError(f'Unsupported number of GPUs: {world_size}. Please open an issue on the optimum-nvidia repository: https://github.com/huggingface/optimum-nvidia')
def sharded(config: 'ExportConfig', tp: int=1, pp: int=1) -> 'ExportConfig':
if tp < 1:
raise ValueError(f'Tensor Parallelism (tp) should be >= 1 (got: tp={tp})')
if pp < 1:
raise ValueError(f'Pipeline Parallelism (pp) should be >= 1 (got: pp={pp})')
return config.with_sharding(sharding=ShardingInfo(tp_size=tp, pp_size=pp, world_size=tp * pp))
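# Usage sketch for the helpers above: build an ExportConfig by hand (values are illustrative),
# request a 2-way tensor-parallel layout, and let validate() infer max_num_tokens.
# `from_pretrained`/`from_config` are the usual entry points when a Hugging Face config exists.
def example_export_config() -> 'ExportConfig':
    config = ExportConfig(dtype='float16', max_input_len=1024, max_output_len=2048, max_batch_size=8)
    config = sharded(config, tp=2, pp=1)
    return config.validate()  # max_num_tokens becomes 2 * max_input_len, sharding.world_size == 2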
# File: optimum-nvidia-main/src/optimum/nvidia/export/converter.py
import shutil
from abc import ABC
from enum import Enum
from logging import getLogger
from os import PathLike
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Sequence, Type, Union
from tensorrt_llm.builder import build
from optimum.nvidia.compression.modelopt import ModelOptQuantizer
from optimum.nvidia.export import Workspace
from optimum.nvidia.utils.nvml import get_device_name, is_post_ampere
if TYPE_CHECKING:
from tensorrt_llm import BuildConfig, Mapping
from tensorrt_llm.models import PretrainedModel
from transformers import PreTrainedModel as TransformersPreTrainedModel
from optimum.nvidia.compression.modelopt import ModelOptRecipe
LOGGER = getLogger()
def infer_plugin_from_build_config(config: 'BuildConfig') -> 'BuildConfig':
if is_post_ampere():
LOGGER.debug('Enabling Paged Context FMHA plugin')
config.plugin_config.update_from_dict({'use_paged_context_fmha': True})
config.plugin_config.update_from_dict({'enable_xqa': False})
return config
class TensorRTArtifactKind(Enum):
CHECKPOINTS = 'checkpoints'
ENGINES = 'engines'
class TensorRTArtifact:
@staticmethod
def checkpoints(root: Union[str, PathLike]) -> 'TensorRTArtifact':
return TensorRTArtifact(TensorRTArtifactKind.CHECKPOINTS, root)
@staticmethod
def engines(root: Union[str, PathLike]) -> 'TensorRTArtifact':
return TensorRTArtifact(TensorRTArtifactKind.ENGINES, root)
def __init__(self, kind: TensorRTArtifactKind, root: Union[str, PathLike]):
self._kind = kind
self._root = root
@property
def kind(self) -> TensorRTArtifactKind:
return self._kind
@property
def root(self) -> Path:
return Path(self._root)
def push_to_hub(self):
raise NotImplementedError()
class TensorRTModelConverter(ABC):
CONFIG_CLASS: Type
MODEL_CLASS: Type
def __init__(self, model_id: str, subpart: str='', workspace: Optional[Union['Workspace', str, bytes, Path]]=None, license_path: Optional[Union[str, bytes, Path]]=None):
LOGGER.info(f'Creating a model converter for {subpart}')
if not workspace:
target_device = get_device_name(0)[-1]
workspace = Workspace.from_hub_cache(model_id, target_device, subpart=subpart)
if isinstance(workspace, (str, bytes, Path)):
workspace = Workspace(Path(workspace))
LOGGER.debug(f'Initializing model converter workspace at {workspace.root}')
self._workspace = workspace
self._license_path = license_path
@property
def workspace(self) -> Workspace:
return self._workspace
def save_license(self, licence_filename: str='LICENSE'):
if not (dst_licence_file_path := (self.workspace.root / licence_filename)).exists() and self._license_path:
shutil.copyfile(self._license_path, dst_licence_file_path)
def quantize(self, model: 'TransformersPreTrainedModel', qconfig: 'ModelOptRecipe') -> TensorRTArtifact:
quantizer = ModelOptQuantizer(qconfig)
quantizer.preprocess_model(model, workspace=self.workspace)
quantizer.postprocess_model(model, workspace=self.workspace)
self.save_license()
return TensorRTArtifact.checkpoints(self._workspace.checkpoints_path)
def convert(self, models: Union['PretrainedModel', Sequence['PretrainedModel']], mapping: Optional['Mapping']=None) -> TensorRTArtifact:
if not isinstance(models, Sequence):
models = [models]
for (rank, model) in enumerate(models):
LOGGER.info(f'Converting {models[0].config.architecture} model for rank {rank} to TRTLLM')
model.save_checkpoint(str(self._workspace.checkpoints_path))
self.save_license()
return TensorRTArtifact.checkpoints(str(self._workspace.checkpoints_path))
def build(self, models: Union['PretrainedModel', Sequence['PretrainedModel']], config: 'BuildConfig') -> TensorRTArtifact:
if not isinstance(models, Sequence):
models = [models]
config = infer_plugin_from_build_config(config)
for model in models:
LOGGER.info(f'Building TRTLLM engine for rank {model.config.mapping.rank} ->> {config.to_dict()}')
engine = build(model, config)
engine.save(str(self._workspace.engines_path))
self.save_license()
return TensorRTArtifact.engines(str(self._workspace.engines_path))
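# Usage sketch: turn already-instantiated ranked TRT-LLM models into checkpoints, then engines.
# `ranked_models` and `build_config` are assumed to come from the hub helpers (see
# optimum/nvidia/hub.py); the model id is a placeholder.
def example_convert_and_build(ranked_models, build_config) -> TensorRTArtifact:
    converter = TensorRTModelConverter('some-org/some-model', subpart='decoder')
    converter.convert(ranked_models)  # optional intermediate checkpoints
    return converter.build(ranked_models, build_config)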
# File: optimum-nvidia-main/src/optimum/nvidia/export/workspace.py
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable, Optional
from huggingface_hub import cached_assets_path
from tensorrt_llm import __version__ as TRTLLM_VERSION
from optimum.nvidia import LIBRARY_NAME
from optimum.nvidia.export import PATH_FILE_CHECKPOINTS, PATH_FILE_ENGINES, PATH_FOLDER_CHECKPOINTS, PATH_FOLDER_ENGINES
@dataclass
class Workspace:
root: Path
@staticmethod
def from_hub_cache(model_id: str, device: str, namespace: str=LIBRARY_NAME, version: str=TRTLLM_VERSION, subpart: Optional[str]=None) -> 'Workspace':
assets_path = cached_assets_path(namespace, namespace=version, subfolder=model_id)
assets_path = assets_path.joinpath(device)
if subpart:
assets_path = assets_path.joinpath(subpart)
assets_path.mkdir(exist_ok=True, parents=True)
return Workspace(assets_path)
def __post_init__(self):
if not self.checkpoints_path.exists():
self.checkpoints_path.mkdir(parents=True)
if not self.engines_path.exists():
self.engines_path.mkdir(parents=True)
@property
def checkpoints_path(self) -> Path:
return self.root / PATH_FOLDER_CHECKPOINTS
@property
def engines_path(self) -> Path:
return self.root / PATH_FOLDER_ENGINES
@property
def checkpoints(self) -> Iterable[Path]:
return self.checkpoints_path.glob(PATH_FILE_CHECKPOINTS)
@property
def engines(self) -> Iterable[Path]:
return self.engines_path.glob(PATH_FILE_ENGINES)
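# Sketch: resolve (and create) the per-model cache layout used by the exporter. The model id and
# device name are placeholders; `get_device_name` from optimum.nvidia.utils.nvml normally
# provides the device string.
def example_workspace() -> Workspace:
    workspace = Workspace.from_hub_cache('some-org/some-model', 'A100', subpart='decoder')
    print(workspace.checkpoints_path, workspace.engines_path)
    return workspace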
# File: optimum-nvidia-main/src/optimum/nvidia/generation/logits_process.py
import torch
from transformers import ForceTokensLogitsProcessor, SuppressTokensAtBeginLogitsProcessor, SuppressTokensLogitsProcessor
from transformers.generation.logits_process import WhisperNoSpeechDetection
class TrtSuppressTokensLogitsProcessor(SuppressTokensLogitsProcessor):
def __call__(self, step: int, input_ids: torch.Tensor, scores: torch.Tensor):
scores = super().__call__(input_ids, scores)
return scores
class TrtSuppressTokensAtBeginLogitsProcessor(SuppressTokensAtBeginLogitsProcessor):
def __call__(self, step: int, input_ids: torch.Tensor, scores: torch.Tensor):
scores = super().__call__(input_ids, scores)
return scores
class TrtForceTokensLogitsProcessor(ForceTokensLogitsProcessor):
def __call__(self, step: int, input_ids: torch.Tensor, scores: torch.Tensor):
scores = super().__call__(input_ids, scores)
return scores
class TrtWhisperNoSpeechDetection(WhisperNoSpeechDetection):
def __call__(self, step: int, input_ids: torch.Tensor, scores: torch.Tensor):
scores = super().__call__(input_ids, scores)
return scores
LOGITS_PROCESSOR_MAP = {SuppressTokensLogitsProcessor: TrtSuppressTokensLogitsProcessor, SuppressTokensAtBeginLogitsProcessor: TrtSuppressTokensAtBeginLogitsProcessor, ForceTokensLogitsProcessor: TrtForceTokensLogitsProcessor, WhisperNoSpeechDetection: TrtWhisperNoSpeechDetection}
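# Sketch: wrap a standard transformers processor into its TensorRT-aware counterpart, whose
# __call__ additionally receives the decoding `step`. Token ids and tensor shapes are illustrative.
def example_wrapped_processor() -> torch.Tensor:
    hf_processor = SuppressTokensLogitsProcessor([0, 1])
    trt_processor_cls = LOGITS_PROCESSOR_MAP[type(hf_processor)]
    trt_processor = trt_processor_cls([0, 1])
    input_ids = torch.tensor([[2, 3, 4]])
    scores = torch.zeros(1, 32000)
    return trt_processor(step=0, input_ids=input_ids, scores=scores)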
# File: optimum-nvidia-main/src/optimum/nvidia/hub.py
import re
from abc import ABCMeta, abstractmethod
from logging import getLogger
from os import PathLike, scandir, symlink
from pathlib import Path
from shutil import copyfile, copytree
from typing import Dict, Generator, Iterable, List, Mapping, Optional, Type, Union
import torch.cuda
from huggingface_hub import ModelHubMixin, snapshot_download
from huggingface_hub.hub_mixin import T
from tensorrt_llm import __version__ as trtllm_version
from tensorrt_llm.models import PretrainedConfig
from tensorrt_llm.models import PretrainedModel as TrtLlmPreTrainedModel
from transformers import AutoConfig, GenerationConfig
from transformers import PretrainedConfig as TransformersPretrainedConfig
from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, SAFE_WEIGHTS_INDEX_NAME
from optimum.nvidia import LIBRARY_NAME
from optimum.nvidia.compression.modelopt import ModelOptRecipe
from optimum.nvidia.export import PATH_FOLDER_ENGINES, ExportConfig, TensorRTModelConverter, Workspace, auto_parallel
from optimum.nvidia.lang import DataType
from optimum.nvidia.models import SupportsFromHuggingFace, SupportsTransformersConversion
from optimum.nvidia.models.base import SupportFromTrtLlmCheckpoint
from optimum.nvidia.utils import get_user_agent
from optimum.nvidia.utils.nvml import get_device_count, get_device_name
from optimum.utils import NormalizedConfig
ATTR_TRTLLM_ENGINE_FOLDER = '__trtllm_engine_folder__'
FILE_TRTLLM_ENGINE_PATTERN = 'rank[0-9]*.engine'
FILE_TRTLLM_CHECKPOINT_PATTERN = 'rank[0-9]*.safetensors'
FILE_LICENSE_NAME = 'LICENSE'
HUB_SNAPSHOT_ALLOW_PATTERNS = [CONFIG_NAME, GENERATION_CONFIG_NAME, SAFE_WEIGHTS_INDEX_NAME, '*.safetensors', FILE_LICENSE_NAME]
LOGGER = getLogger()
def folder_list_engines(folder: Path) -> Iterable[Path]:
if folder.exists():
return list(folder.glob('*.engine'))
return []
def folder_list_checkpoints(folder: Path) -> Iterable[Path]:
checkpoint_candidates = []
if folder.exists():
re_checkpoint_filename = re.compile('rank[0-9]+\\.safetensors')
checkpoint_candidates = list(map(Path, filter(lambda item: re_checkpoint_filename.match(item.name), scandir(folder))))
return checkpoint_candidates
def get_rank_from_filename(filename: str) -> int:
name = filename.split('.')[0]
if name.startswith('rank'):
return int(name[len('rank'):])
else:
raise ValueError(f'Unknown filename format {filename} to extract rank from')
def get_trtllm_artifact(model_id: str, patterns: List[str], add_default_allow_patterns: bool=True) -> Path:
if (local_path := Path(model_id)).exists():
return local_path
return Path(snapshot_download(repo_id=model_id, repo_type='model', library_name=LIBRARY_NAME, library_version=trtllm_version, user_agent=get_user_agent(), allow_patterns=patterns + HUB_SNAPSHOT_ALLOW_PATTERNS if add_default_allow_patterns else patterns))
def get_trtllm_checkpoints(model_id: str, device: str, dtype: str):
if (workspace := Workspace.from_hub_cache(model_id, device)).checkpoints_path.exists():
return workspace.checkpoints_path
return get_trtllm_artifact(model_id, [f'{device}/{dtype}/**/*.safetensors'])
def get_trtllm_engines(model_id: str, device: str, dtype: str):
if (workspace := Workspace.from_hub_cache(model_id, device)).engines_path.exists():
return workspace.engines_path
return get_trtllm_artifact(model_id, [f'{device}/{dtype}/**/{PATH_FOLDER_ENGINES}/*.engine'])
def from_ranked_checkpoints(checkpoints_folder: Path, target_class: Type[SupportFromTrtLlmCheckpoint]) -> Generator['TrtLlmPreTrainedModel', None, None]:
root = str(checkpoints_folder)
trtllm_config = PretrainedConfig.from_checkpoint(root)
for rank in range(trtllm_config.mapping.world_size):
yield target_class.from_checkpoint(root, rank, trtllm_config)
def from_ranked_hf_model(local_hf_model_path: Path, config: 'TransformersPretrainedConfig', target_class: Type['TrtLlmPreTrainedModel'], export_config: 'ExportConfig'):
root = str(local_hf_model_path)
for rank in range(export_config.sharding.world_size):
export_config.sharding.rank = rank
ranked_model = target_class.from_hugging_face(root, dtype=DataType.from_torch(config.torch_dtype).value, mapping=export_config.sharding, load_by_shard=True, use_parallel_embedding=export_config.sharding.world_size > 1, share_embedding_table=config.tie_word_embeddings)
ranked_model.config.mapping.rank = rank
yield ranked_model
class HuggingFaceHubModel(ModelHubMixin, library_name=LIBRARY_NAME, languages=['python', 'c++'], tags=['optimum-nvidia', 'trtllm'], repo_url='https://github.com/huggingface/optimum-nvidia', docs_url='https://huggingface.co/docs/optimum/nvidia_overview', metaclass=ABCMeta):
def __init__(self, engines_path: Union[str, PathLike, Path]):
self._engines_path = Path(engines_path)
@classmethod
def _from_pretrained(cls: Type[T], *, model_id: str, config: Dict, revision: Optional[str], cache_dir: Optional[Union[str, Path]], force_download: bool, proxies: Optional[Dict], resume_download: bool, local_files_only: bool, token: Optional[Union[str, bool]], use_cuda_graph: bool=False, device_map: Optional[str]=None, export_config: Optional[ExportConfig]=None, quantization_config: Optional[ModelOptRecipe]=None, force_export: bool=False, export_only: bool=False, save_intermediate_checkpoints: bool=False) -> T:
if get_device_count() < 1:
raise ValueError('No GPU detected on this platform')
device_name = get_device_name(0)[-1]
if 'torch_dtype' in config:
dtype = config['torch_dtype']
elif 'pretrained_config' in config and 'dtype' in config['pretrained_config']:
dtype = config['pretrained_config']['dtype']
else:
raise RuntimeError("Failed to detect model's dtype")
local_model_id = Path(model_id)
engines_folder = checkpoints_folder = None
engine_files = checkpoint_files = []
if local_model_id.exists() and local_model_id.is_dir():
if any((engine_files := list(folder_list_engines(local_model_id)))):
engines_folder = engine_files[0].parent
checkpoints_folder = None
else:
checkpoint_files = list(folder_list_checkpoints(local_model_id))
if checkpoint_files:
checkpoints_folder = checkpoint_files[0].parent
else:
if not force_export:
LOGGER.debug(f'Retrieving prebuilt engine(s) for device {device_name}')
engines_folder = get_trtllm_engines(model_id, device_name, dtype)
engine_files = folder_list_engines(engines_folder)
if not engine_files:
LOGGER.debug(f'Retrieving checkpoint(s) for {device_name}')
checkpoints_folder = get_trtllm_checkpoints(model_id, device_name, dtype)
checkpoint_files = folder_list_checkpoints(checkpoints_folder)
if not engine_files:
LOGGER.info(f'No prebuilt engines or checkpoints were found for {model_id}')
if local_model_id.is_dir():
LOGGER.debug(f'Retrieving model from local folder: {local_model_id}')
original_checkpoints_path_for_conversion = local_model_id
workspace = Workspace(local_model_id)
else:
LOGGER.debug(f'Retrieving model from snapshot {model_id} on the Hugging Face Hub')
original_checkpoints_path_for_conversion = snapshot_download(model_id, repo_type='model', revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, allow_patterns=HUB_SNAPSHOT_ALLOW_PATTERNS)
workspace = None
config = NormalizedConfig(AutoConfig.for_model(**config))
generation_config = GenerationConfig.from_pretrained(original_checkpoints_path_for_conversion)
licence_path = Path(original_checkpoints_path_for_conversion).joinpath(FILE_LICENSE_NAME)
if not licence_path.exists():
licence_path = None
export_config = export_config or ExportConfig.from_config(config)
if device_map and device_map == 'auto':
LOGGER.info('Auto-parallelism will be used')
export_config = auto_parallel(export_config)
if isinstance(cls, SupportsTransformersConversion):
targets = cls.TRT_LLM_TARGET_MODEL_CLASSES
if not isinstance(targets, Mapping):
targets = {'': targets}
for (idx, (subpart, clazz)) in enumerate(targets.items()):
LOGGER.info(f'Building {model_id} {subpart} ({idx + 1} / {len(targets)})')
converter = TensorRTModelConverter(model_id, subpart, workspace, licence_path)
if quantization_config:
hf_model = cls.HF_LIBRARY_TARGET_MODEL_CLASS.from_pretrained(original_checkpoints_path_for_conversion, torch_dtype='auto', device_map='auto')
checkpoints_folder = converter.quantize(hf_model, quantization_config)
checkpoints_folder = checkpoints_folder.root
checkpoint_files = folder_list_checkpoints(checkpoints_folder)
del hf_model
torch.cuda.empty_cache()
if force_export or not len(list(converter.workspace.engines_path.glob('*.engine'))):
if checkpoint_files and isinstance(clazz, SupportFromTrtLlmCheckpoint):
ranked_models = from_ranked_checkpoints(checkpoints_folder, clazz)
elif isinstance(clazz, SupportsFromHuggingFace):
ranked_models = from_ranked_hf_model(original_checkpoints_path_for_conversion, config, clazz, export_config)
else:
raise TypeError(f"{clazz} can't convert from HF checkpoint")
generation_config = GenerationConfig.from_pretrained(original_checkpoints_path_for_conversion)
for ranked_model in ranked_models:
if save_intermediate_checkpoints:
_ = converter.convert(ranked_model)
LOGGER.info(f'Saved intermediate checkpoints at {converter.workspace.checkpoints_path}')
build_config = export_config.to_builder_config(ranked_model.config.quantization.quant_mode)
_ = converter.build(ranked_model, build_config)
engines_folder = converter.workspace.engines_path
generation_config.save_pretrained(engines_folder)
LOGGER.info(f'Saved TensorRT-LLM engines at {converter.workspace.engines_path}')
else:
LOGGER.info(f'Found existing engines at {converter.workspace.engines_path}')
else:
raise ValueError("Model doesn't support Hugging Face transformers conversion, aborting.")
else:
generation_config = GenerationConfig.from_pretrained(engines_folder)
return cls(engines_path=engines_folder, generation_config=generation_config, load_engines=not export_only)
@abstractmethod
def _save_additional_parcels(self, save_directory: Path):
raise NotImplementedError()
def _save_pretrained(self, save_directory: Path) -> None:
device_name = get_device_name(0)[-1]
save_directory = save_directory.joinpath(device_name)
save_directory.mkdir(parents=True, exist_ok=True)
src_license_file_path = self._engines_path.parent / FILE_LICENSE_NAME
dst_files = [src_license_file_path] if src_license_file_path.exists() else []
dst_files += list(self._engines_path.glob('*'))
for file in dst_files:
try:
symlink(file, save_directory.joinpath(file.relative_to(self._engines_path)))
except OSError as ose:
LOGGER.error(f'Failed to create symlink from current engine folder {self._engines_path.parent} to {save_directory}. Will default to a copy-based _save_pretrained', exc_info=ose)
dst = save_directory.joinpath(file.relative_to(self._engines_path))
if file.is_dir():
copytree(file, dst, symlinks=True)
elif file:
copyfile(file, dst)
self._save_additional_parcels(save_directory)
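# Small usage sketch for the folder helpers above; the path is a placeholder.
def example_inspect_local_folder(folder: Path) -> None:
    for engine in folder_list_engines(folder):
        print(engine.name, '-> rank', get_rank_from_filename(engine.name))
    for checkpoint in folder_list_checkpoints(folder):
        print(checkpoint.name, '-> rank', get_rank_from_filename(checkpoint.name))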
# File: optimum-nvidia-main/src/optimum/nvidia/lang/__init__.py
from enum import Enum
from typing import List
import torch
class DataType(str, Enum):
FLOAT32 = 'float32'
FLOAT16 = 'float16'
BFLOAT16 = 'bfloat16'
FLOAT8 = 'float8'
INT64 = 'int64'
INT32 = 'int32'
INT8 = 'int8'
UINT8 = 'uint8'
BOOL = 'bool'
@staticmethod
def from_torch(dtype: torch.dtype) -> 'DataType':
if dtype == torch.float32:
return DataType.FLOAT32
elif dtype == torch.float16:
return DataType.FLOAT16
elif dtype == torch.bfloat16:
return DataType.BFLOAT16
elif dtype == torch.float8_e4m3fn:
return DataType.FLOAT8
elif dtype == torch.int64:
return DataType.INT64
elif dtype == torch.int32:
return DataType.INT32
elif dtype == torch.int8:
return DataType.INT8
elif dtype == torch.uint8:
return DataType.UINT8
elif dtype == torch.bool:
return DataType.BOOL
else:
raise ValueError(f'Unknown torch.dtype {dtype}')
def to_trt(self) -> 'DataType':
import tensorrt as trt
if self == DataType.FLOAT32:
return trt.DataType.FLOAT
elif self == DataType.FLOAT16:
return trt.DataType.HALF
elif self == DataType.BFLOAT16:
return trt.DataType.BF16
elif self == DataType.FLOAT8:
return trt.DataType.FP8
elif self == DataType.INT8:
return trt.DataType.INT8
elif self == DataType.UINT8:
return trt.DataType.UINT8
elif self == DataType.INT32:
return trt.DataType.INT32
elif self == DataType.INT64:
return trt.DataType.INT64
elif self == DataType.BOOL:
return trt.DataType.BOOL
else:
raise ValueError(f'Unknown value {self}')
def to_torch(self):
import torch
if self == DataType.FLOAT32:
return torch.float32
elif self == DataType.FLOAT16:
return torch.float16
elif self == DataType.BFLOAT16:
return torch.bfloat16
elif self == DataType.FLOAT8:
return torch.float8_e4m3fn
elif self == DataType.INT8:
return torch.int8
elif self == DataType.UINT8:
return torch.uint8
elif self == DataType.INT32:
return torch.int32
elif self == DataType.INT64:
return torch.int64
elif self == DataType.BOOL:
return torch.bool
else:
raise ValueError(f'Unknown value {self}')
@staticmethod
def values() -> List[str]:
return [item.value for item in DataType]
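# Round-trip sketch for the DataType helpers above.
def example_dtype_roundtrip() -> None:
    dtype = DataType.from_torch(torch.bfloat16)
    assert dtype is DataType.BFLOAT16
    assert dtype.to_torch() == torch.bfloat16
    # dtype.to_trt() maps to trt.DataType.BF16 when TensorRT is available.
    print(DataType.values())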
# File: optimum-nvidia-main/src/optimum/nvidia/models/auto.py
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, Union
from huggingface_hub import ModelHubMixin
from optimum.nvidia.errors import UnsupportedModelException
from optimum.nvidia.models.gemma import GemmaForCausalLM
from optimum.nvidia.models.llama import LlamaForCausalLM
from optimum.nvidia.utils import model_type_from_known_config
if TYPE_CHECKING:
from optimum.nvidia.export import ExportConfig
from optimum.nvidia.runtime import CausalLM
class AutoModelForCausalLM(ModelHubMixin):
""""""
_SUPPORTED_MODEL_CLASS = {'llama': LlamaForCausalLM, 'mistral': LlamaForCausalLM, 'mixtral': LlamaForCausalLM, 'gemma': GemmaForCausalLM}
def __init__(self):
super().__init__()
@classmethod
def _from_pretrained(cls: Type, *, model_id: str, revision: Optional[str], cache_dir: Optional[Union[str, Path]], force_download: bool, proxies: Optional[Dict], resume_download: bool, local_files_only: bool, token: Optional[Union[str, bool]], config: Optional[Dict[str, Any]]=None, export_config: Optional['ExportConfig']=None, force_export: bool=False, use_cuda_graph: bool=False, **model_kwargs) -> 'CausalLM':
if config is None:
raise ValueError('Unable to determine the model type with config = None')
model_type = model_type_from_known_config(config)
if not model_type or model_type not in AutoModelForCausalLM._SUPPORTED_MODEL_CLASS:
raise UnsupportedModelException(model_type)
model_clazz = AutoModelForCausalLM._SUPPORTED_MODEL_CLASS[model_type]
model = model_clazz.from_pretrained(pretrained_model_name_or_path=model_id, config=config, revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, export_config=export_config, force_export=force_export, use_cuda_graph=use_cuda_graph, **model_kwargs)
return model
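# Illustrative call: dispatch on the checkpoint's `model_type` and build (or reuse) TRT-LLM
# engines. The model id is a placeholder; any Hub checkpoint whose model_type appears in
# _SUPPORTED_MODEL_CLASS follows the same path.
def example_load_causal_lm() -> 'CausalLM':
    return AutoModelForCausalLM.from_pretrained('some-org/some-llama-checkpoint')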
# File: optimum-nvidia-main/src/optimum/nvidia/models/base.py
from os import PathLike
from typing import TYPE_CHECKING, Mapping, Optional, Protocol, Type, Union, runtime_checkable
if TYPE_CHECKING:
from tensorrt_llm.models import PretrainedConfig
from tensorrt_llm.top_model_mixin import TopModelMixin
from transformers import PreTrainedModel as TransformersPreTrainedModel
@runtime_checkable
class SupportsFromHuggingFace(Protocol):
@classmethod
def from_hugging_face(cls, hf_model_dir: Union[str, bytes, PathLike], dtype: str='float16', mapping: Optional[Mapping]=None, **kwargs):
...
@runtime_checkable
class SupportFromTrtLlmCheckpoint(Protocol):
@classmethod
def from_checkpoint(cls, ckpt_dir: str, rank: Optional[int]=None, config: Optional['PretrainedConfig']=None):
...
@runtime_checkable
class SupportsTransformersConversion(Protocol):
HF_LIBRARY_TARGET_MODEL_CLASS: Type['TransformersPreTrainedModel']
TRT_LLM_TARGET_MODEL_CLASSES: Union[Type['TopModelMixin'], Mapping[str, Type['TopModelMixin']]]
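# Sketch: the protocols above are runtime_checkable, so capability checks are plain isinstance()
# calls on the *class* object, mirroring what optimum.nvidia.hub does during export.
def example_capability_check(model_class: type) -> None:
    if isinstance(model_class, SupportsTransformersConversion):
        print('convertible from a transformers checkpoint')
    if isinstance(model_class, SupportFromTrtLlmCheckpoint):
        print('loadable from a TRT-LLM checkpoint')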
# File: optimum-nvidia-main/src/optimum/nvidia/models/gemma.py
from logging import getLogger
from tensorrt_llm.models.gemma.model import GemmaForCausalLM as TrtGemmaForCausalLM
from transformers import GemmaForCausalLM as TransformersGemmaForCausalLM
from optimum.nvidia.hub import HuggingFaceHubModel
from optimum.nvidia.models import SupportsTransformersConversion
from optimum.nvidia.runtime import CausalLM
LOGGER = getLogger(__name__)
class GemmaForCausalLM(CausalLM, HuggingFaceHubModel, SupportsTransformersConversion):
HF_LIBRARY_TARGET_MODEL_CLASS = TransformersGemmaForCausalLM
TRT_LLM_TARGET_MODEL_CLASSES = TrtGemmaForCausalLM
TRT_LLM_MANDATORY_CONVERSION_PARAMS = {'share_embedding_table': True}
# File: optimum-nvidia-main/src/optimum/nvidia/models/mistral.py
from logging import getLogger
from tensorrt_llm.models.llama.model import LLaMAForCausalLM
from transformers import MistralForCausalLM as TransformersMistralForCausalLM
from optimum.nvidia.hub import HuggingFaceHubModel
from optimum.nvidia.models import SupportsTransformersConversion
from optimum.nvidia.runtime import CausalLM
LOGGER = getLogger(__name__)
class MistralForCausalLM(CausalLM, HuggingFaceHubModel, SupportsTransformersConversion):
HF_LIBRARY_TARGET_MODEL_CLASS = TransformersMistralForCausalLM
TRT_LLM_TARGET_MODEL_CLASSES = LLaMAForCausalLM
# File: optimum-nvidia-main/src/optimum/nvidia/models/mixtral.py
from logging import getLogger
from tensorrt_llm.models.llama.model import LLaMAForCausalLM
from transformers import MixtralForCausalLM as TransformersMixtralForCausalLM
from optimum.nvidia.hub import HuggingFaceHubModel
from optimum.nvidia.models import SupportsTransformersConversion
from optimum.nvidia.runtime import CausalLM
LOGGER = getLogger(__name__)
class MixtralForCausalLM(CausalLM, HuggingFaceHubModel, SupportsTransformersConversion):
HF_LIBRARY_TARGET_MODEL_CLASS = TransformersMixtralForCausalLM
TRT_LLM_TARGET_MODEL_CLASSES = LLaMAForCausalLM
# File: optimum-nvidia-main/src/optimum/nvidia/models/whisper.py
from logging import getLogger
from typing import TYPE_CHECKING
from tensorrt_llm.models import DecoderModel as TrtDecoderModel
from tensorrt_llm.models import WhisperEncoder as TrtWhisperEncoder
from transformers.models.whisper.modeling_whisper import WhisperForConditionalGeneration as TransformersWhisperForConditionalGeneration
from optimum.nvidia.models import SupportsTransformersConversion
if TYPE_CHECKING:
pass
LOGGER = getLogger(__name__)
class WhisperForConditionalGeneration(SupportsTransformersConversion):
HF_LIBRARY_TARGET_MODEL_CLASS = TransformersWhisperForConditionalGeneration
TRT_LLM_TARGET_MODEL_CLASSES = {'encoder': TrtWhisperEncoder, 'decoder': TrtDecoderModel}
# File: optimum-nvidia-main/src/optimum/nvidia/pipelines/__init__.py
from os import PathLike
from typing import Dict, Optional, Tuple, Type, Union
from huggingface_hub import model_info
from tensorrt_llm import Module
from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
from optimum.nvidia import AutoModelForCausalLM
from optimum.nvidia.pipelines.text_generation import TextGenerationPipeline
from .base import Pipeline
SUPPORTED_MODEL_WITH_TASKS: Dict[str, Dict[str, Tuple[Type[Pipeline], Type]]] = {'gemma': {'text-generation': (TextGenerationPipeline, AutoModelForCausalLM)}, 'llama': {'text-generation': (TextGenerationPipeline, AutoModelForCausalLM)}, 'mistral': {'text-generation': (TextGenerationPipeline, AutoModelForCausalLM)}, 'mixtral': {'text-generation': (TextGenerationPipeline, AutoModelForCausalLM)}}
def get_target_class_for_model_and_task(task: str, architecture: str) -> Optional[Type]:
task_ = SUPPORTED_MODEL_WITH_TASKS.get(task, None)
if not task_:
raise NotImplementedError(f'Task {task} is not supported yet.')
target = task_.get(architecture, None)
if not target:
raise NotImplementedError(f'Architecture {architecture} is not supported for task {task}. Only the following architectures are: {list(task_.keys())}')
return target
def pipeline(task: str=None, model: Union[str, PathLike, Module]=None, tokenizer: Optional[Union[str, PreTrainedTokenizer, PreTrainedTokenizerFast]]=None, **kwargs):
try:
info = model_info(model)
except Exception as e:
raise RuntimeError(f'Failed to instantiate the pipeline inferring the task for model {model}: {e}')
model_type = info.config.get('model_type', None)
if not model_type:
raise RuntimeError(f'Failed to infer model type for model {model}')
elif model_type not in SUPPORTED_MODEL_WITH_TASKS:
raise NotImplementedError(f'Model type {model_type} is not currently supported')
if not task and getattr(info, 'library_name', 'transformers') == 'transformers':
if not info.pipeline_tag:
raise RuntimeError(f'Failed to infer the task for model {model}, please use `task` parameter')
task = info.pipeline_tag
if task not in SUPPORTED_MODEL_WITH_TASKS[model_type]:
raise NotImplementedError(f'Task {task} is not supported yet for {model_type}.')
if tokenizer is None:
tokenizer = AutoTokenizer.from_pretrained(model, use_fast=True)
(pipeline_factory, model_factory) = SUPPORTED_MODEL_WITH_TASKS[model_type][task]
model = model_factory.from_pretrained(model, **kwargs)
return pipeline_factory(model, tokenizer)
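# Usage sketch for the factory above; the checkpoint id is a placeholder and must resolve to one
# of the supported model types (gemma, llama, mistral, mixtral).
def example_text_generation():
    pipe = pipeline('text-generation', 'some-org/some-llama-checkpoint')
    return pipe('What is the fastest way to serve an LLM?')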
# File: optimum-nvidia-main/src/optimum/nvidia/pipelines/text_generation.py
import warnings
from enum import Enum
from typing import Dict, List, Union
import torch
from transformers import PreTrainedTokenizer, TensorType
from optimum.nvidia import AutoModelForCausalLM
from optimum.nvidia.runtime import CausalLM
from .base import Pipeline
class ReturnType(Enum):
TENSORS = 0
NEW_TEXT = 1
FULL_TEXT = 2
class TextGenerationPipeline(Pipeline):
TARGET_FACTORY = AutoModelForCausalLM
__slots__ = ('tokenizer', '_runtime')
def __init__(self, model: CausalLM, tokenizer: PreTrainedTokenizer):
super().__init__()
if tokenizer.eos_token and (not tokenizer.pad_token):
tokenizer.pad_token = tokenizer.eos_token
self.tokenizer = tokenizer
self._runtime = model
def __call__(self, inputs: Union[str, List[str]], add_special_tokens: bool=True, **kwargs):
(preprocess_params, forward_params, postprocess_params) = self._sanitize_parameters(add_special_tokens=add_special_tokens, **kwargs)
model_inputs = self.preprocess(inputs, **preprocess_params)
model_outputs = self._forward(model_inputs, **forward_params)
outputs = self.postprocess(model_outputs, **postprocess_params)
return outputs
def _sanitize_parameters(self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, add_special_tokens=False, **generate_kwargs):
preprocess_params = {'add_special_tokens': add_special_tokens}
if prefix is not None:
preprocess_params['prefix'] = prefix
if prefix:
prefix_inputs = self.tokenizer(prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors=TensorType.PYTORCH)
generate_kwargs['prefix_length'] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {'hole'}:
raise ValueError(f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected [None, 'hole']")
preprocess_params['handle_long_generation'] = handle_long_generation
preprocess_params.update(generate_kwargs)
forward_params = generate_kwargs
postprocess_params = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`')
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`')
return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`')
return_type = ReturnType.TENSORS
if return_type is not None:
postprocess_params['return_type'] = return_type
if clean_up_tokenization_spaces is not None:
postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
if stop_sequence is not None:
stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
if len(stop_sequence_ids) > 1:
warnings.warn('Stopping on a multiple token sequence is not yet supported on transformers. The first token of the stop sequence will be used as the stop sequence string in the interim.')
generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
return (preprocess_params, forward_params, postprocess_params)
def _forward(self, model_inputs, **generate_kwargs):
input_ids = model_inputs['input_ids']
prompt_text = model_inputs.pop('prompt_text')
attention_mask = model_inputs.get('attention_mask', None)
max_new_tokens = generate_kwargs.pop('max_new_tokens', None)
min_length = generate_kwargs.pop('min_length', -1)
num_beams = generate_kwargs.pop('num_beams', 1)
temperature = generate_kwargs.pop('temperature', 1.0)
top_k = generate_kwargs.pop('top_k', 50)
top_p = generate_kwargs.pop('top_p', 1.0)
repetition_penalty = generate_kwargs.pop('repetition_penalty', 1.0)
length_penalty = generate_kwargs.pop('length_penalty', 1.0)
seed = generate_kwargs.pop('seed', 2017)
(generated_sequence, lengths) = self._runtime.generate(input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, min_length=min_length, num_beams=num_beams, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, length_penalty=length_penalty, seed=seed)
return {'generated_sequence': generated_sequence, 'lengths': lengths, 'input_ids': input_ids, 'prompt_text': prompt_text}
def preprocess(self, prompt_text, prefix='', handle_long_generation=None, add_special_tokens=False, **generate_kwargs) -> Dict[str, torch.Tensor]:
if isinstance(prompt_text, List):
text = [prefix + prompt for prompt in prompt_text]
else:
text = prefix + prompt_text
inputs = self.tokenizer(text, padding=False, add_special_tokens=add_special_tokens, return_tensors=TensorType.PYTORCH)
inputs['prompt_text'] = prompt_text
return inputs
def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
generated_sequence = model_outputs['generated_sequence']
generated_sequence = generated_sequence.cpu().numpy().tolist()
records = []
if return_type == ReturnType.TENSORS:
return [{'generated_token_ids': generated} for generated in generated_sequence]
for sequence in generated_sequence:
text = self.tokenizer.decode(sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)
record = {'generated_text': text}
records.append(record)
return records
# File: optimum-nvidia-main/src/optimum/nvidia/runtime.py
import asyncio
import json
import math
from logging import getLogger
from os import PathLike
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
import torch
from tensorrt_llm.bindings.executor import ExecutorConfig, KvCacheConfig
from tensorrt_llm.executor import GenerationExecutor, GenerationRequest, GenerationResult
from tensorrt_llm.hlapi import SamplingParams
from optimum.nvidia.hub import HuggingFaceHubModel
from optimum.nvidia.utils.nvml import is_post_ampere
if TYPE_CHECKING:
from transformers import GenerationConfig
LOGGER = getLogger(__name__)
def read_engine_config_file(path: Path) -> Dict[str, Any]:
with open(path / 'config.json', 'r', encoding='utf-8') as config_f:
return json.load(config_f)
def convert_generation_config(config: 'GenerationConfig') -> 'SamplingParams':
return SamplingParams(end_id=config.eos_token_id, pad_id=config.pad_token_id, top_k=config.top_k if config.do_sample else 1, top_p=config.top_p, temperature=config.temperature, beam_width=config.num_beams if config.do_sample else 1, bad_token_ids=config.bad_words_ids, length_penalty=config.length_penalty, repetition_penalty=config.repetition_penalty, no_repeat_ngram_size=config.no_repeat_ngram_size if config.no_repeat_ngram_size > 0 else 1, min_length=config.min_length if config.min_length > 0 else 1, max_new_tokens=config.max_new_tokens, return_generation_logits=config.output_logits, return_log_probs=not config.renormalize_logits)
def default_executor_config(config: Dict[str, Any]) -> 'ExecutorConfig':
build_config = config['build_config']
plugin_config = config['build_config']['plugin_config']
max_blocks_per_sequence = math.floor(build_config['max_seq_len'] / plugin_config['tokens_per_block'])
return ExecutorConfig(enable_chunked_context=is_post_ampere(), kv_cache_config=KvCacheConfig(enable_block_reuse=True, max_tokens=build_config['max_beam_width'] * plugin_config['tokens_per_block'] * max_blocks_per_sequence))
class InferenceRuntimeBase:
__slots__ = ('_config', '_executor', '_generation_config', '_sampling_config')
def __init__(self, engines_path: Union[str, PathLike], generation_config: 'GenerationConfig', executor_config: Optional['ExecutorConfig']=None, load_engines: bool=True):
engines_path = Path(engines_path)
if not engines_path.exists():
raise OSError(f"engine folder {engines_path} doesn't exist")
self._config = read_engine_config_file(engines_path)
self._generation_config = generation_config
self._sampling_config = convert_generation_config(generation_config)
if load_engines:
self._executor = GenerationExecutor.create(engine=engines_path, executor_config=executor_config or default_executor_config(self._config))
def generate(self, inputs: Union[List[int], 'torch.IntTensor'], generation_config: Optional['GenerationConfig']=None):
sampling = convert_generation_config(generation_config) if generation_config else self._sampling_config
if isinstance(inputs, torch.Tensor):
inputs = inputs.tolist()
result = self._executor.generate(inputs, sampling_params=sampling)
return result[0].outputs[0].token_ids
async def agenerate(self, inputs: Union[List[int], 'torch.IntTensor'], generation_config: Optional['GenerationConfig']=None) -> List[int]:
sampling = convert_generation_config(generation_config) if generation_config else self._sampling_config
if isinstance(inputs, torch.Tensor):
inputs = inputs.tolist()
futures = self._executor.generate_async(inputs, streaming=False, sampling_params=sampling)
if isinstance(futures, GenerationRequest):
results = await futures.aresult()
return results.token_ids
else:
results = await asyncio.gather(*[f.aresult() for f in futures])
return [r.token_ids for r in results]
class CausalLMOutput:
__slots__ = ('_results',)
def __init__(self, results: Union['GenerationResult', Sequence['GenerationResult']]):
self._results = results
@property
def logits(self):
return self._results.token_ids
@property
def loss(self) -> None:
return None
class CausalLM(HuggingFaceHubModel, InferenceRuntimeBase):
def __init__(self, engines_path: Union[str, PathLike, Path], generation_config: 'GenerationConfig', executor_config: Optional['ExecutorConfig']=None, load_engines: bool=True):
InferenceRuntimeBase.__init__(self, engines_path, generation_config, executor_config, load_engines)
HuggingFaceHubModel.__init__(self, engines_path)
def _save_additional_parcels(self, save_directory: Path):
self._generation_config.save_pretrained(save_directory, 'generation_config.json')
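# Sketch: how a transformers GenerationConfig maps onto TRT-LLM sampling parameters through
# convert_generation_config; the field values are illustrative.
def example_sampling_params() -> 'SamplingParams':
    from transformers import GenerationConfig
    generation_config = GenerationConfig(do_sample=True, top_k=50, top_p=0.9, temperature=0.8, max_new_tokens=128, eos_token_id=2, pad_token_id=0)
    return convert_generation_config(generation_config)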
# File: optimum-nvidia-main/src/optimum/nvidia/subpackage/commands/export.py
import sys
from typing import TYPE_CHECKING, Optional, Union
from transformers import AutoConfig, AutoTokenizer
from optimum.commands import optimum_cli_subcommand
from optimum.commands.base import BaseOptimumCLICommand, CommandInfo
from optimum.commands.export.base import ExportCommand
from optimum.nvidia import AutoModelForCausalLM, ExportConfig
from optimum.nvidia.export.cli import common_trtllm_export_args
if TYPE_CHECKING:
from argparse import ArgumentParser, Namespace, _SubParsersAction
from pathlib import Path
OPTIMUM_NVIDIA_CLI_QUANTIZATION_TARGET_REF = 'TARGET_QUANTIZATION_RECIPE'
def import_source_file(fname: Union[str, 'Path'], modname: str):
import importlib.util
spec = importlib.util.spec_from_file_location(modname, fname)
module = importlib.util.module_from_spec(spec)
sys.modules[modname] = module
spec.loader.exec_module(module)
@optimum_cli_subcommand(ExportCommand)
class TrtLlmExportCommand(BaseOptimumCLICommand):
COMMAND = CommandInfo(name='trtllm', help='Export PyTorch models to TensorRT-LLM compiled engines')
def __init__(self, subparsers: '_SubParsersAction', args: Optional['Namespace']=None, command: Optional['CommandInfo']=None, from_defaults_factory: bool=False, parser: Optional['ArgumentParser']=None):
super().__init__(subparsers, args=args, command=command, from_defaults_factory=from_defaults_factory, parser=parser)
self.args_string = ' '.join(sys.argv[3:])
@staticmethod
def parse_args(parser: 'ArgumentParser'):
return common_trtllm_export_args(parser)
def run(self):
args = self.args
if args.quantization:
tokenizer = AutoTokenizer.from_pretrained(args.model)
import_source_file(args.quantization, 'recipe')
try:
from recipe import TARGET_QUANTIZATION_RECIPE
qconfig = TARGET_QUANTIZATION_RECIPE(tokenizer)
except ImportError:
raise ModuleNotFoundError(f"Global variable 'TARGET_QUANTIZATION_RECIPE' was not found in {args.quantization}. This is required to automatically detect and allocate the right recipe for quantization.")
else:
qconfig = None
config = AutoConfig.from_pretrained(args.model)
export = ExportConfig.from_config(config, args.max_batch_size)
model = AutoModelForCausalLM.from_pretrained(args.model, export_config=export, quantization_config=qconfig, export_only=True, force_export=True)
if args.destination:
model.save_pretrained(args.destination)
if args.push_to_hub:
print(f'Exporting model to the Hugging Face Hub: {args.push_to_hub}')
model.push_to_hub(args.push_to_hub, commit_message=f'Optimum-CLI TensorRT-LLM {args.model} export')
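# Sketch of a `--quantization` recipe file (passed as `-q my_recipe.py`): the module must expose a
# TARGET_QUANTIZATION_RECIPE callable taking the tokenizer and returning a ModelOptRecipe.
# `ExampleFloat8Recipe` is hypothetical (see the sketch in compression/modelopt.py above); the
# calibration prompts are toy values.
#
# from optimum.nvidia.compression.modelopt import ModelOptRecipe
#
# def TARGET_QUANTIZATION_RECIPE(tokenizer) -> ModelOptRecipe:
#     samples = [tokenizer(text, return_tensors='pt') for text in ('Hello world!', 'TensorRT-LLM engines are fast.')]
#     return ExampleFloat8Recipe(samples)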
# File: optimum-nvidia-main/templates/inference-endpoints/postprocessing/1/model.py
import json
import numpy as np
import triton_python_backend_utils as pb_utils
from transformers import AutoTokenizer, LlamaTokenizer, T5Tokenizer
class TritonPythonModel:
__slots__ = ('tokenizer', 'output_dtype')
def initialize(self, args):
model_config = json.loads(args['model_config'])
tokenizer_dir = model_config['parameters']['tokenizer_dir']['string_value']
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir, padding_side='left')
self.tokenizer.pad_token = self.tokenizer.eos_token
output_config = pb_utils.get_output_config_by_name(model_config, 'OUTPUT')
self.output_dtype = pb_utils.triton_string_to_numpy(output_config['data_type'])
def execute(self, requests):
responses = []
for (idx, request) in enumerate(requests):
tokens_batch = pb_utils.get_input_tensor_by_name(request, 'TOKENS_BATCH').as_numpy()
outputs = self._postprocessing(tokens_batch)
output_tensor = pb_utils.Tensor('OUTPUT', np.array(outputs).astype(self.output_dtype))
inference_response = pb_utils.InferenceResponse(output_tensors=[output_tensor])
responses.append(inference_response)
return responses
def finalize(self):
print('Cleaning up...')
def _postprocessing(self, tokens_batch):
outputs = []
for beam_tokens in tokens_batch:
for tokens in beam_tokens:
output = self.tokenizer.decode(tokens)
outputs.append(output.encode('utf8'))
return outputs
# File: optimum-nvidia-main/templates/inference-endpoints/preprocessing/1/model.py
import csv
import json
from pathlib import Path
from typing import List, Sequence
import numpy as np
import triton_python_backend_utils as pb_utils
from tokenizers import Tokenizer
INPUT_NAMES = {'INPUT_ID', 'REQUEST_INPUT_LEN', 'BAD_WORDS_IDS', 'STOP_WORDS_IDS'}
class TritonPythonModel:
__slots__ = ('tokenizer', 'pad_token', 'pad_token_id', 'input_id_dtype', 'request_input_len_dtype', 'bad_words_ids_dtype', 'stop_words_ids_dtype')
def initialize(self, args):
model_config = json.loads(args['model_config'])
tokenizer_dir = Path(model_config['parameters']['tokenizer_dir']['string_value'])
tokenizer_path = tokenizer_dir.joinpath('tokenizer.json')
pad_to_multiple_of = int(model_config['parameters']['pad_to_multiple_of']['string_value'])
special_tokens_map_path = tokenizer_dir.joinpath('special_tokens_map.json')
with open(special_tokens_map_path, 'r', encoding='utf-8') as special_tokens_f:
special_tokens_map = json.load(special_tokens_f)
self.tokenizer = Tokenizer.from_file(str(tokenizer_path))
if 'eos_token' in special_tokens_map:
eos_token = special_tokens_map['eos_token']['content']
eos_token_id = self.tokenizer.encode(eos_token, add_special_tokens=False).ids[0]
self.pad_token = eos_token
self.pad_token_id = eos_token_id
for name in INPUT_NAMES:
dtype = pb_utils.triton_string_to_numpy(pb_utils.get_output_config_by_name(model_config, name)['data_type'])
setattr(self, name.lower() + '_dtype', dtype)
def execute(self, requests: Sequence):
responses = []
for request in requests:
response = self.handle_request(request)
responses.append(response)
return responses
def finalize(self):
print('Cleaning up...')
def handle_request(self, request: Sequence):
query = pb_utils.get_input_tensor_by_name(request, 'QUERY').as_numpy().item().decode('utf-8')
request_output_len = pb_utils.get_input_tensor_by_name(request, 'REQUEST_OUTPUT_LEN')
encoding = self.tokenizer.encode(query)
bad_words_ids = pb_utils.Tensor('BAD_WORDS_IDS', np.array([[], []], dtype=self.bad_words_ids_dtype))
stop_words_ids = pb_utils.Tensor('STOP_WORDS_IDS', np.array([[], []], dtype=self.stop_words_ids_dtype))
input_ids = pb_utils.Tensor('INPUT_ID', np.array([encoding.ids], dtype=self.input_id_dtype))
request_input_len = pb_utils.Tensor('REQUEST_INPUT_LEN', np.array([[len(encoding.ids)]], dtype=self.request_input_len_dtype))
return pb_utils.InferenceResponse(output_tensors=[input_ids, bad_words_ids, stop_words_ids, request_input_len, request_output_len])
def _to_word_list_format(self, word_dict: List[List[str]]):
assert self.tokenizer is not None, 'need to set tokenizer'
flat_ids = []
offsets = []
for word_dict_item in word_dict:
item_flat_ids = []
item_offsets = []
if isinstance(word_dict_item[0], bytes):
word_dict_item = [word_dict_item[0].decode()]
words = list(csv.reader(word_dict_item))[0]
for word in words:
ids = self.tokenizer.encode(word)
if len(ids) == 0:
continue
item_flat_ids += ids
item_offsets.append(len(ids))
flat_ids.append(np.array(item_flat_ids))
offsets.append(np.cumsum(np.array(item_offsets)))
pad_to = max(1, max((len(ids) for ids in flat_ids)))
for (i, (ids, offs)) in enumerate(zip(flat_ids, offsets)):
flat_ids[i] = np.pad(ids, (0, pad_to - len(ids)), constant_values=0)
offsets[i] = np.pad(offs, (0, pad_to - len(offs)), constant_values=-1)
return np.array([flat_ids, offsets], dtype='int32').transpose((1, 0, 2))