Dataset columns: content (string, lengths 32 to 91.6k), path (string, lengths 14 to 91), fimified (bool, 2 classes).
<fim_suffix> # by default, so keep the previous decoded string and all previous # tokens around to correctly decode spaces. # Otherwise, when streaming one token at a time, no spaces are decoded. # # See for example: # https://github.com/huggingface/transformers/issues/22710). # Decode the full sequence. self._prev_tokens += output_tokens decoded = self._tokenizer_handle.decode( self._tokens_to_numpy(self._prev_tokens) ) # Return the newly generated text. result = str(decoded)[len(self._prev_decoded) :] # Cache the full sequence for subsequent iterations. self._prev_decoded = str(decoded^) return result <fim_prefix># ===----------------------------------------------------------------------=== # # Copyright (c) 2024, Modular Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ===----------------------------------------------------------------------=== # """A tokenizer that leverages the Hugging Face transformers library.""" from math import ceildiv from python import Python from random import randint from tensor import Tensor, TensorShape from utils import Index from . import Tokenizer struct AutoTokenizer(Tokenizer): """Wrapper around generic tokenizers loaded from the huggingface transformers library. WARN: There are some extra copies that happen in the naive form of this tokenizer, specifically when converting python list -> numpy tensor -> MAX Tensor. """ var _transformers_module: PythonObject var _numpy_module: PythonObject var _tokenizer_handle: PythonObject var _py_builtins_handle: PythonObject var _prev_tokens: List[Int64] var _prev_decoded: String def __init__(inout self, hf_tokenizer_name: String): self._transformers_module = Python.import_module("transformers") self._numpy_module = Python.import_module("numpy") self._tokenizer_handle = ( self._transformers_module.AutoTokenizer.from_pretrained( hf_tokenizer_name ) ) self._py_builtins_handle = Python.import_module("builtins") self._prev_tokens = List[Int64]() self._prev_decoded = "" fn __moveinit__(inout self, owned existing: Self): self._transformers_module = existing._transformers_module^ self._numpy_module = existing._numpy_module^ self._tokenizer_handle = existing._tokenizer_handle^ self._py_builtins_handle = existing._py_builtins_handle^ self._prev_tokens = existing._prev_tokens^ self._prev_decoded = existing._prev_decoded^ @staticmethod def is_available() -> Bool: """Returns True if AutoTokenizer is available and False otherwise.""" try: Python.import_module("transformers") except: return False else: return True @always_inline @staticmethod def _numpy_data_pointer[ type: DType ](numpy_array: PythonObject) -> DTypePointer[type]: return DTypePointer[type]( __mlir_op.`pop.index_to_pointer`[ _type = __mlir_type[`!kgen.pointer<scalar<`, type.value, `>>`] ]( Scalar[DType.index]( numpy_array.__array_interface__["data"][0].__index__() ).value ) ) @always_inline @staticmethod def _memcpy_to_numpy(array: PythonObject, tokens: List[Int64]): dst = AutoTokenizer._numpy_data_pointer[DType.int64](array) memcpy(dst, tokens.unsafe_ptr(), len(tokens)) @always_inline @staticmethod def _memcpy_from_numpy(array: PythonObject, tensor: Tensor): src = 
AutoTokenizer._numpy_data_pointer[tensor.type](array) dst = tensor._ptr length = tensor.num_elements() memcpy(dst, src, length) @staticmethod @always_inline def _numpy_to_tensor[type: DType](array: PythonObject) -> Tensor[type]: shape = List[Int]() array_shape = array.shape for dim in array_shape: shape.append(dim.__index__()) out = Tensor[type](shape) AutoTokenizer._memcpy_from_numpy(array, out) return out^ @always_inline def _list_of_string_to_py_list( self, string_list: List[String] ) -> PythonObject: input_string_py = self._py_builtins_handle.list() for i in range(len(string_list)): <fim_middle> input_string_py.append(string_list[i]) return input_string_py @always_inline def _shape_to_python_list(self, shape: TensorShape) -> PythonObject: python_list = self._py_builtins_handle.list() for i in range(shape.rank()): python_list.append(shape[i]) return python_list^ @always_inline def _get_np_dtype[type: DType](self) -> PythonObject: @parameter if type.is_float32(): return self._numpy_module.float32 elif type.is_int32(): return self._numpy_module.int32 elif type.is_int64(): return self._numpy_module.int64 elif type.is_uint8(): return self._numpy_module.uint8 raise Error("Unknown datatype") @always_inline def _tokens_to_numpy(self, tokens: List[Int64]) -> PythonObject: shape = self._shape_to_python_list(len(tokens)) tokens_as_numpy = self._numpy_module.zeros( shape, self._get_np_dtype[DType.int64]() ) self._memcpy_to_numpy(tokens_as_numpy, tokens) return tokens_as_numpy def is_end_of_text(self, val: Int64) -> Bool: return val == int(self._tokenizer_handle.eos_token_id) def encode( self, input_string: List[String], bos: Optional[String] = None, eos: Optional[String] = None, ) -> List[Int64]: input_string_py = self._list_of_string_to_py_list(input_string) tokenized_py = self._tokenizer_handle(input_string_py) token_ids = AutoTokenizer._numpy_to_tensor[DType.int64]( self._numpy_module.array(tokenized_py["input_ids"]) ) result = List[Int64]() for i in range(token_ids.num_elements()): result.append(token_ids._to_buffer()[i]) _ = token_ids^ return result def decode(inout self, output_tokens: List[Int64]) -> String: """Decodes tokens using the autotokenizer and accounts for spaces.""" # Attempt to produce correct output in a streaming setting. # Tokenizers decode differently depending on neighbouring tokens. # In particular, the sentencepiece BPE tokenizer removes prefix space
max/examples/graph-api/pipelines/tokenizer/autotokenizer.πŸ”₯
true
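The `decode()` in the sample above works around a quirk of Hugging Face tokenizers: sentencepiece-style BPE drops a token's prefix space when that token is decoded in isolation, so decoding one token at a time during streaming loses spaces. A minimal Python sketch of the same accumulate-and-diff strategy, assuming only the standard transformers `AutoTokenizer` API (the tokenizer name is supplied by the caller):

```python
from transformers import AutoTokenizer

class StreamingDecoder:
    """Decode tokens incrementally by re-decoding the full history and diffing."""

    def __init__(self, tokenizer_name: str):
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
        self.prev_tokens = []   # all token ids seen so far
        self.prev_decoded = ""  # decode of prev_tokens from the previous call

    def decode(self, new_tokens) -> str:
        # Decode the whole sequence so neighbouring tokens restore prefix spaces.
        self.prev_tokens += list(new_tokens)
        decoded = self.tokenizer.decode(self.prev_tokens)
        result = decoded[len(self.prev_decoded):]  # only the newly produced text
        self.prev_decoded = decoded
        return result
```

Each call re-decodes the full history, trading O(n) work per step for correct whitespace; the Mojo implementation above makes the same trade.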
<filename>max/examples/graph-api/pipelines/tokenizer/tokenizer.πŸ”₯ <fim_suffix> def decode(inout self, output_tokens: List[Int64]) -> String: ... <fim_prefix># ===----------------------------------------------------------------------=== # # Copyright (c) 2024, Modular Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ===----------------------------------------------------------------------=== # """Module containing a trait for generic tokenizers.""" trait Tokenizer(Movable): def is_end_of_text(self, val: Int64) -> Bool: ... def encode( self, input_string: List[String], bos: Optional[String] = None, <fim_middle> eos: Optional[String] = None, ) -> List[Int64]: ...
max/examples/graph-api/pipelines/tokenizer/tokenizer.πŸ”₯
true
<fim_suffix> required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ===----------------------------------------------------------------------=== # from .autotokenizer import AutoTokenizer from .tokenizer import Tokenizer <fim_prefix># ===----------------------------------------------------------------------=== # # Copyright (c) 2024, Modula<fim_middle>r Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless
max/examples/graph-api/pipelines/tokenizer/__init__.πŸ”₯
true
<fim_suffix>----------------------------------------------------------------=== # """Functions for lazily downloading model weights on first execution.""" from os import listdir, mkdir from pathlib import cwd, Path from sys.ffi import external_call def curl_download(url: String, destination: Path): curl_command = "/usr/bin/curl " + url + " -L -J -o " + str(destination) _ = external_call["system", Pointer[NoneType]](curl_command.unsafe_ptr()) _ = curl_command^ def download_weights_to_cache(cache_path: Path, *urls: String): if not cache_path.is_dir(): mkdir(cache_path) for url in urls: last_component = url[].split("/")[-1] destination = cache_path.joinpath(last_component) if not destination.is_file(): curl_download(url[], destination) <fim_prefix># ===----------------------------------------------------------------------=== # # Copyright (c) 2024, Modular Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the Licens<fim_middle>e for the specific language governing permissions and # limitations under the License. # ===------
max/examples/graph-api/pipelines/weights/download.πŸ”₯
true
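`download_weights_to_cache` in the sample above only fetches a URL when a file with the URL's last path component is not already present in the cache directory. The same caching behaviour, sketched in plain Python with urllib instead of shelling out to curl:

```python
from pathlib import Path
from urllib.request import urlretrieve

def download_weights_to_cache(cache_path: str, *urls: str) -> None:
    cache = Path(cache_path)
    cache.mkdir(exist_ok=True)                    # mirrors mkdir(cache_path)
    for url in urls:
        destination = cache / url.split("/")[-1]  # last URL component as file name
        if not destination.is_file():             # skip weights already cached
            urlretrieve(url, destination)
```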
<filename>max/examples/graph-api/pipelines/weights/ggml_quants.πŸ”₯ <fim_suffix>elf.scales = scales self.d = d @staticmethod fn elements_per_block() -> Int: """Returns the number of elements per Q6_K block.""" return QK_K <fim_prefix># ===----------------------------------------------------------------------=== # # Copyright (c) 2024, Modular Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ===----------------------------------------------------------------------=== # """GGML quantized type definitions.""" from utils import InlineArray @value struct BlockQ40: """4-bit quantization. Constraints: The data layout must exactly match `block_q4_0` from ggml-quants.h. """ alias QK4_0 = 32 """Number of elements per Q4_0 block.""" var d: Float16 """Delta.""" var qs: InlineArray[UInt8, Self.QK4_0 // 2] """Nibbles / quants.""" def __init__( inout self, d: Float16, qs: InlineArray[UInt8, Self.QK4_0 // 2], ): constrained[sizeof[Self]() == sizeof[Float16]() + (Self.QK4_0 // 2)]() self.d = d self.qs = qs @staticmethod fn elements_per_block() -> Int: """Returns the number of elements per Q4_0 block.""" return Self.QK4_0 @value struct BlockQ80: """8-bit quantization. Constraints: The data layout must exactly match `block_q8_0` from ggml-quants.h. """ alias QK8_0 = 32 """Number of elements per Q8_0 block.""" var d: Float16 """Delta.""" var qs: InlineArray[Int8, Self.QK8_0] """Quants.""" def __init__( inout self, d: Float16, qs: InlineArray[Int8, Self.QK8_0], ): constrained[sizeof[Self]() == sizeof[Float16]() + Self.QK8_0]() self.d = d self.qs = qs @staticmethod fn elements_per_block() -> Int: """Returns the number of elements per Q8_0 block.""" return Self.QK8_0 # Note that there is a compile definition in ggml-quants.h that allows setting # `QK_K=64`, which is useful for models with rows unaligned to 256 bits. alias QK_K = 256 """Size of superblock quantized elements, in bytes.""" alias K_SCALE_SIZE = 12 """Size of superblock scales and mins, in bytes.""" @value struct BlockQ4K: """4-bit quantization. 8 blocks of 32 elements each. Weights are represented as `x = a * q + b`. Effectively 4.5 bits per weight. Constraints: The data layout must exactly match `block_q4_K` from ggml-quants.h. """ var d: Float16 """Super-block scale for quantized scales.""" var dmin: Float16 """Super-block scale for quantized mins.""" var scales: InlineArray[UInt8, K_SCALE_SIZE] """Scales and mins, quantized with 6 bits.""" var qs: InlineArray[UInt8, QK_K // 2] """4-bit quants.""" def __init__( inout self, d: Float16, dmin: Float16, scales: InlineArray[UInt8, K_SCALE_SIZE], qs: InlineArray[UInt8, QK_K // 2], ): constrained[ sizeof[Self]() == 2 * sizeof[Float16]() + K_SCALE_SIZE + QK_K // 2 ]() self.d = d self.dmin = dmin self.scales = scales self.qs = qs @staticmethod fn elements_per_block() -> Int: """Returns the number of elements per Q4_K block.""" return QK_K @value struct BlockQ6K: """6-bit quantization. 16 blocks of 16 elements each. Weights are represented as `x = a * q`. Effectively 6.5625 bits per weight. Constraints: The data layout must exactly match `block_q6_K` from ggml-quants.h. 
""" var ql: InlineArray[UInt8, QK_K // 2] """Quants: lower 4 bits.""" var qh: InlineArray[UInt8, QK_K // 4] """Quants: upper 2 bits.""" var scales: InlineArray[Int8, QK_K // 16] """Scales: quantized with 8 bits.""" var d: Float16 """Super-block scale.""" def __init__( inout self, ql: InlineArray[UInt8, QK_K // 2], qh: InlineArray[UInt8, QK_K // 4], scales: InlineArray[Int8, QK_K // 16], d: Float16, ):<fim_middle> constrained[ sizeof[Self]() == (QK_K // 2) + (QK_K // 4) + (QK_K // 16) + sizeof[Float16]() ]() self.ql = ql self.qh = qh s
max/examples/graph-api/pipelines/weights/ggml_quants.πŸ”₯
true
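The "effectively 4.5 bits per weight" and "6.5625 bits per weight" figures in the docstrings above follow directly from the block layouts: bytes per block times 8, divided by elements per block. A quick arithmetic check in Python using the sizes declared in this file:

```python
QK4_0 = 32         # elements per Q4_0 block
QK8_0 = 32         # elements per Q8_0 block
QK_K = 256         # elements per k-quant superblock
K_SCALE_SIZE = 12  # bytes of packed scales/mins in a Q4_K block
F16 = 2            # sizeof(float16)

blocks = {
    # name: (bytes per block, elements per block)
    "Q4_0": (F16 + QK4_0 // 2, QK4_0),                         # d + nibbles
    "Q8_0": (F16 + QK8_0, QK8_0),                              # d + int8 quants
    "Q4_K": (2 * F16 + K_SCALE_SIZE + QK_K // 2, QK_K),        # d + dmin + scales + nibbles
    "Q6_K": (QK_K // 2 + QK_K // 4 + QK_K // 16 + F16, QK_K),  # ql + qh + scales + d
}

for name, (nbytes, nelems) in blocks.items():
    print(name, nbytes, "bytes,", nbytes * 8 / nelems, "bits/weight")
# Q4_0: 18 bytes, 4.5   Q8_0: 34 bytes, 8.5   Q4_K: 144 bytes, 4.5   Q6_K: 210 bytes, 6.5625
```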
<filename>max/examples/graph-api/pipelines/weights/gguf.πŸ”₯ <fim_suffix> # Opposite to `TensorSpec`, GGUF stores the inner dimension at # the smaller index, so reverse them. dims.append(int(self.ne[n_dims - i - 1])) return dims @always_inline fn storage_tensor_shape(self) raises -> TensorShape: """Computes the `TensorShape` for the storage backing this tensor. Returns: A `TensorShape` describing this GGUF tensor's torage. """ var dims = self.tensor_dims() if self.type.type_trait().is_quantized: if len(dims) != 2: raise ( "GGML to stdlib tensor only supports quantized matrices" " currently but got tensor of rank: " + str(len(dims)) ) # TODO(#31206): Support more principled compatibility between: # - Custom quantized types such as in ggml-quants.h. # - Mojo types. # - MO types. return TensorShape(dims[0], self.num_bytes() // dims[0]) return TensorShape(dims) struct GGUFReader: var offset: Int var f: FileHandle fn __init__(inout self, owned f: FileHandle): self.offset = 0 self.f = f^ @always_inline fn align_to(inout self, alignment: Int) raises -> None: var overshoot = self.offset % alignment if overshoot == 0: return self.seek(alignment - overshoot) @always_inline fn read_bytes(inout self, num_bytes: Int) raises -> Tensor[DType.uint8]: self.offset += num_bytes return self.f.read_bytes(num_bytes) @always_inline fn seek(inout self, num_bytes: Int) raises: self.offset += num_bytes _ = self.f.seek(num_bytes) @always_inline fn dtype_element[type: DType](inout self) raises -> Scalar[type]: var bytes_tensor: Tensor[DType.uint8] = self.read_bytes( sizeof[Scalar[type]]() ) var result = bytes_tensor.unsafe_ptr().bitcast[type]().load() _ = bytes_tensor^ return result @always_inline fn gguf_string(inout self) raises -> GGUFString: var n = int(self.dtype_element[DType.uint64]()) var key_data: Tensor[DType.uint8] = self.read_bytes(n) return GGUFString(n, key_data._steal_ptr().bitcast[DType.uint8]()) fn gguf_kv(inout self) raises -> GGUFKV: @always_inline @parameter fn _gguf_value[type: DType]() raises -> GGUFValue: var bytes_tensor: Tensor[DType.uint8] = self.read_bytes( sizeof[Scalar[type]]() ) var result = bytes_tensor.unsafe_ptr().bitcast[type]().load() _ = bytes_tensor^ return GGUFValue.from_dtype[type](result) @always_inline @parameter fn _sizeof[type: DType]() raises -> Int: return sizeof[type]() var key = self.gguf_string() if ( StringRef(key.data.bitcast[DType.uint8](), int(key.n)) == "general.alignment" ): raise "don't support specifying alignment" var kv_type = GGUFType(self.dtype_element[DType.int32]()) if kv_type is GGUFType.GGUF_TYPE_STRING: return GGUFKV(key, GGUFValue(self.gguf_string())) if kv_type is GGUFType.GGUF_TYPE_ARRAY: var array_type = GGUFType(self.dtype_element[DType.int32]()) if array_type is GGUFType.GGUF_TYPE_ARRAY: raise ( "Array of arrays not supported yet. Please raise an issue" ) var array_n = int(self.dtype_element[DType.uint64]()) # Note: the underlying `array.data` ptr is uninitialized var array = GGUFArray(array_n) if array_type is GGUFType.GGUF_TYPE_STRING: for i in range(array_n): initialize_pointee_copy( array.data + i, GGUFValue(self.gguf_string()) ) return GGUFKV(key, GGUFValue(array)) for i in range(array_n): initialize_pointee_copy( array.data + i, array_type.dispatch[GGUFValue, _gguf_value](), ) return GGUFKV(key, GGUFValue(array)) # Dispatch on dtype. 
return GGUFKV(key, kv_type.dispatch[GGUFValue, _gguf_value]()) fn gguf_tensor_info(inout self) raises -> GGUFTensorInfo: var name = self.gguf_string() var n_dims = self.dtype_element[DType.uint32]() var ne = StaticTuple[UInt64, GGUFTensorInfo.GGML_MAX_DIMS]() for i in range(GGUFTensorInfo.GGML_MAX_DIMS): ne[i] = 1 for i in range(int(n_dims)): ne[i] = self.dtype_element[DType.uint64]() var type = GGMLType(self.dtype_element[DType.int32]()) var offset = self.dtype_element[DType.uint64]() return GGUFTensorInfo( name, n_dims, ne, type, offset, data=DTypePointer[DType.invalid](), size=0, ) struct GGUFFile(LoadableModel): """A container for all metadata describing the weights in a GGUF file.""" # This is called GGUFFile to match `gguf_file_t` in gguf.md, but note that # this matches `gguf_context` in ggml.c. # This context owns all memory of its fields and their fields, # transitively. # All GGUF types with non-trivial memory management should implement a # `.destroy()` method, which is called in `GGUFFile`'s destructor. alias GGUF_DEFAULT_ALIGNMENT = 32 alias GGUF_MAGIC = "GGUF" var header: GGUFHeader var kv: List[GGUFKV] var infos: List[GGUFTensorInfo] var alignment: Int # The offset of the tensor data in the file. # `GGUFTensorInfo.offset` is relative to this. var offset: Int # Size of the tensor data section in bytes. var size: Int # The open GGUF model file. var fp: FileHandle fn __init__(inout self, model_path: Path) raises: var reader = GGUFReader(open(model_path, "r")) # Read the header. var magic: Tensor[DType.uint8] = reader.read_bytes( sizeof[StaticTuple[Int8, 4]]() ) for i in range(magic.num_elements()): if magic[i] != GGUFFile.GGUF_MAGIC.as_uint8_ptr().load(i): raise "invalid magic character" var version = reader.dtype_element[DType.uint32]() if version == 1: raise "GGUFv1 is not supported" var n_tensors = int(reader.dtype_element[DType.uint64]()) var n_kv = int(reader.dtype_element[DType.uint64]()) self.header = GGUFHeader( StaticTuple[UInt8, 4](magic[0], magic[1], magic[2], magic[3]), version, n_tensors, n_kv, ) # Read the kv pairs. self.kv = List[GGUFKV](capacity=n_kv) for _ in range(n_kv): self.kv.append(reader.gguf_kv()) # Read the tensor infos. self.infos = List[GGUFTensorInfo](capacity=n_tensors) for _ in range(n_tensors): self.infos.append(reader.gguf_tensor_info()) self.alignment = GGUFFile.GGUF_DEFAULT_ALIGNMENT # TODO: Set alignment from general.alignment key. reader.align_to(self.alignment) self.offset = reader.offset # Compute total size of the data section accounting for alignment. 
self.size = 0 for i in range(n_tensors): var size_cur = self.infos[i].num_bytes() @always_inline fn pad(x: Int, n: Int) -> Int: return (x + n - 1) & ~(n - 1) self.size += pad(size_cur, self.alignment) self.fp = open(model_path, "r") fn __moveinit__(inout self, owned other: GGUFFile): @always_inline fn exchange[T: Movable](inout old_var: T, owned new_value: T) -> T: var old_value = old_var^ old_var = new_value^ return old_value^ self.header = other.header self.kv = exchange(other.kv, List[GGUFKV]()) self.infos = exchange(other.infos, List[GGUFTensorInfo]()) self.alignment = other.alignment self.offset = other.offset self.size = other.size self.fp = other.fp^ fn __del__(owned self): for i in range(int(self.header.n_kv)): self.kv[i].destroy() for i in range(self.n_tensors()): self.infos[i].destroy() fn __str__(self) -> String: var res = str(self.header) + "\n" for i in range(self.header.n_kv): res += str(self.kv[i]) res += "\n" for i in range(self.header.n_tensors): res += str(self.infos[i]) res += "\n" return res fn __getitem__(self, key: String) raises -> GGUFValue: """Returns the metadata value for key, raising if not found.""" for kv in self.kv: if str(kv[].key) == key: return kv[].value raise "GGUF key: " + key + " not found" fn get[ type: DType ]( inout self, key: String, layer_idx: Optional[Int] = None ) raises -> Tensor[type]: var full_key = key + ".weight" if layer_idx: full_key = "blk." + str(layer_idx.value()[]) + "." + full_key for i in range(self.n_tensors()): var info = self.infos[i] if str(info.name) != full_key: continue if type != info.type.dtype(): raise "compile/runtime dtype mismatch of " + str( type ) + "; expected " + str(info.type.dtype()) + " for " + str( info.name ) # Add tensor data offset since `info.offset` is from the start of # the tensor data. _ = self.fp.seek(self.offset + int(info.offset)) var bytes_tensor = Tensor[DType.uint8]( self.fp.read_bytes(info.num_bytes()) ) return Tensor( info.storage_tensor_shape(), bytes_tensor._steal_ptr().bitcast[type](), ) raise "key not found" fn ggml_type( self, key: String, layer_idx: Optional[Int] = None ) raises -> GGMLType: """Reads the GGML type for the specified tensor.""" var full_key = key + ".weight" if layer_idx: full_key = "blk." + str(layer_idx.value()[]) + "." + full_key for i in range(self.n_tensors()): var info = self.infos[i] if str(info.name) != full_key: continue return info.type raise "key: " + key + " not found in ggml_type" fn hyperparams(self) raises -> LlamaHParams: """Loads Llama hyperparameters by reading the GGUF metadata.""" var dims = int(self["llama.embedding_length"]) var n_heads = int(self["llama.attention.head_count"]) var n_kv_heads = int(self["llama.attention.head_count_kv"]) # Compute the vocab size from the token embedding shape. # This works around -1 vocab_size from Llama 1 and 2 conversion. # See for example: https://github.com/ggerganov/llama.cpp/pull/4258. var vocab_size: Optional[Int] = None for info in self.infos: if str(info[].name) != "token_embd.weight": continue # Take the 2nd dim since token_embd is [dims, vocab_size]. vocab_size = int(info[].ne[1]) if not vocab_size: # As a last resort try the vocab_size metadata field. 
vocab_size = int(self["llama.vocab_size"]) return LlamaHParams( dims=dims, n_layers=int(self["llama.block_count"]), n_heads=n_heads, norm_eps=self["llama.attention.layer_norm_rms_epsilon"].float(), n_kv_heads=n_kv_heads, vocab_size=vocab_size.value()[], head_dim=dims // n_heads, n_rep=n_heads // n_kv_heads, ) @always_inline fn n_tensors(self) -> Int: return int(self.header.n_tensors) <fim_prefix># ===----------------------------------------------------------------------=== # # Copyright (c) 2024, Modular Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ===----------------------------------------------------------------------=== # """Interface to GGUF following the logic in ggml.c, in particular `gguf_init_from_file()`: https://github.com/ggerganov/llama.cpp/blob/8da46278e1a57107591653275f8e03a281de94f0/ggml.c#L18016 In order to maintain readability of this file and the ability to cross-reference ggml.c, here types and field names match those in ggml.c as much as possible. This changes only casing for `struct`s to adhere to Mojo naming style. GGUF is designed with the following core principles: - The entire model is self-contained in one file. - The format is extensible so that GGUF can be changed maintaining backwards compatibility. - Weights can be mmap'ed. GGUFKV is a key-value storage for hyperparameters called metadata. GGUFTensorInfo describe and is used to locate the tensor data. The model, its tensors, and all of its metadata are serialized as little endian. See the GGUF documentation for more details: https://github.com/ggerganov/ggml/blob/cce2ac9a5d788c3b6bb72a3b3dbde9247d8b85a7/docs/gguf.md. NB: all types in this file except `GGUFFile` can be passed around by value and do not own their data. The `GGUFFile` allocates all the other objects and deallocates them all in its destructor. Other types expose a `.destroy()` method simply to facilitate this. """ from collections import List, Optional from memory.unsafe import DTypePointer from pathlib import Path from utils import StaticTuple, Variant from tensor import Tensor, TensorShape from . 
import ggml_quants from .loadable_model import LlamaHParams, LoadableModel @value struct GGMLTypeTrait: var type_name: String var blck_size: Int var type_size: Int var is_quantized: Bool @value @register_passable("trivial") struct GGMLType: """Enum-like struct matching `ggml_type`, the dtype of a tensor.""" alias GGML_TYPE_F32: Int32 = 0 alias GGML_TYPE_F16: Int32 = 1 alias GGML_TYPE_Q4_0: Int32 = 2 alias GGML_TYPE_Q4_1: Int32 = 3 # GGML_TYPE_Q4_2 = 4, support has been removed # GGML_TYPE_Q4_3 (5) support has been removed alias GGML_TYPE_Q5_0: Int32 = 6 alias GGML_TYPE_Q5_1: Int32 = 7 alias GGML_TYPE_Q8_0: Int32 = 8 alias GGML_TYPE_Q8_1: Int32 = 9 # k-quantizations alias GGML_TYPE_Q2_K: Int32 = 10 alias GGML_TYPE_Q3_K: Int32 = 11 alias GGML_TYPE_Q4_K: Int32 = 12 alias GGML_TYPE_Q5_K: Int32 = 13 alias GGML_TYPE_Q6_K: Int32 = 14 alias GGML_TYPE_Q8_K: Int32 = 15 alias GGML_TYPE_I8: Int32 = 16 alias GGML_TYPE_I16: Int32 = 17 alias GGML_TYPE_I32: Int32 = 18 alias GGML_TYPE_COUNT: Int32 = 19 # marks the end of the enum var _value: Int32 fn __is__(self, other: Self) -> Bool: """Checks if this `GGMLType` is the same as `other`. Args: other: `GGMLType` to check equality against. Returns `True` if the `GGMLType`s are the same and `False` otherwise. """ return self._value == other._value fn __str__(self) -> String: if self._value == self.GGML_TYPE_F16: return "F16" if self._value == self.GGML_TYPE_F32: return "F32" if self._value == self.GGML_TYPE_COUNT: return "COUNT" if self._value == self.GGML_TYPE_I16: return "I16" if self._value == self.GGML_TYPE_I32: return "I32" if self._value == self.GGML_TYPE_I8: return "I8" if self._value == self.GGML_TYPE_Q2_K: return "Q2_K" if self._value == self.GGML_TYPE_Q3_K: return "Q3_K" if self._value == self.GGML_TYPE_Q4_0: return "Q4_0" if self._value == self.GGML_TYPE_Q4_1: return "Q4_1" if self._value == self.GGML_TYPE_Q4_K: return "Q4_K" if self._value == self.GGML_TYPE_Q5_0: return "Q5_0" if self._value == self.GGML_TYPE_Q5_1: return "Q5_1" if self._value == self.GGML_TYPE_Q5_K: return "Q5_K" if self._value == self.GGML_TYPE_Q6_K: return "Q6_K" if self._value == self.GGML_TYPE_Q8_0: return "Q8_0" if self._value == self.GGML_TYPE_Q8_1: return "Q8_1" if self._value == self.GGML_TYPE_Q8_K: return "Q8_K" return "<unknown>" @always_inline fn dtype(self) raises -> DType: # Return uint8 for quantized types. 
if ( self is Self.GGML_TYPE_Q4_0 or self is Self.GGML_TYPE_Q4_K or self is Self.GGML_TYPE_Q6_K or self is Self.GGML_TYPE_Q8_0 ): return DType.uint8 if self is Self.GGML_TYPE_F16: return DType.float16 if self is Self.GGML_TYPE_F32: return DType.float32 if self is Self.GGML_TYPE_I8: return DType.int8 if self is Self.GGML_TYPE_I16: return DType.int16 if self is Self.GGML_TYPE_I32: return DType.int32 raise "GGML type lacks corresponding DType" fn type_trait(self) raises -> GGMLTypeTrait: if self is Self.GGML_TYPE_Q4_0: return GGMLTypeTrait( "q4_0", blck_size=ggml_quants.BlockQ40.QK4_0, type_size=sizeof[ggml_quants.BlockQ40](), is_quantized=True, ) if self is Self.GGML_TYPE_Q4_K: return GGMLTypeTrait( "q4_K", blck_size=ggml_quants.QK_K, type_size=sizeof[ggml_quants.BlockQ4K](), is_quantized=True, ) if self is Self.GGML_TYPE_Q6_K: return GGMLTypeTrait( "q6_K", blck_size=ggml_quants.QK_K, type_size=sizeof[ggml_quants.BlockQ6K](), is_quantized=True, ) if self is Self.GGML_TYPE_Q8_0: return GGMLTypeTrait( "q8_0", blck_size=ggml_quants.BlockQ80.QK8_0, type_size=sizeof[ggml_quants.BlockQ80](), is_quantized=True, ) if self is Self.GGML_TYPE_F32: return GGMLTypeTrait( "f32", blck_size=1, type_size=sizeof[DType.float32](), is_quantized=False, ) if self is Self.GGML_TYPE_F16: return GGMLTypeTrait( "f16", blck_size=1, type_size=sizeof[DType.float16](), is_quantized=False, ) raise "type trait " + str(self._value) + " not implemented yet" @value @register_passable("trivial") struct GGUFString(Stringable): # The length of the string in bytes. var n: UInt64 # The string as a UTF-8 non-null-terminated string. # Note: this matches GGUF spec but # is unsafe to be used without proper initialization. var data: DTypePointer[DType.uint8] @always_inline fn destroy(owned self): self.data.free() @always_inline fn __str__(self) -> String: return str(StringRef(self.data, int(self.n))) @value @register_passable("trivial") struct GGUFArray: # The length of the array var n: UInt64 var data: UnsafePointer[GGUFValue] # Max amount of items to print for an array. alias max_print_size = 5 fn __init__(inout self, size: Int): self.n = size self.data = UnsafePointer[GGUFValue].alloc(size) @always_inline fn destroy(owned self): self.data.free() @always_inline fn __str__(self) -> String: var res = String("[ ") for i in range(self.n.min(self.max_print_size)): res += str(self.data[i]) res += " " res += "]" return res @value @register_passable("trivial") struct GGUFType: """Enum-like struct matching `gguf_type`, a metadata value type.""" alias GGUF_TYPE_UINT8: Int32 = 0 alias GGUF_TYPE_INT8: Int32 = 1 alias GGUF_TYPE_UINT16: Int32 = 2 alias GGUF_TYPE_INT16: Int32 = 3 alias GGUF_TYPE_UINT32: Int32 = 4 alias GGUF_TYPE_INT32: Int32 = 5 alias GGUF_TYPE_FLOAT32: Int32 = 6 alias GGUF_TYPE_BOOL: Int32 = 7 alias GGUF_TYPE_STRING: Int32 = 8 alias GGUF_TYPE_ARRAY: Int32 = 9 alias GGUF_TYPE_UINT64: Int32 = 10 alias GGUF_TYPE_INT64: Int32 = 11 alias GGUF_TYPE_FLOAT64: Int32 = 12 alias GGUF_TYPE_COUNT: Int32 = 13 # Marks the end of the enum. var _value: Int32 fn __is__(self, other: Self) -> Bool: """Checks if this `GGUFType` is the same as `other`. Args: other: `GGUFType` to check equality against. Returns `True` if the `GGUFType`s are the same and `False` otherwise. 
""" return self._value == other._value @always_inline fn dispatch[ T: AnyType, func: fn[type: DType] () raises capturing -> T ](self) raises -> T: if self is Self.GGUF_TYPE_UINT8: return func[DType.uint8]() if self is Self.GGUF_TYPE_INT8: return func[DType.int8]() if self is Self.GGUF_TYPE_UINT16: return func[DType.uint16]() if self is Self.GGUF_TYPE_INT16: return func[DType.int16]() if self is Self.GGUF_TYPE_UINT32: return func[DType.uint32]() if self is Self.GGUF_TYPE_INT32: return func[DType.int32]() if self is Self.GGUF_TYPE_UINT64: return func[DType.uint64]() if self is Self.GGUF_TYPE_INT64: return func[DType.int64]() if self is Self.GGUF_TYPE_FLOAT32: return func[DType.float32]() if self is Self.GGUF_TYPE_FLOAT64: return func[DType.float64]() if self is Self.GGUF_TYPE_BOOL: return func[DType.bool]() # GGUF_TYPE_STRING and GGUF_TYPE_ARRAY must be handled separately. raise "only GGUF types corresponding to dtypes are supported" @value struct GGUFValue: alias _type = Variant[ UInt8, Int8, UInt16, Int16, UInt32, Int32, UInt64, Int64, Float32, Float64, Bool, GGUFString, GGUFArray, ] var _value: Self._type @staticmethod fn from_dtype[type: DType](value: Scalar[type]) raises -> Self: @parameter if type.is_bool(): # Treat `Bool` as an exception since it isn't `Scalar[DType.bool]`. return Self(Bool(value)) else: return Self(value) @always_inline fn destroy(owned self): if self._value.isa[GGUFString](): self._value.unsafe_get[GGUFString]()[].destroy() elif self._value.isa[GGUFArray](): self._value.unsafe_get[GGUFArray]()[].destroy() @always_inline fn float(self) raises -> Float64: if self._value.isa[Float32](): return self._value.unsafe_get[Float32]()[].cast[DType.float64]() if self._value.isa[Float64](): return self._value.unsafe_get[Float64]()[] raise "GGUFValue is not a float32 or float64" fn __int__(self) raises -> Int: if self._value.isa[Int8](): return int(self._value.unsafe_get[Int8]()[]) if self._value.isa[UInt8](): return int(self._value.unsafe_get[UInt8]()[]) if self._value.isa[UInt16](): return int(self._value.unsafe_get[UInt16]()[]) if self._value.isa[Int16](): return int(self._value.unsafe_get[Int16]()[]) if self._value.isa[UInt32](): return int(self._value.unsafe_get[UInt32]()[]) if self._value.isa[Int32](): return int(self._value.unsafe_get[Int32]()[]) if self._value.isa[Int64](): return int(self._value.unsafe_get[Int64]()[]) if self._value.isa[Float32](): return int(self._value.unsafe_get[Float32]()[]) if self._value.isa[Bool](): return int(self._value.unsafe_get[Bool]()[]) if self._value.isa[GGUFString](): raise "can't convert GGUFString to int" if self._value.isa[GGUFArray](): raise "can't convert GGUFArray to int" raise "unknown GGUFValue" fn __str__(self) -> String: if self._value.isa[Int8](): return str(self._value.unsafe_get[Int8]()[]) if self._value.isa[UInt8](): return str(self._value.unsafe_get[UInt8]()[]) if self._value.isa[UInt16](): return str(self._value.unsafe_get[UInt16]()[]) if self._value.isa[Int16](): return str(self._value.unsafe_get[Int16]()[]) if self._value.isa[UInt32](): return str(self._value.unsafe_get[UInt32]()[]) if self._value.isa[Int32](): return str(self._value.unsafe_get[Int32]()[]) if self._value.isa[Int64](): return str(self._value.unsafe_get[Int64]()[]) if self._value.isa[Float32](): return str(self._value.unsafe_get[Float32]()[]) if self._value.isa[Bool](): return str(self._value.unsafe_get[Bool]()[]) if self._value.isa[GGUFString](): return str(self._value.unsafe_get[GGUFString]()[]) if self._value.isa[GGUFArray](): return 
str(self._value.unsafe_get[GGUFArray]()[]) return "<unknown>" @value struct GGUFKV: # The key of the metadata. It is a standard GGUF string, with the following caveats: # - It must be a valid ASCII string. # - It must be a hierarchical key, where each segment is `lower_snake_case` # and separated by a `.`. # - It must be at most 2^16-1/65535 bytes long. # Any keys that do not follow these rules are invalid. var key: GGUFString var value: GGUFValue @always_inline fn destroy(owned self): self.key.destroy() self.value.destroy() fn __str__(self) -> String: return str(self.key) + ": " + str(self.value) @value @register_passable("trivial") struct GGUFHeader: # Magic number to announce that this is a GGUF file. # Must be `GGUF` at the byte level: `0x47` `0x47` `0x55` `0x46`. # Your executor might do little-endian byte order, so it might be # checking for 0x46554747 and letting the endianness cancel out. # Consider being *very* explicit about the byte order here. var magic: StaticTuple[UInt8, 4] # The version of the format implemented. # Must be `3` for version described in this spec, which introduces big-endian support. # # This version should only be increased for structural changes to the format. # Changes that do not affect the structure of the file should instead # update the metadata to signify the change. var version: UInt32 # This number of tensors in the file is explicit, instead of being included # in the metadata, to ensure it is always present for loading the tensors. var n_tensors: UInt64 # The number of metadata key-value pairs. var n_kv: UInt64 fn __init__(inout self): self.magic = StaticTuple[UInt8, 4]( ord("N"), ord("U"), ord("L"), ord("L") ) self.version = 0 self.n_tensors = 0 self.n_kv = 0 fn __str__(self) -> String: return ( "version: " + str(self.version) + "\nn_tensors: " + str(self.n_tensors) + "\nn_kv: " + str(self.n_kv) ) @value @register_passable("trivial") struct GGUFTensorInfo: alias GGML_MAX_DIMS: Int = 4 # The name of the tensor. It is a standard GGUF string, with the caveat # that it must be at most 64 bytes long. var name: GGUFString # The number of dimensions in the tensor. # Currently at most 4, but this may change in the future. var n_dims: UInt32 var ne: StaticTuple[UInt64, Self.GGML_MAX_DIMS] var type: GGMLType # The offset of the tensor's data in this file in bytes. # # This offset is relative to `tensor_data`, not to the start # of the file, to make it easier for writers to write the file. # R<fim_middle>eaders should consider exposing this offset relative to the # file to make it easier to read the data. # # Must be a multiple of `ALIGNMENT`. var offset: UInt64 # For writing API. var data: DTypePointer[DType.invalid] var size: Int # Padding for aligning columns when printing tensor info alias name_padding = 32 alias type_padding = 8 fn __str__(self) -> String: # Create padding to line up tensor name, type, and dims. 
var res = str(self.name) var sep = self.name_padding - len(res) if sep < 1: sep = 1 var spaces = String() for _ in range(sep): spaces += " " var type_str = str(self.type) var type_sep = self.type_padding - len(type_str) if type_sep < 1: type_sep = 1 var type_spaces = String() for _ in range(type_sep): type_spaces += " " res += spaces + str(self.type) + type_spaces + "[ " for i in range(self.n_dims): res += str(self.ne[i]) res += " " res += "]" return res @always_inline fn destroy(owned self): self.name.destroy() self.data.free() @always_inline fn num_bytes(self) raises -> Int: var ne = self.ne var num_elements = ne[0] * ne[1] * ne[2] * ne[3] var type_trait = self.type.type_trait() return int(num_elements * type_trait.type_size // type_trait.blck_size) fn tensor_dims(self) -> List[Int]: """Converts from GGML `ne` to dims compatible with stdlib `Tensor`. Returns: A `List` of dims compatible with stdlib `TensorShape`. """ var n_dims = int(self.n_dims) var dims = List[Int](capacity=n_dims) for i in range(n_dims):
max/examples/graph-api/pipelines/weights/gguf.πŸ”₯
true
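`GGUFFile` computes the data-section size by rounding each tensor's byte count up to the file alignment; the `pad` helper `(x + n - 1) & ~(n - 1)` is the standard round-up-to-a-power-of-two-multiple trick. A small Python sketch of that size computation (the per-tensor byte counts are made-up values standing in for `GGUFTensorInfo.num_bytes()`):

```python
def pad(x: int, n: int) -> int:
    """Round x up to the next multiple of n; n must be a power of two."""
    return (x + n - 1) & ~(n - 1)

GGUF_DEFAULT_ALIGNMENT = 32

tensor_num_bytes = [18, 210, 4096]  # hypothetical per-tensor sizes in bytes

size = sum(pad(nb, GGUF_DEFAULT_ALIGNMENT) for nb in tensor_num_bytes)
print(size)  # pad(18)=32, pad(210)=224, pad(4096)=4096 -> 4352
```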
<fim_suffix>type]() elif key == "ffn_gate": tensor_ref = self.w1[type]() elif key == "ffn_down": tensor_ref = self.w2[type]() elif key == "ffn_up": tensor_ref = self.w3[type]() elif key == "attn_norm": tensor_ref = self.rms_att[type]() elif key == "ffn_norm": tensor_ref = self.rms_ffn[type]() else: raise "key not found" var ptr = DTypePointer[type].alloc(tensor_ref.nelems()) var layer_offset = layer_idx.value()[] * tensor_ref.nelems() if layer_idx else 0 memcpy(ptr, tensor_ref.offset + layer_offset, tensor_ref.nelems()) return Tensor(tensor_ref.shape, ptr) fn hyperparams(self) raises -> LlamaHParams: return LlamaHParams( dims=self.config.dim, n_layers=self.config.n_layers, n_heads=self.config.n_heads, norm_eps=1e-5, n_kv_heads=self.config.n_kv_heads, vocab_size=self.config.vocab_size, head_dim=self.config.dim // self.config.n_heads, n_rep=self.config.n_heads // self.config.n_kv_heads, ) <fim_prefix># ===----------------------------------------------------------------------=== # # Copyright (c) 2024, Modular Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ===----------------------------------------------------------------------=== # """A parser for the Llama 2 weights as provided for karpathy/llama2.c.""" from memory import memcpy from collections import Optional from pathlib import Path from tensor import Tensor, TensorShape from .loadable_model import LlamaHParams, LoadableModel @value @register_passable("trivial") struct Config: var dim: Int # transformer dimension var hidden_dim: Int # for ffn layers var n_layers: Int # number of layers var n_heads: Int # number of query heads var n_kv_heads: Int # number of key/value heads (can be < query heads because of multiquery) var vocab_size: Int # vocabulary size, usually 256 (byte-level) var seq_len: Int # max sequence length var head_size: Int # dimension of attention head activations @value struct TensorRef[type: DType]: # Not owned by this `TensorRef`. var offset: DTypePointer[type] var shape: TensorShape @always_inline fn nelems(self) -> Int: # Number of elements in a single tensor. return self.shape.num_elements() @value struct LlamaCFile(LoadableModel): """Checkpoint file from karpathy/llama2.c. This is a simple format that groups each "type" of weight in one large contiguous block. So for example wq layer 0 weights start at some offset N, then wq layer 1 weights immediately follow, and so on. See: https://github.com/karpathy/llama2.c/blob/d9862069e7ef665fe6309e3c17398ded2f121bf5/run.c#L111. 
""" var config: Config var weights_ptr: DTypePointer[DType.uint8] fn __init__(inout self, model_path: Path) raises: with open(model_path, "r") as f: @parameter @always_inline fn read_int32() raises -> Int32: var bytes_tensor = Tensor[DType.uint8]( f.read_bytes(sizeof[Int32]()) ) var result = bytes_tensor.unsafe_ptr().bitcast[ DType.int32 ]().load() _ = bytes_tensor^ return result var dim = read_int32() var hidden_dim = read_int32() var n_layers = read_int32() var n_heads = read_int32() var n_kv_heads = read_int32() var vocab_size = read_int32() if vocab_size < 0: raise "negative vocab size unsupported" <fim_middle> var seq_len = read_int32() self.config = Config( int(dim), int(hidden_dim), int(n_layers), int(n_heads), int(n_kv_heads), int(vocab_size), int(seq_len), head_size=int(dim // n_heads), ) var bytes_tensor = Tensor[DType.uint8](f.read_bytes()) self.weights_ptr = bytes_tensor._steal_ptr() fn __moveinit__(inout self, owned existing: Self): self.config = existing.config self.weights_ptr = existing.weights_ptr fn __del__(owned self): self.weights_ptr.free() fn token_embd[type: DType](self) -> TensorRef[type]: return TensorRef[type]( self.weights_ptr.bitcast[type](), TensorShape(self.config.vocab_size, self.config.dim), ) fn rms_att[type: DType](self) -> TensorRef[type]: return TensorRef( self.token_embd[type]().offset + self.token_embd[type]().nelems(), TensorShape(self.config.dim), ) fn wq[type: DType](self) -> TensorRef[type]: return TensorRef( self.rms_att[type]().offset + (self.config.n_layers * self.rms_att[type]().nelems()), TensorShape( self.config.n_heads * self.config.head_size, self.config.dim ), ) fn wk[type: DType](self) -> TensorRef[type]: return TensorRef( self.wq[type]().offset + (self.config.n_layers * self.wq[type]().nelems()), TensorShape( self.config.n_kv_heads * self.config.head_size, self.config.dim ), ) fn wv[type: DType](self) -> TensorRef[type]: return TensorRef( self.wk[type]().offset + (self.config.n_layers * self.wk[type]().nelems()), TensorShape( self.config.n_kv_heads * self.config.head_size, self.config.dim ), ) fn wo[type: DType](self) -> TensorRef[type]: return TensorRef( self.wv[type]().offset + (self.config.n_layers * self.wv[type]().nelems()), TensorShape( self.config.dim, self.config.n_heads * self.config.head_size ), ) fn rms_ffn[type: DType](self) -> TensorRef[type]: return TensorRef( self.wo[type]().offset + (self.config.n_layers * self.wo[type]().nelems()), TensorShape(self.config.dim), ) fn w1[type: DType](self) -> TensorRef[type]: return TensorRef( self.rms_ffn[type]().offset + (self.config.n_layers * self.rms_ffn[type]().nelems()), TensorShape(self.config.hidden_dim, self.config.dim), ) fn w2[type: DType](self) -> TensorRef[type]: return TensorRef( self.w1[type]().offset + (self.config.n_layers * self.w1[type]().nelems()), TensorShape(self.config.dim, self.config.hidden_dim), ) fn w3[type: DType](self) -> TensorRef[type]: return TensorRef( self.w2[type]().offset + (self.config.n_layers * self.w2[type]().nelems()), TensorShape(self.config.hidden_dim, self.config.dim), ) fn rms_final[type: DType](self) -> TensorRef[type]: return TensorRef( self.w3[type]().offset + (self.config.n_layers * self.w3[type]().nelems()), TensorShape(self.config.dim), ) fn wcls[type: DType](self) -> TensorRef[type]: return TensorRef( self.rms_final[type]().offset + self.rms_final[type]().nelems() # Skip what used to be freq_cis_{real,img} + int(2 * (self.config.seq_len * self.config.head_size // 2)), TensorShape(self.config.vocab_size, self.config.dim), ) fn get[ type: 
DType ]( inout self, key: String, layer_idx: Optional[Int] = None ) raises -> Tensor[type]: # Heap allocates and copies output, which is owned by the caller. var tensor_ref: TensorRef[type] if key == "token_embd": tensor_ref = self.token_embd[type]() elif key == "output_norm": tensor_ref = self.rms_final[type]() elif key == "output": if self.config.vocab_size > 0: tensor_ref = self.token_embd[type]() else: tensor_ref = self.wcls[type]() elif key == "attn_q": tensor_ref = self.wq[type]() elif key == "attn_k": tensor_ref = self.wk[type]() elif key == "attn_v": tensor_ref = self.wv[type]() elif key == "attn_output": tensor_ref = self.wo[
max/examples/graph-api/pipelines/weights/llama2checkpoint.πŸ”₯
true
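The llama2.c checkpoint groups each weight kind into one contiguous block with all layers back to back, so every accessor above reduces to "previous block's start plus n_layers times the previous block's per-layer element count". A toy Python sketch of that offset arithmetic, under an invented miniature config (real checkpoints follow the same pattern at much larger sizes):

```python
# Hypothetical tiny config mirroring the Config fields used above.
dim, n_layers, n_heads, n_kv_heads, vocab_size = 8, 2, 2, 2, 100
head_size = dim // n_heads

offset = 0
layout = []

def block(name, nelems_per_layer, layers=1):
    """Record a weight block starting at the current offset and advance past it."""
    global offset
    layout.append((name, offset, nelems_per_layer))
    offset += layers * nelems_per_layer

block("token_embd", vocab_size * dim)             # single block
block("rms_att", dim, n_layers)                   # one vector per layer
block("wq", n_heads * head_size * dim, n_layers)
block("wk", n_kv_heads * head_size * dim, n_layers)
# ... wv, wo, rms_ffn, w1, w2, w3 and rms_final continue the same pattern.

for name, start, per_layer in layout:
    print(f"{name}: starts at element {start}, {per_layer} elements per layer")
```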
<filename>max/examples/graph-api/pipelines/weights/loadable_model.πŸ”₯ <fim_suffix> tensor for `key` at layer `layer_idx`, possibly seeking the file. `self` is `inout` here due to implementations that seek a file pointer. Args: key: Used to look up the tensor in the weights file. layer_idx: An optional layer index for this key. Returns: A tensor corresponding to `key` and `layer_idx` and containing a copy of data from the weights file. Raises: An error for invalid key/layer_idx arguments. """ ... fn hyperparams(self) raises -> LlamaHParams: """Retrieves model-specific hyperparameters. Returns: Hyperparameters for the model corresponding to this weights file. """ ... <fim_prefix># ===----------------------------------------------------------------------=== # # Copyright (c) 2024, Modular Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ===----------------------------------------------------------------------=== # """A module containing generic weight loading traits and types.""" from pathlib import Path from collections import Optional from tensor import Tensor @value @register_passable("trivial") struct LlamaHParams: """Hyperparameters defining Llama model instances.""" var dims: Int """Channel dimension of input features.""" var n_layers: Int """Number of transformer layers.""" <fim_middle>var n_heads: Int """Number of attention heads.""" var vocab_size: Int """Number of tokens in vocabulary.""" var norm_eps: Float64 """Epsilon used in normalization layers.""" var n_kv_heads: Int """Number of key and value heads.""" var head_dim: Int """Channel dimension of per-head features in attention.""" var n_rep: Int """Number of repetitions for local heads.""" trait LoadableModel(Movable): """A trait for the weights file of an ML model. Types implementing `LoadableModel` are constructible from a `Path`, have a `get` method for tensor data, and know their own model hyperparameters. """ fn __init__(inout self, path: Path) raises: """Initializes the weights file from a path. Args: path: Filepath to the model's weights file. """ ... fn get[ type: DType ]( inout self, key: String, layer_idx: Optional[Int] = None ) raises -> Tensor[type]: """Returns a
max/examples/graph-api/pipelines/weights/loadable_model.πŸ”₯
true
<filename>max/examples/graph-api/pipelines/weights/__init__.πŸ”₯ <fim_suffix># See the License for the specific language governing permissions and # limitations under the License. # ===----------------------------------------------------------------------=== # <fim_prefix># ===----------------------------------------------------------------------=== # # Copyright (c) 2024, Modular Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY <fim_middle>KIND, either express or implied.
max/examples/graph-api/pipelines/weights/__init__.πŸ”₯
true
<filename>max/examples/inference/bert-mojo-torchscript/simple-inference.πŸ”₯ <fim_suffix>=input_specs ) return model def read_input() -> String: USAGE = ( 'Usage: ./run.mojo <str> \n\t e.g., ./run.mojo "Paris is the [MASK] of' ' France"' ) argv = sys.argv() if len(argv) != 2: raise Error("\nPlease enter a prompt." + "\n" + USAGE) return sys.argv()[1] def main(): # Import HF Transformers dependency (for the tokenizer) transformers = Python.import_module("transformers") # Read user prompt, create an InferenceSession, and load the model text = read_input() session = InferenceSession() model = load_model(session) # Run inference decoded_result = execute(model, text, transformers) print("input text: ", text) print("filled mask: ", text.replace("[MASK]", decoded_result)) <fim_prefix>#!/usr/bin/env mojo # ===----------------------------------------------------------------------=== # # Copyright (c) 2024, Modular Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the Li<fim_middle>cense for the specific language governing permissions and # limitations under the License. # ===----------------------------------------------------------------------=== # from max.engine import InputSpec, InferenceSession, Model from python import Python from tensor import TensorSpec import sys def execute(model: Model, text: String, transformers: PythonObject) -> String: # The model was compiled with a maximum seqlen, so read that out from the model output metadata output_spec = model.get_model_output_metadata()[0] max_seqlen = output_spec[1].value()[] tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-uncased") inputs = tokenizer( text=text, add_special_tokens=True, padding="max_length", truncation=True, max_length=max_seqlen, return_tensors="np", ) input_ids = inputs["input_ids"] token_type_ids = inputs["token_type_ids"] attention_mask = inputs["attention_mask"] outputs = model.execute( "input_ids", input_ids, "token_type_ids", token_type_ids, "attention_mask", attention_mask, ) logits = outputs.get[DType.float32]("result0") mask_idx = -1 for i in range(len(input_ids[0])): if input_ids[0][i] == tokenizer.mask_token_id: mask_idx = i predicted_token_id = logits.argmax()[mask_idx] return tokenizer.decode( predicted_token_id, skip_special_tokens=True, clean_up_tokenization_spaces=True, ) def load_model(session: InferenceSession) -> Model: batch = 1 seqlen = 128 input_ids_spec = TensorSpec(DType.int64, batch, seqlen) token_type_ids_spec = TensorSpec(DType.int64, batch, seqlen) attention_mask_spec = TensorSpec(DType.int64, batch, seqlen) input_specs = List[InputSpec]() input_specs.append(input_ids_spec) input_specs.append(attention_mask_spec) input_specs.append(token_type_ids_spec) model = session.load( "../../models/bert-mlm.torchscript", input_specs
max/examples/inference/bert-mojo-torchscript/simple-inference.πŸ”₯
true
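The pipeline above follows the usual masked-language-model flow: pad and truncate the prompt to the compiled sequence length, run the model, take the argmax of the logits at the [MASK] position, and decode that token id. For comparison, the same flow written directly against the Hugging Face PyTorch API (a reference sketch, not the MAX execution path):

```python
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")

text = "Paris is the [MASK] of France"
inputs = tokenizer(
    text, return_tensors="pt", padding="max_length", truncation=True, max_length=128
)

with torch.no_grad():
    logits = model(**inputs).logits  # (1, seq_len, vocab_size)

mask_idx = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero()[0].item()
predicted_id = logits[0, mask_idx].argmax().item()
print(tokenizer.decode([predicted_id]))  # e.g. "capital"
```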
<fim_suffix>0] ) @always_inline fn memcpy_to_numpy[ type: DType ](array: PythonObject, tensor: Tensor[type]) raises: var dst = numpy_data_pointer[type](array) var src = tensor._ptr var length = tensor.num_elements() memcpy(dst, src, length) @always_inline fn memcpy_from_numpy[ type: DType ](array: PythonObject, tensor: Tensor[type]) raises: var src = numpy_data_pointer[type](array) var dst = tensor._ptr var length = tensor.num_elements() memcpy(dst, src, length) @always_inline fn shape_to_python_list(shape: TensorShape) raises -> PythonObject: var python_list = python.Python.evaluate("list()") for i in range(shape.rank()): _ = python_list.append(shape[i]) return python_list^ @always_inline fn get_np_dtype[type: DType](np: PythonObject) raises -> PythonObject: @parameter if type.is_float32(): return np.float32 elif type.is_int32(): return np.int32 elif type.is_int64(): return np.int64 elif type.is_uint8(): return np.uint8 raise "Unknown datatype" @always_inline fn tensor_to_numpy[ type: DType ](tensor: Tensor[type], np: PythonObject) raises -> PythonObject: var shape = shape_to_python_list(tensor.shape()) var tensor_as_numpy = np.zeros(shape, get_np_dtype[type](np)) _ = shape^ memcpy_to_numpy(tensor_as_numpy, tensor) return tensor_as_numpy^ @always_inline fn numpy_to_tensor[type: DType](array: PythonObject) raises -> Tensor[type]: var shape = List[Int]() var array_shape = array.shape for dim in array_shape: shape.append(dim) var out = Tensor[type](shape) memcpy_from_numpy(array, out) return out^ <fim_prefix># ===----------------------------------------------------------------------=== # # Copyright (c) 2024, Modular Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT <fim_middle>WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ===----------------------------------------------------------------------=== # import python from memory import memcpy from collections import List from tensor import Tensor, TensorShape @always_inline fn numpy_data_pointer[ type: DType ](numpy_array: PythonObject) raises -> DTypePointer[type]: return DTypePointer[type]( address=numpy_array.__array_interface__["data"][
max/examples/inference/stable-diffusion-mojo-onnx/python_utils.πŸ”₯
true
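Both these helpers and the AutoTokenizer wrapper earlier rely on numpy's `__array_interface__`, whose `"data"` entry exposes the raw address of the array's buffer; the Mojo side turns that address into a typed pointer and memcpy's through it. A small Python-only check of what that address points at, using ctypes purely for illustration:

```python
import ctypes
import numpy as np

arr = np.arange(6, dtype=np.int64)
addr = arr.__array_interface__["data"][0]  # raw address of the numpy buffer

# View the same memory through ctypes -- this is the pointer the Mojo memcpy uses.
view = (ctypes.c_int64 * arr.size).from_address(addr)
print(list(view))   # [0, 1, 2, 3, 4, 5]

view[0] = 42        # writes through the raw pointer ...
print(arr[0])       # ... are visible in the numpy array: 42
```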
<filename>max/examples/inference/stable-diffusion-mojo-onnx/scheduler.πŸ”₯ <fim_suffix>end(cumprod) def step( inout self, borrowed model_output: FloatTensor, timestep: Int, borrowed sample: FloatTensor, ) -> FloatTensor: var previous_timestep = timestep - self.step_ratio if self.counter == 1: # Special case: the second timestep is repeated. previous_timestep = timestep timestep = timestep + self.step_ratio else: # TODO: clear out old tensors. # For some reason, doing this leads to a crash. # self.previous_outputs = self.previous_outputs[-3:] self.previous_outputs.append(FloatTensor(model_output)) averaged_output = FloatTensor(model_output) if len(self.previous_outputs) == 1 and self.counter == 0: # First input, there is no data to average. pass elif len(self.previous_outputs) == 1 and self.counter == 1: averaged_output = ( averaged_output + self.previous_outputs[-1] ) / 2.0 elif len(self.previous_outputs) == 2: averaged_output = ( 3 * self.previous_outputs[-1] - self.previous_outputs[-2] ) / 2.0 elif len(self.previous_outputs) == 3: averaged_output = ( 23 * self.previous_outputs[-1] - 16 * self.previous_outputs[-2] + 5 * self.previous_outputs[-3] ) / 12.0 else: averaged_output = (1.0 / 24.0) * ( 55 * self.previous_outputs[-1] - 59 * self.previous_outputs[-2] + 37 * self.previous_outputs[-3] - 9 * self.previous_outputs[-4] ) previous_sample = self._get_previous_sample( sample, timestep, previous_timestep, averaged_output ) self.counter += 1 return previous_sample def _get_previous_sample( inout self, borrowed sample: FloatTensor, timestep: Int, previous_timestep: Int, borrowed model_output: FloatTensor, ) -> FloatTensor: alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_previous = ( self.alphas_cumprod[previous_timestep] if previous_timestep >= 0 else self.alphas_cumprod[0] ) beta_prod_t = 1 - alpha_prod_t beta_prod_t_previous = 1 - alpha_prod_t_previous sample_coeff = (alpha_prod_t_previous / alpha_prod_t) ** (0.5) model_output_denom_coeff = alpha_prod_t * beta_prod_t_previous ** ( 0.5 ) + (alpha_prod_t * beta_prod_t * alpha_prod_t_previous) ** (0.5) previous_sample = ( sample_coeff * sample - (alpha_prod_t_previous - alpha_prod_t) * model_output / model_output_denom_coeff ) return previous_sample <fim_prefix># ===----------------------------------------------------------------------=== # # Copyright (c) 2024, Modular Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # This code is directly ported from the hugging face diffusers library. # # Copyright 2024 Zhejiang University Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# ===----------------------------------------------------------------------=== #
"""A simple helper implementation of a PNDM scheduler for stable diffusion."""

from tensor import Tensor

alias FloatTensor = Tensor[DType.float32]


struct Scheduler:
    alias training_steps = 1000
    alias init_noise_sigma = 1.0
    alias beta_start = 0.00085
    alias beta_end = 0.012

    var counter: Int
    var step_ratio: Int
    var timesteps: List[Int]
    var alphas_cumprod: List[Float32]
    var previous_outputs: List[FloatTensor]

    def __init__(inout self, inference_steps: Int):
        self.counter = 0
        self.previous_outputs = List[FloatTensor]()

        # Generate timesteps.
        self.step_ratio = Self.training_steps // inference_steps
        self.timesteps = List[Int]()
        for i in range(1, Self.training_steps, self.step_ratio):
            self.timesteps.append(i)
        self.timesteps.reverse()

        # Duplicate the second timestep because this scheduler skips the prk steps.
        if len(self.timesteps) >= 2:
            self.timesteps.insert(1, self.timesteps[1])

        # Generate the alpha cumulative product (scaled-linear beta schedule).
        self.alphas_cumprod = List[Float32]()
        var start = Self.beta_start**0.5
        var end = Self.beta_end**0.5
        var cumprod = 1.0
        for i in range(Self.training_steps):
            var beta = start + (end - start) * (i / Self.training_steps)
            var alpha = 1.0 - (beta**2)
<fim_middle> cumprod *= alpha
        self.alphas_cumprod.app
max/examples/inference/stable-diffusion-mojo-onnx/scheduler.πŸ”₯
true
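A note on the update rule in scheduler.πŸ”₯ above: the weighted combinations of past model outputs are the standard Adams-Bashforth linear multistep coefficients, and the final update appears to be the PNDM transfer formula as used in the diffusers implementation this file is ported from. A compact restatement, with symbols chosen here to mirror the code (alpha_bar_t for alphas_cumprod[timestep], epsilon_bar for the averaged model output, delta for step_ratio):

\[
\bar{\epsilon} = \tfrac{1}{24}\bigl(55\,\epsilon_{k} - 59\,\epsilon_{k-1} + 37\,\epsilon_{k-2} - 9\,\epsilon_{k-3}\bigr)
\]

\[
x_{t-\delta} = \sqrt{\frac{\bar{\alpha}_{t-\delta}}{\bar{\alpha}_t}}\,x_t
  - \frac{(\bar{\alpha}_{t-\delta} - \bar{\alpha}_t)\,\bar{\epsilon}}
         {\bar{\alpha}_t\sqrt{1-\bar{\alpha}_{t-\delta}} + \sqrt{\bar{\alpha}_t\,(1-\bar{\alpha}_t)\,\bar{\alpha}_{t-\delta}}}
\]

The earlier branches in step() fall back to lower-order combinations (the raw output, a two-point average, and the two- and three-step Adams-Bashforth formulas) while fewer previous outputs are available.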
<fim_suffix> a & b along the outermost dimension."""
    # Generate return shape.
    out_shape = List[Int]()
    out_shape.append(a.shape()[0] + b.shape()[0])
    for i in range(1, a.shape().rank()):
        out_shape.append(a.shape()[i])

    # Allocate return tensor.
    out = Tensor[dtype](TensorShape(out_shape))

    # Fill data.
    memcpy(out.unsafe_ptr(), a.unsafe_ptr(), a.num_elements())
    memcpy(
        out.unsafe_ptr() + a.num_elements(), b.unsafe_ptr(), b.num_elements()
    )
    return out


def split[dtype: DType](borrowed x: Tensor[dtype], i: Int) -> Tensor[dtype]:
    """Return the ith slice of the outermost dim; i.e., x[i, :, :, ..., :]."""
    # Generate return shape.
    shape = List[Int]()
    shape.append(1)
    for dim in range(1, x.shape().rank()):
        shape.append(x.shape()[dim])

    # Allocate return tensor.
    ret = Tensor[dtype](TensorShape(shape))

    # Fill data: copy one outermost slice, offset by the slice index.
    slice_elements = x.num_elements() // x.shape()[0]
    memcpy(
        ret.unsafe_ptr(),
        x.unsafe_ptr() + slice_elements * i,
        slice_elements,
    )
    return ret


def main():
    # Parse args.
    USAGE = (
        "Usage: ./text-to-image.πŸ”₯ --prompt <str> [--negative-prompt <str>]"
        " [--num-steps <int>] [--seed <int>] [--model-dir <str>] [-o <str>]"
    )
    argv = sys.argv()
    if len(argv) % 2 == 0:
        print(USAGE)
        raise Error("All options require an argument")

    # Set default values.
    seed()
    prompt = str("")
    negative_prompt = str("")
    num_steps = 25
    model_dir = str(DEFAULT_MODEL_DIR)
    output = str("output.png")

    for i in range(1, len(argv), 2):
        if argv[i] == "--prompt":
            prompt = argv[i + 1]
        elif argv[i] == "--negative-prompt":
            negative_prompt = argv[i + 1]
        elif argv[i] == "--num-steps":
            num_steps = atol(argv[i + 1])
        elif argv[i] == "--seed":
            seed(atol(argv[i + 1]))
        elif argv[i] == "--model-dir":
            model_dir = argv[i + 1]
        elif argv[i] == "-o" or argv[i] == "--output":
            output = argv[i + 1]
        else:
            print(USAGE)
            raise Error(str("Unknown option: ") + argv[i])

    # The only required argument is --prompt.
    if prompt == "":
        print(USAGE)
        raise Error("--prompt option is required")

    # Import python modules.
    np = Python.import_module("numpy")
    Image = Python.import_module("PIL.Image")
    transformers = Python.import_module("transformers")

    # Compile & load models - this may take a few minutes.
    print("Loading and compiling models...")
    session = InferenceSession()
    txt_encoder = session.load(model_dir + "/text_encoder/model.onnx")
    img_decoder = session.load(model_dir + "/vae_decoder/model.onnx")
    img_diffuser = session.load(model_dir + "/unet/model.onnx")
    print("Models compiled.\n")

    # Tokenize inputs and run through text encoder.
    print("Processing input...")
    tokenizer = transformers.CLIPTokenizer.from_pretrained(
        model_dir + "/tokenizer"
    )
    max_length = int(tokenizer.model_max_length)
    prompt_p = tokenizer(prompt, padding="max_length", max_length=max_length)
    prompt_n = tokenizer(
        negative_prompt, padding="max_length", max_length=max_length
    )

    prompt_p_tensor = python_utils.numpy_to_tensor[DType.int32](
        np.array(prompt_p["input_ids"], dtype=np.int32)
    )
    prompt_n_tensor = python_utils.numpy_to_tensor[DType.int32](
        np.array(prompt_n["input_ids"], dtype=np.int32)
    )

    input_ids = vstack[DType.int32](
        prompt_p_tensor.reshape(TensorShape(1, max_length)),
        prompt_n_tensor.reshape(TensorShape(1, max_length)),
    )

    encoder_output = txt_encoder.execute("input_ids", input_ids)
    encoder_hidden_states = encoder_output.get[DType.float32](
        "last_hidden_state"
    )
    print("Input processed.\n")

    # Initialize latent.
    print("Initializing latent...")
    schedule = Scheduler(num_steps)

    # Note: For ONNX, shapes are given in NCHW format.
latent = ( Tensor[DType.float32].randn( TensorShape(1, LATENT_CHANNELS, LATENT_HEIGHT, LATENT_WIDTH) ) * Scheduler.init_noise_sigma ) # Loop through diffusion model. for i in range(len(schedule.timesteps)): print("\rGenerating image:", i, "/", num_steps, end="") # Duplicate latent to create full sample. sample = vstack(latent, latent) # Execute the diffusion model with bs=2. Both batches have same primary input and # timestep, but the encoder_hidden_states (primary prompt vs negative) differs. timestep_tensor = Tensor[DType.int64](TensorSpec(DType.int64, 1)) timestep_tensor[0] = int(schedule.timesteps[i]) diffuser_output = img_diffuser.execute( "sample", sample, "encoder_hidden_states", encoder_hidden_states, "timestep", timestep_tensor, ) noise_pred = diffuser_output.get[DType.float32]("out_sample") # Merge conditioned & unconditioned outputs. noise_pred_text = split[DType.float32](noise_pred, 0) noise_pred_uncond = split[DType.float32](noise_pred, 1) noise_pred = noise_pred_uncond + GUIDANCE_SCALE_FACTOR * ( noise_pred_text - noise_pred_uncond ) # Merge latent with previous iteration. latent = schedule.step(noise_pred, schedule.timesteps[i], latent) # Decode finalized latent. print("\n\nDecoding image...") latent = latent * (1 / LATENT_SCALE_FACTOR) decoder_output = img_decoder.execute("latent_sample", latent) decoded = decoder_output.get[DType.float32]("sample") image = ((decoded / 2.0 + 0.5).clip(0, 1) * 255.0).astype[DType.uint8]() np_pixels = python_utils.tensor_to_numpy[DType.uint8](image, np) np_pixels = np_pixels.squeeze().transpose(1, 2, 0) Image.fromarray(np_pixels, "RGB").save(output) print("Image saved to " + output + ".") <fim_prefix>#!/usr/bin/env mojo # ===--------------------------------------<fim_middle>--------------------------------=== # # Copyright (c) 2024, Modular Inc. All rights reserved. # # Licensed under the Apache License v2.0 with LLVM Exceptions: # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ===----------------------------------------------------------------------=== # import sys import random from collections import List from python import Python from random import seed from max.engine import InferenceSession from tensor import Tensor, TensorShape, TensorSpec # Local Mojo imports import python_utils from scheduler import Scheduler var DEFAULT_MODEL_DIR = "../../models/stable-diffusion-onnx" var DESCRIPTION = "Generate an image based on the given prompt." var GUIDANCE_SCALE_FACTOR = 7.5 var LATENT_SCALE_FACTOR = 0.18215 var OUTPUT_HEIGHT = 512 var OUTPUT_WIDTH = 512 var LATENT_WIDTH = OUTPUT_WIDTH // 8 var LATENT_HEIGHT = OUTPUT_HEIGHT // 8 var LATENT_CHANNELS = 4 def vstack[ dtype: DType ](borrowed a: Tensor[dtype], borrowed b: Tensor[dtype]) -> Tensor[dtype]: """Concatenate tensors
max/examples/inference/stable-diffusion-mojo-onnx/text-to-image.πŸ”₯
true
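For the diffusion loop in text-to-image.πŸ”₯ above, the merge of the two batch halves corresponds to the usual classifier-free guidance update, and the final decode undoes the latent scaling before mapping to pixels. Written out with the constants defined at the top of that file (guidance scale s = 7.5, latent scale 0.18215), and with D standing for the VAE decoder and z for the final latent:

\[
\hat{\epsilon} = \epsilon_{\text{uncond}} + s\,\bigl(\epsilon_{\text{text}} - \epsilon_{\text{uncond}}\bigr)
\]

\[
\text{pixels} = 255 \cdot \operatorname{clip}\!\Bigl(\tfrac{1}{2}\,D\bigl(z / 0.18215\bigr) + \tfrac{1}{2},\; 0,\; 1\Bigr)
\]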
<filename>mojo/examples/hello.πŸ”₯ <fim_suffix>):
        print(x)
<fim_prefix># ===----------------------------------------------------------------------=== #
# Copyright (c) 2023, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with<fim_middle> LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
# RUN: %mojo %s | FileCheck %s

# This sample demonstrates some basic Mojo features: the `range` and `print`
# functions available as builtins.

def main():
    # CHECK: Hello Mojo πŸ”₯!
    print("Hello Mojo πŸ”₯!")
    for x in range(9, 0, -3
mojo/examples/hello.πŸ”₯
true