"""
Arrow buffers are recommended to have an alignment and padding of 64 bytes
https://arrow.apache.org/docs/format/Columnar.html#buffer-alignment-and-padding.
"""

alias PADDING = 64
alias ALIGNMENT = 64


fn get_num_bytes_with_padding(num_bytes: Int) -> Int:
    return ((num_bytes + PADDING - 1) // PADDING) * PADDING
arrow.mojo/arrow/util.mojo
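A quick sanity check of the round-up arithmetic in `get_num_bytes_with_padding`; this is an illustrative Python sketch of the same formula, not part of the package:

```python
PADDING = 64

def get_num_bytes_with_padding(num_bytes: int) -> int:
    # Round num_bytes up to the next multiple of PADDING.
    return ((num_bytes + PADDING - 1) // PADDING) * PADDING

assert get_num_bytes_with_padding(1) == 64    # anything under 64 pads up to one unit
assert get_num_bytes_with_padding(64) == 64   # exact multiples are unchanged
assert get_num_bytes_with_padding(65) == 128  # one byte over rolls to the next unit
```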
from arrow.physical_layout.arrow import (
    ArrowFixedWidthVector,
    ArrowIntVector,
)
from arrow.array.bool_array import ArrowBooleanArray
from arrow.buffer.bitmap import Bitmap
from arrow.buffer.offset import OffsetBuffer32, OffsetBuffer64
from arrow.physical_layout.varbinary import ArrowStringVector
arrow.mojo/arrow/__init__.mojo
from arrow.buffer.bitmap import Bitmap


struct ArrowBooleanArray:
    var length: Int
    var null_count: Int
    var _validity: Bitmap
    var _buffer: Bitmap
    var mem_used: Int

    fn __init__(inout self, values: List[Bool]):
        self.length = len(values)
        self.null_count = 0
        self._validity = Bitmap(List(True) * len(values))
        self._buffer = Bitmap(values)
        self.mem_used = self._validity.mem_used + self._buffer.mem_used

    fn __init__(inout self, length: Int):
        self.length = length
        self.null_count = 0
        self._validity = Bitmap(List(True) * length)
        self._buffer = Bitmap(length)
        self.mem_used = self._validity.mem_used + self._buffer.mem_used

    fn __init__(inout self, values: List[Optional[Bool]]):
        self.length = len(values)
        self.null_count = 0
        var validity_list = List[Bool](capacity=len(values))
        var value_list = List[Bool](capacity=len(values))
        for i in range(len(values)):
            if values[i] is None:
                validity_list.append(False)
                # Keep a False placeholder so value slots stay index-aligned
                # with the validity bitmap.
                value_list.append(False)
                self.null_count += 1
            else:
                validity_list.append(True)
                value_list.append(values[i].or_else(False))
        self._validity = Bitmap(validity_list)
        self._buffer = Bitmap(value_list)
        self.mem_used = self._validity.mem_used + self._buffer.mem_used

    fn __len__(self) -> Int:
        return self.length

    fn __getitem__(self, index: Int) raises -> Optional[Bool]:
        if index < 0 or index >= self.length:
            raise Error("index out of range for ArrowBooleanArray")
        if self._validity._unsafe_getitem(index):
            return self._buffer._unsafe_getitem(index)
        return None
arrow.mojo/arrow/array/bool_array.mojo
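The constructor above keeps two parallel bitmaps: validity marks which slots hold a real value, and the value buffer keeps a placeholder for nulls so both stay index-aligned. A Python sketch of that scheme (names are mine, for illustration only):

```python
def build(values):
    """values is a list of True/False/None; None means null."""
    validity = [v is not None for v in values]
    data = [bool(v) for v in values]  # None becomes a False placeholder
    return validity, data

def get(validity, data, i):
    return data[i] if validity[i] else None

validity, data = build([True, None, False])
assert [get(validity, data, i) for i in range(3)] == [True, None, False]
```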
from memory import memset_zero

from arrow.util import ALIGNMENT, get_num_bytes_with_padding


@value
struct BinaryBuffer:
    alias _ptr_type = DTypePointer[DType.uint8]
    var _buffer: Self._ptr_type
    var length: Int
    var mem_used: Int

    fn __init__(inout self, length_unpadded: Int):
        self.length = length_unpadded
        self.mem_used = get_num_bytes_with_padding(length_unpadded)
        self._buffer = Self._ptr_type.alloc(self.mem_used, alignment=ALIGNMENT)
        memset_zero(self._buffer, self.mem_used)

    fn __init__(inout self, values: List[UInt8]):
        self = Self(len(values))
        self._unsafe_set_sequence(0, values)

    @always_inline
    fn _unsafe_setitem(self, index: Int, value: UInt8):
        self._buffer[index] = value

    fn __setitem__(self, index: Int, value: UInt8) raises:
        if index < 0 or index >= self.length:
            raise Error("index out of range for BinaryBuffer")
        self._unsafe_setitem(index, value)

    @always_inline
    fn _unsafe_getitem(self, index: Int) -> UInt8:
        return self._buffer[index]

    fn __getitem__(self, index: Int) raises -> UInt8:
        if index < 0 or index >= self.length:
            raise Error("index out of range for BinaryBuffer")
        return self._unsafe_getitem(index)

    fn _unsafe_set_sequence(self, start: Int, values: List[UInt8]):
        for i in range(len(values)):
            self._unsafe_setitem(start + i, values[i])

    fn set_sequence(self, start: Int, values: List[UInt8]) raises:
        if start < 0 or start + len(values) > self.length:
            raise Error("index out of range for BinaryBuffer")
        self._unsafe_set_sequence(start, values)

    fn _unsafe_get_sequence(self, start: Int, length: Int) -> List[UInt8]:
        var values = List[UInt8](capacity=length)
        for i in range(length):
            values.append(self._unsafe_getitem(start + i))
        return values

    fn get_sequence(self, start: Int, length: Int) raises -> List[UInt8]:
        if start < 0 or start + length > self.length:
            raise Error("index out of range for BinaryBuffer")
        return self._unsafe_get_sequence(start, length)

    fn __len__(self) -> Int:
        return self.length

    # Lifecycle methods
    fn __moveinit__(inout self, owned existing: BinaryBuffer):
        self._buffer = existing._buffer
        self.length = existing.length
        self.mem_used = existing.mem_used

    fn __copyinit__(inout self, existing: BinaryBuffer):
        self.length = existing.length
        self.mem_used = existing.mem_used
        self._buffer = Self._ptr_type.alloc(self.mem_used, alignment=ALIGNMENT)
        for i in range(self.mem_used):
            self._buffer[i] = existing._buffer[i]

    fn __del__(owned self):
        self._buffer.free()
arrow.mojo/arrow/buffer/binary.mojo
from memory.unsafe import Pointer
from memory import memset_zero

from arrow.util import PADDING, ALIGNMENT, get_num_bytes_with_padding


struct Bitmap(StringableRaising):
    """Bitmap according to the Apache Arrow specification, which can be found here.

    Source: https://arrow.apache.org/docs/format/Columnar.html#validity-bitmaps

    The source provides this pseudo code:
    ```
    is_valid[j] -> bitmap[j / 8] & (1 << (j % 8))
    ```

    And the following explanation:

    > We use least-significant bit (LSB) numbering (also known as bit-endianness).
    This means that within a group of 8 bits, we read right-to-left:
    ```
    values = [0, 1, null, 2, null, 3]

    bitmap
    j mod 8   7  6  5  4  3  2  1  0
              0  0  1  0  1  0  1  1
    ```
    """

    alias _ptr_type = DTypePointer[DType.uint8]
    var _buffer: Self._ptr_type
    var length: Int
    var mem_used: Int

    # TODO: maybe buffers shouldn't have length and mem_used, just size.
    # The layouts that use the buffers can keep track of their length.

    fn __init__(inout self, length_unpadded: Int):
        """Creates a new Bitmap that supports at least `length_unpadded` elements.

        Args:
            length_unpadded: The number of elements the Bitmap should support.
                Buffers are typically padded to 32, 64, or 128 bytes, depending
                on the architecture.
        """
        var num_bytes = (length_unpadded + 7) // 8
        var num_bytes_with_padding = get_num_bytes_with_padding(num_bytes)
        self._buffer = Self._ptr_type.alloc(
            num_bytes_with_padding, alignment=ALIGNMENT
        )
        memset_zero(self._buffer, num_bytes_with_padding)
        self.length = length_unpadded
        self.mem_used = num_bytes_with_padding

    fn __init__(inout self, bools: List[Bool]):
        self = Self(len(bools))
        for i in range(len(bools)):
            self._unsafe_setitem(i, bools[i])

    fn _unsafe_setitem(self, index: Int, value: Bool):
        """Doesn't check if index is out of bounds.

        Only sets bits: it assumes the target bit is 0, so it cannot clear a
        bit that is already 1 when `value` is False.
        """
        var byte_index = index // 8
        var bitmask = UInt8(value.__int__()) << (index % 8)
        # OR-ing in the mask only works because the buffer starts zeroed.
        var new_byte = self._buffer[byte_index] | bitmask
        self._buffer[byte_index] = new_byte

    @always_inline
    fn _unsafe_getitem(self, index: Int) -> Bool:
        """Doesn't check if index is out of bounds.

        Follows this pseudo code from the Apache Arrow specification:
        `is_valid[j] -> bitmap[j / 8] & (1 << (j % 8))`
        """
        var byte_index = index // 8
        var bitmask: UInt8 = 1 << (index % 8)
        return ((self._buffer[byte_index] & bitmask)).__bool__()

    fn __getitem__(self, index: Int) raises -> Bool:
        if index < 0 or index >= self.length:
            raise Error("index out of range for Bitmap")
        return self._unsafe_getitem(index)

    fn __len__(self) -> Int:
        return self.length

    fn __del__(owned self):
        self._buffer.free()

    fn __moveinit__(inout self, owned existing: Bitmap):
        self._buffer = existing._buffer
        self.length = existing.length
        self.mem_used = existing.mem_used

    fn __copyinit__(inout self, existing: Bitmap):
        self._buffer = Self._ptr_type.alloc(
            existing.mem_used, alignment=ALIGNMENT
        )
        for i in range(existing.mem_used):
            self._buffer[i] = existing._buffer[i]
        self.length = existing.length
        self.mem_used = existing.mem_used

    fn __str__(self) raises -> String:
        var output: String = "["
        for i in range(self.length):
            output = output + self[i].__str__()
            if i < self.length - 1:
                output = output + ", "
        return output + "]"

    fn to_list(self) raises -> List[Bool]:
        var bools = List[Bool](capacity=self.length)
        for i in range(self.length):
            bools.append(self[i])
        return bools
arrow.mojo/arrow/buffer/bitmap.mojo
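The LSB bit-addressing described in the Bitmap docstring, sketched in Python (illustrative only, not part of the package): bit `j` lives in byte `j // 8` at position `j % 8`, so the docstring's example `values = [0, 1, null, 2, null, 3]` packs into the single byte `0b00101011`:

```python
def is_valid(bitmap: bytes, j: int) -> bool:
    # is_valid[j] -> bitmap[j / 8] & (1 << (j % 8))
    return bool(bitmap[j // 8] & (1 << (j % 8)))

bitmap = bytes([0b00101011])
assert [is_valid(bitmap, j) for j in range(6)] == [True, True, False, True, False, True]
```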
from memory import memset_zero

from arrow.util import ALIGNMENT, get_num_bytes_with_padding


struct DTypeBuffer[type: DType]:
    alias _ptr_type = DTypePointer[type]
    alias element_type = Scalar[type]
    alias element_byte_width = sizeof[Self.element_type]()

    var _buffer: Self._ptr_type
    var length: Int
    var mem_used: Int

    fn __init__(inout self, length: Int):
        self.length = length
        var num_bytes = self.length * Self.element_byte_width
        self.mem_used = get_num_bytes_with_padding(num_bytes)
        var alloc_count = self.mem_used // Self.element_byte_width
        self._buffer = Self._ptr_type.alloc(alloc_count, alignment=ALIGNMENT)
        memset_zero(self._buffer, alloc_count)

    fn __init__(inout self, values: List[Int]):
        self = Self(len(values))
        for i in range(len(values)):
            self._unsafe_setitem(i, values[i])

    @always_inline
    fn _unsafe_getitem(self, index: Int) -> Self.element_type:
        return self._buffer[index]

    fn __getitem__(self, index: Int) raises -> Self.element_type:
        if index < 0 or index >= self.length:
            raise Error("index out of range for DTypeBuffer")
        return self._unsafe_getitem(index)

    @always_inline
    fn _unsafe_setitem(self, index: Int, value: Self.element_type):
        self._buffer[index] = value

    fn __setitem__(self, index: Int, value: Self.element_type) raises:
        if index < 0 or index >= self.length:
            raise Error("index out of range for DTypeBuffer")
        self._unsafe_setitem(index, value)

    fn __len__(self) -> Int:
        return self.length

    fn __moveinit__(inout self, owned existing: Self):
        self._buffer = existing._buffer
        self.length = existing.length
        self.mem_used = existing.mem_used

    fn __copyinit__(inout self, existing: Self):
        self.length = existing.length
        self.mem_used = existing.mem_used
        # mem_used is a byte count; convert to an element count before
        # allocating and copying, so we don't read past the source buffer.
        var alloc_count = self.mem_used // Self.element_byte_width
        self._buffer = Self._ptr_type.alloc(alloc_count, alignment=ALIGNMENT)
        for i in range(alloc_count):
            self._buffer[i] = existing._buffer[i]

    fn __del__(owned self):
        self._buffer.free()
arrow.mojo/arrow/buffer/dtype.mojo
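Since `mem_used` is a byte count while the pointer is allocated in elements, the conversion between the two is what the constructors above have to get right. A Python sketch of the allocation arithmetic, assuming an 8-byte element such as int64:

```python
ELEMENT_BYTE_WIDTH = 8  # e.g. DType.int64
PADDING = 64

def alloc_count(length: int) -> int:
    num_bytes = length * ELEMENT_BYTE_WIDTH
    mem_used = ((num_bytes + PADDING - 1) // PADDING) * PADDING  # padded bytes
    return mem_used // ELEMENT_BYTE_WIDTH  # elements actually allocated

assert alloc_count(5) == 8  # 40 bytes pad up to 64 bytes = 8 int64 slots
```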
from arrow.util import ALIGNMENT, get_num_bytes_with_padding
from arrow.buffer.dtype import DTypeBuffer

alias OffsetBuffer32 = DTypeBuffer[DType.int32]
alias OffsetBuffer64 = DTypeBuffer[DType.int64]
arrow.mojo/arrow/buffer/offset.mojo
from arrow.buffer.binary import BinaryBuffer
from arrow.buffer.bitmap import Bitmap
from arrow.buffer.offset import OffsetBuffer32, OffsetBuffer64
from arrow.buffer.dtype import DTypeBuffer
arrow.mojo/arrow/buffer/__init__.mojo
alias ARROW_FLAG_DICTIONARY_ORDERED = 1
alias ARROW_FLAG_NULLABLE = 2
alias ARROW_FLAG_MAP_KEYS_SORTED = 4

# @value
# struct ArrowSchema:
#     var format: String
#     var name: String
#     var metadata: String
#     var flags: Int64
#     var n_children: Int64
#     var children: List[Pointer[Self]]
#     var dictionary: Pointer[Self]
#     var release: Pointer[fn (Pointer[Self]) -> None]
#     var private_data: Pointer[UInt8]

# @value
# struct ArrowArray:
#     var length: Int64
#     var null_count: Int64
#     var offset: Int64
#     var n_buffers: Int64
#     var n_children: Int64
#     var buffers: List[Pointer[UInt8]]
#     var children: List[Pointer[Self]]
#     var dictionary: Pointer[Self]
#     var release: Pointer[fn (Pointer[Self]) -> None]
#     var private_data: Pointer[UInt8]
arrow.mojo/arrow/c_data_interface/c_data_interface.mojo
from memory.unsafe import Pointer
from memory import memset_zero

from arrow.util import ALIGNMENT, get_num_bytes_with_padding
from arrow.buffer.bitmap import Bitmap
from arrow.buffer.offset import OffsetBuffer64


struct ArrowFixedWidthVector[T: AnyTrivialRegType]:
    # TODO: support null values
    var length: Int
    var null_count: Int
    var validity: Bitmap
    var value: Pointer[UInt8]
    var view: Pointer[T]
    var mem_use: Int

    fn __init__(inout self, values: List[T]):
        var byte_width = sizeof[T]()
        var num_bytes = len(values) * byte_width
        var num_bytes_with_padding = get_num_bytes_with_padding(num_bytes)
        var ui8_ptr = Pointer[UInt8].alloc(
            num_bytes_with_padding, alignment=ALIGNMENT
        )
        memset_zero(ui8_ptr, num_bytes_with_padding)
        var ptr = ui8_ptr.bitcast[T]()

        var validity_list = List[Bool](capacity=len(values))
        for i in range(values.size):
            validity_list.append(True)
            var val = values[i]
            ptr.store(i, val)

        self.value = ui8_ptr
        self.validity = Bitmap(validity_list)
        self.null_count = 0
        self.view = ptr
        self.length = len(values)
        self.mem_use = num_bytes_with_padding

    fn __getitem__(self, index: Int) raises -> T:
        if index < 0 or index >= self.length:
            raise Error("index out of range for ArrowFixedWidthVector")
        return self.view.load(index)

    fn __len__(self) -> Int:
        return self.length

    fn __del__(owned self):
        self.value.free()


struct ArrowIntVector:
    """
    Temporary solution until we can create ArrowFixedWidthVector[Int].

    Depends on https://github.com/modularml/mojo/issues/2956 being fixed.
    """

    var length: Int
    var null_count: Int
    var validity: Bitmap
    var value_buffer: OffsetBuffer64
    var mem_used: Int

    fn __init__(inout self, values: List[Int]):
        self.length = len(values)
        # The OffsetBuffer64 constructor already copies the values.
        self.value_buffer = OffsetBuffer64(values)
        var validity_list = List[Bool](capacity=len(values))
        for i in range(values.size):
            validity_list.append(True)
        self.validity = Bitmap(validity_list)
        self.null_count = 0
        self.mem_used = self.value_buffer.mem_used + self.validity.mem_used

    fn __getitem__(self, index: Int) raises -> Int64:
        return self.value_buffer[index]

    fn __len__(self) -> Int:
        return self.length
arrow.mojo/arrow/physical_layout/arrow.mojo
from arrow.util import ALIGNMENT, get_num_bytes_with_padding
from arrow.buffer.bitmap import Bitmap
from arrow.buffer import BinaryBuffer, OffsetBuffer32, OffsetBuffer64


struct ArrowStringVector:
    var length: Int
    var null_count: Int
    var validity: Bitmap
    var offsets: OffsetBuffer64
    var value_buffer: BinaryBuffer
    var mem_used: Int

    fn __init__(inout self, values: List[String]):
        var validity_list = List[Bool](capacity=len(values))
        var offset_list = List[Int](capacity=len(values) + 1)

        # Calculate the size of the buffer and allocate it.
        # Note: String._buffer.size includes the null terminator, so this
        # over-allocates slightly; the extra bytes stay zeroed.
        var buffer_size = 0
        for i in range(len(values)):
            buffer_size += values[i]._buffer.size
        self.value_buffer = BinaryBuffer(buffer_size)

        offset_list.append(0)
        var offset_cursor = 0
        for i in range(len(values)):
            validity_list.append(True)
            var bytes = values[i].as_bytes()
            self.value_buffer._unsafe_set_sequence(offset_cursor, bytes)
            offset_cursor += len(bytes)
            offset_list.append(offset_cursor)

        self.length = len(values)
        self.null_count = 0
        self.validity = Bitmap(validity_list)
        self.offsets = OffsetBuffer64(offset_list)
        self.mem_used = self.value_buffer.mem_used + self.offsets.mem_used

    fn __getitem__(self, index: Int) raises -> String:
        if index < 0 or index >= self.length:
            raise Error("index out of range for ArrowStringVector")
        var start = self.offsets[index]
        var length = self.offsets[index + 1] - start
        var bytes = self.value_buffer._unsafe_get_sequence(
            rebind[Int](start), rebind[Int](length)
        )
        # TODO: null terminate string without copying
        bytes.extend(List(UInt8(0)))
        return String(bytes)

    fn __len__(self) -> Int:
        return self.length
arrow.mojo/arrow/physical_layout/varbinary.mojo
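The offsets buffer above follows the standard Arrow variable-size binary layout: `offsets[i]` and `offsets[i + 1]` bracket string `i` inside one contiguous byte buffer. A Python sketch of the same bookkeeping (illustrative, not part of the package):

```python
def build_offsets(strings):
    offsets = [0]
    data = b""
    for s in strings:
        data += s.encode("utf-8")
        offsets.append(len(data))
    return offsets, data

offsets, data = build_offsets(["hello", "world"])
assert offsets == [0, 5, 10]
assert data[offsets[0]:offsets[1]] == b"hello"
assert data[offsets[1]:offsets[2]] == b"world"
```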
from arrow.util import ALIGNMENT, get_num_bytes_with_padding
from arrow.buffer.bitmap import Bitmap
from arrow.buffer import DTypeBuffer, OffsetBuffer32, OffsetBuffer64


struct VariableSizedList[type: DType]:
    alias element_type = Scalar[type]
    alias element_byte_width = sizeof[Self.element_type]()

    var length: Int
    var null_count: Int
    var validity: Bitmap
    var offsets: OffsetBuffer64
    var value_buffer: DTypeBuffer[type]
    var mem_used: Int

    fn __init__(inout self, values: List[List[Self.element_type]]) raises:
        self.length = len(values)
        var validity_list = List[Bool](capacity=len(values))
        var offset_list = List[Int](capacity=len(values) + 1)

        # Calculate the size of the buffer and allocate it
        var buffer_size = 0
        for i in range(len(values)):
            buffer_size += len(values[i])
        self.value_buffer = DTypeBuffer[type](buffer_size)

        offset_list.append(0)
        var offset_cursor: Int = 0
        for i in range(len(values)):
            # TODO: support nulls
            validity_list.append(True)
            var data_list = values[i]
            for value in data_list:
                self.value_buffer[offset_cursor] = value[]
                offset_cursor += 1
            offset_list.append(offset_cursor)

        self.null_count = 0
        self.validity = Bitmap(validity_list)
        self.offsets = OffsetBuffer64(offset_list)
        self.mem_used = self.value_buffer.mem_used + self.offsets.mem_used

    fn __getitem__(self, index: Int) raises -> List[Self.element_type]:
        if index < 0 or index >= self.length:
            # TODO: Sprintf the index into the error
            raise Error("index out of range for VariableSizedList")
        var ret = List[Self.element_type]()
        var start: Int = int(self.offsets[index])
        var length: Int = int(self.offsets[index + 1] - start)

        for i in range(length):
            ret.append(self.value_buffer[start + i])
        return ret

    fn __len__(self) -> Int:
        return self.length
arrow.mojo/arrow/physical_layout/varlist.mojo
from testing import assert_equal

from arrow.array.bool_array import ArrowBooleanArray


def test_ArrowBooleanArray():
    var bools = List[Optional[Bool]](True, None, False)
    var arr = ArrowBooleanArray(bools)

    for i in range(len(arr)):
        if arr[i] is None:
            print("None")
        else:
            print(arr[i].or_else(False))

    assert_equal(arr.length, 3)
    assert_equal(arr.null_count, 1)
    assert_equal(arr.mem_used, 128)


def main():
    test_ArrowBooleanArray()
arrow.mojo/test/array/test_bool_array.mojo
from testing import assert_true

from arrow.buffer.binary import BinaryBuffer


def list_equality(list1: List[UInt8], list2: List[UInt8]) -> Bool:
    if list1.size != list2.size:
        return False
    for i in range(list1.size):
        if list1[i] != list2[i]:
            return False
    return True


def test_BinaryBuffer():
    var test_case = List(UInt8(0), UInt8(1), UInt8(2), UInt8(3))
    var buffer = BinaryBuffer(test_case)
    var list_from_buffer = buffer.get_sequence(0, len(test_case))
    assert_true(list_equality(test_case, list_from_buffer))
    assert_true(buffer.length == len(test_case))
    assert_true(buffer.mem_used == 64)


def test_BinaryBuffer_2():
    # UInt8(31985) wraps around to 241; the round trip still has to match.
    var test_case = List(UInt8(0), UInt8(1), UInt8(31985))
    var buffer = BinaryBuffer(test_case)
    var list_from_buffer = buffer.get_sequence(0, len(test_case))
    assert_true(list_equality(test_case, list_from_buffer))
    assert_true(buffer.length == len(test_case))
    assert_true(buffer.mem_used == 64)


def main():
    test_BinaryBuffer()
    test_BinaryBuffer_2()
arrow.mojo/test/buffer/test_binary.mojo
from arrow import Bitmap
from testing import assert_equal


def check_if_works(bool_list: List[Bool]) -> Bitmap:
    var bitmap = Bitmap(bool_list)
    var list_from_bitmap = bitmap.to_list()
    for i in range(bool_list.size):
        assert_equal(bool_list[i], list_from_bitmap[i])
    return bitmap


def test_Bitmap_0():
    var test_case = List(False)
    var bitmap = check_if_works(test_case)
    assert_equal(bitmap.length, 1)
    assert_equal(bitmap.mem_used, 64)


def test_Bitmap_1():
    var test_case = List(True)
    var bitmap = check_if_works(test_case)
    assert_equal(bitmap.length, 1)
    assert_equal(bitmap.mem_used, 64)


def test_Bitmap_2():
    var test_case = List(False, False)
    var bitmap = check_if_works(test_case)
    assert_equal(bitmap.length, 2)
    assert_equal(bitmap.mem_used, 64)


def test_Bitmap_3():
    var test_case = List(False, True)
    var bitmap = check_if_works(test_case)
    assert_equal(bitmap.length, 2)
    assert_equal(bitmap.mem_used, 64)


def test_Bitmap_4():
    var test_case = List(True, False)
    var bitmap = check_if_works(test_case)
    assert_equal(bitmap.length, 2)
    assert_equal(bitmap.mem_used, 64)


def test_Bitmap_5():
    # Covers the remaining two-element combination.
    var test_case = List(True, True)
    var bitmap = check_if_works(test_case)
    assert_equal(bitmap.length, 2)
    assert_equal(bitmap.mem_used, 64)


def main():
    test_Bitmap_0()
    test_Bitmap_1()
    test_Bitmap_2()
    test_Bitmap_3()
    test_Bitmap_4()
    test_Bitmap_5()
arrow.mojo/test/buffer/test_bitmap.mojo
from arrow import ArrowIntVector
from testing import assert_equal


def test_ArrowIntVector():
    var ints = List[Int]()
    ints.append(-11)
    ints.append(2)
    ints.append(4)
    ints.append(7643)
    ints.append(69)

    var int_arrow_buf = ArrowIntVector(ints)

    assert_equal(int_arrow_buf[0], -11)
    assert_equal(int_arrow_buf[1], 2)
    assert_equal(int_arrow_buf[2], 4)
    assert_equal(int_arrow_buf[3], 7643)
    assert_equal(int_arrow_buf[4], 69)
    assert_equal(len(int_arrow_buf), 5)
    assert_equal(int_arrow_buf.mem_used, 128)
    assert_equal(int_arrow_buf.value_buffer.mem_used, 64)


def main():
    test_ArrowIntVector()
arrow.mojo/test/physical_layout/test_arrow.mojo
from testing import assert_equal

from arrow.physical_layout.varbinary import ArrowStringVector


def test_string_vector():
    var strings = List[String]()
    strings.append("hello")
    strings.append("world")
    strings.append("this")
    strings.append("is")
    strings.append("a")
    strings.append("test")
    strings.append("of")
    strings.append("strings")

    var string_vec = ArrowStringVector(strings)

    assert_equal(string_vec[0], "hello")
    assert_equal(string_vec[1], "world")
    assert_equal(string_vec[2], "this")
    assert_equal(string_vec[3], "is")
    assert_equal(string_vec[4], "a")
    assert_equal(string_vec[5], "test")
    assert_equal(string_vec[6], "of")
    assert_equal(string_vec[7], "strings")


def main():
    test_string_vector()
arrow.mojo/test/physical_layout/test_varbinary.mojo
from testing import assert_equal

from arrow.physical_layout.varlist import VariableSizedList


def test_var_list():
    var list_of_lists = List[List[Int64]](
        List[Int64](1, 2, 3),
        List[Int64](4, 5),
        List[Int64](),
        List[Int64](7, 8),
    )

    var var_list = VariableSizedList(list_of_lists)

    assert_equal(var_list[0][0], 1)
    assert_equal(var_list[0][1], 2)
    assert_equal(var_list[0][2], 3)
    assert_equal(var_list[1][0], 4)
    assert_equal(var_list[1][1], 5)
    assert_equal(len(var_list[2]), 0)
    assert_equal(var_list[3][0], 7)
    assert_equal(var_list[3][1], 8)


def main():
    test_var_list()
arrow.mojo/test/physical_layout/test_varlist.mojo
from .autograd import Graph, Symbol, OP
from .nn import Tensor, TensorShape
from basalt.utils.collection import Collection

alias dtype = DType.float32
alias nelts = 2 * simdwidthof[dtype]()
alias seed = 42
alias epsilon = 1e-12
basalt/basalt/__init__.mojo
from collections import Optional, OptionalReg

from basalt.nn.tensor import Tensor, TensorShape, MAX_RANK
from basalt.utils.bytes import Bytes, scalar_to_bytes, bytes_to_scalar

alias MAX_ATTRS = 10
alias MAX_NAME_CHARS = 16
alias MAX_DATA_BYTES = 32


@register_passable("trivial")
struct AttributeType(Stringable):
    alias BOOL = AttributeType(0, "BOOL")
    alias INT = AttributeType(1, "INT")
    alias FLOAT = AttributeType(2, "FLOAT")
    alias STRING = AttributeType(3, "STRING")
    alias INTS = AttributeType(4, "INTS")
    alias FLOATS = AttributeType(5, "FLOATS")

    var id: UInt8
    var name: Bytes[MAX_NAME_CHARS]

    fn __init__(inout self, id: UInt8, name: String):
        self.id = id
        self.name = Bytes[MAX_NAME_CHARS](name)

    fn __init__(inout self, type: DType):
        if type.is_floating_point():
            self = AttributeType.FLOAT
        elif type.is_bool():
            self = AttributeType.BOOL
        else:
            self = AttributeType.INT

    fn __eq__(self, other: Self) -> Bool:
        return self.id == other.id

    fn __str__(self) -> String:
        return str(self.name)


@register_passable("trivial")
struct AttributeVector(Sized, Stringable, CollectionElement):
    var attributes: StaticTuple[Attribute, MAX_ATTRS]
    var size: Int

    @always_inline("nodebug")
    fn __init__(inout self, *attributes: Attribute):
        self.attributes = StaticTuple[Attribute, MAX_ATTRS]()
        self.size = len(attributes)
        for i in range(self.size):
            self.attributes[i] = attributes[i]

    @always_inline("nodebug")
    fn __len__(self) -> Int:
        return self.size

    @always_inline("nodebug")
    fn __getitem__(self, index: Int) -> Attribute:
        return self.attributes[index]

    @always_inline("nodebug")
    fn __getitem__(self, index: StringLiteral) -> OptionalReg[Attribute]:
        for i in range(self.size):
            if self.attributes[i].name == Bytes[MAX_NAME_CHARS](index):
                return self.attributes[i]
        return None

    @always_inline("nodebug")
    fn __str__(self) -> String:
        var s: String = "["
        for i in range(self.size):
            s += str(self.attributes[i])
            if i < self.size - 1:
                s += ", "
        return s + "]"


@register_passable("trivial")
struct Attribute(Stringable, CollectionElement):
    var data_shape: StaticIntTuple[MAX_RANK]
    var name: Bytes[MAX_NAME_CHARS]
    var data: Bytes[MAX_DATA_BYTES]
    var type: AttributeType
    var size: Int

    @always_inline("nodebug")
    fn __init__(inout self, name: String, value: String):
        self.data_shape = StaticIntTuple[MAX_RANK]()
        self.name = Bytes[MAX_NAME_CHARS](name)
        self.data = Bytes[MAX_DATA_BYTES](value)
        self.type = AttributeType.STRING
        self.size = len(value)

    @always_inline("nodebug")
    fn __init__(inout self, name: String, value: TensorShape):
        self.data_shape = StaticIntTuple[MAX_RANK]()
        self.name = Bytes[MAX_NAME_CHARS](name)
        self.data = Bytes[MAX_DATA_BYTES]()
        self.type = AttributeType.INTS
        self.size = value.rank()
        for i in range(self.size):
            self.data_shape[i] = value._shape[i]

    @always_inline("nodebug")
    fn __init__[N: Int](inout self, name: String, value: StaticIntTuple[N]):
        constrained[N < MAX_RANK, "Attribute rank must be less than MAX_RANK."]()
        self.data_shape = StaticIntTuple[MAX_RANK]()
        self.name = Bytes[MAX_NAME_CHARS](name)
        self.data = Bytes[MAX_DATA_BYTES]()
        self.type = AttributeType.INTS
        self.size = N
        for i in range(self.size):
            self.data_shape[i] = value[i]

    @always_inline("nodebug")
    fn __init__[dtype: DType](inout self, name: String, value: Scalar[dtype]):
        constrained[dtype.is_numeric(), "Attribute value must be numeric."]()
        self.data_shape = StaticIntTuple[MAX_RANK]()
        self.name = Bytes[MAX_NAME_CHARS](name)
        self.data = scalar_to_bytes[dtype, MAX_DATA_BYTES](value)
        self.type = AttributeType(dtype)
        self.size = 1

    @always_inline("nodebug")
    fn __init__(inout self, name: String, value: Int):
        self.__init__(name, Int64(value))
        self.data_shape[0] = 1

    @always_inline("nodebug")
    fn __init__(inout self, name: String, value: FloatLiteral):
        self.__init__(name, Float64(value))
        self.data_shape[0] = 1

    @always_inline("nodebug")
    fn __str__(self) -> String:
        return "Attribute(" + str(self.name) + ", " + "..." + ")"

    @always_inline("nodebug")
    fn to_string(self) -> String:
        return str(self.data)

    @always_inline("nodebug")
    fn to_shape(self) -> TensorShape:
        return TensorShape(rank=self.size, shape=self.data_shape)

    @always_inline("nodebug")
    fn to_static[N: Int](self) -> StaticIntTuple[N]:
        constrained[N < MAX_RANK, "Attribute rank must be less than MAX_RANK."]()
        var result = StaticIntTuple[N]()
        for i in range(N):
            result[i] = int(self.data_shape[i])
        return result

    @always_inline("nodebug")
    fn to_scalar[dtype: DType](self) -> Scalar[dtype]:
        constrained[dtype.is_numeric(), "Attribute value must be numeric."]()
        return bytes_to_scalar[dtype](self.data)

    @always_inline("nodebug")
    fn to_int(self) -> Int:
        return int(self.to_scalar[DType.int64]())

    fn json(self) -> String:
        var result = '{"name": "' + str(self.name) + '", '
        var type: String = ""
        var value: String = ""
        if self.type == AttributeType.STRING:
            type = "STRING"
            value = '"' + self.to_string() + '"'
        elif self.type == AttributeType.INTS:
            type = "INTS"
            var value_temp = self.to_shape()
            value = "["
            for i in range(value_temp.rank()):
                value += str(value_temp._shape[i])
                if i < value_temp.rank() - 1:
                    value += ", "
            value += "]"
        elif self.type == AttributeType.FLOAT:
            type = "FLOAT"
            value = str(self.to_scalar[DType.float64]())
        elif self.type == AttributeType.INT:
            type = "INT"
            value = str(self.to_int())
        else:
            type = "UNKNOWN"
            value = "UNKNOWN"
        result += '"type": "' + type + '", ' + '"value": ' + value
        return result + "}"
basalt/basalt/autograd/attributes.mojo
from python.python import Python
from collections.optional import Optional, OptionalReg

from .node import Node
from .attributes import AttributeVector, Attribute
from .symbol import Symbol
from .ops import OP, static_result_shape, dynamic_result_shape
from .params import ParamDict, Param

from basalt import seed, dtype
from basalt import Tensor, TensorShape


struct Graph:
    var inputs: List[Symbol]
    var params: ParamDict
    var nodes: List[Node]
    var outputs: List[Symbol]
    var loss_out: OptionalReg[Symbol]
    var symbol_count: UInt32

    fn __init__(inout self):
        self.inputs = List[Symbol]()
        self.params = ParamDict()
        self.nodes = List[Node]()
        self.outputs = List[Symbol]()
        self.loss_out = None
        self.symbol_count = 0

    fn __moveinit__(inout self, owned other: Graph):
        self.inputs = other.inputs^
        self.params = other.params^
        self.nodes = other.nodes^
        self.outputs = other.outputs^
        self.loss_out = other.loss_out
        self.symbol_count = other.symbol_count

    fn create_symbol(
        inout self,
        shape: TensorShape,
        data: Optional[Param] = None,
        trainable: Bool = False,
        is_input: Bool = False,
    ) -> Symbol:
        var symbol = Symbol(self.symbol_count, dtype, shape, trainable)
        self.symbol_count += 1

        if data is not None:
            self.params.put(symbol, data.take())
        else:
            self.params.put(symbol)

        if is_input:
            self.inputs.append(symbol)

        return symbol

    fn input(inout self, shape: TensorShape, trainable: Bool = False) -> Symbol:
        return self.create_symbol(shape, trainable=trainable, is_input=True)

    fn param(inout self, shape: TensorShape, init: Param, trainable: Bool = True) -> Symbol:
        return self.create_symbol(shape, init, trainable)

    fn param(inout self, shape: TensorShape, trainable: Bool = True) -> Symbol:
        return self.create_symbol(shape, trainable=trainable)

    fn scalar(inout self, value: Scalar[dtype]) -> Symbol:
        return self.create_symbol(TensorShape(1), Param(value), trainable=False)

    fn constant(inout self, shape: TensorShape, data: List[Scalar[dtype]]) -> Symbol:
        return self.create_symbol(shape, Param(data), trainable=False)

    fn out(inout self, symbol: Symbol):
        self.outputs.append(symbol)

    fn loss(inout self, symbol: Symbol):
        self.loss_out = symbol

    fn op(
        inout self,
        op: OP,
        *operands: Symbol,
        attributes: AttributeVector = AttributeVector(),
    ) -> Symbol:
        var res_shape = static_result_shape(op, operands, attributes)
        var res = Symbol(
            self.symbol_count, dtype, res_shape, self.result_trainable(operands)
        )
        self.symbol_count += 1

        var inputs = List[Symbol]()
        inputs.reserve(len(operands))
        for operand in operands:
            inputs.append(operand)

        self.nodes.append(Node(op, inputs, List[Symbol](res), attributes))
        return res

    fn op(
        inout self,
        op: OP,
        operand_1: Symbol,
        operand_2: Float64,
        attributes: AttributeVector = AttributeVector(),
    ) -> Symbol:
        return self.op(op, operand_1, self.scalar(operand_2), attributes=attributes)

    fn op(
        inout self,
        op: OP,
        operand_1: Float64,
        operand_2: Symbol,
        attributes: AttributeVector = AttributeVector(),
    ) -> Symbol:
        return self.op(op, self.scalar(operand_1), operand_2, attributes=attributes)

    fn create_symbols(
        inout self, shapes: List[TensorShape], trainable: Bool = False
    ) -> List[Symbol]:
        var symbols = List[Symbol]()
        symbols.reserve(len(shapes))
        for shape in shapes:
            symbols.append(Symbol(self.symbol_count, dtype, shape[], trainable))
            self.symbol_count += 1
        return symbols

    fn add_node(
        inout self,
        op: OP,
        inputs: List[Symbol],
        outputs: List[Symbol],
        attributes: AttributeVector,
    ):
        self.nodes.append(Node(op, inputs, outputs, attributes))

    fn concat(inout self, *operands: Symbol, dim: Int = 0) -> Symbol:
        var attributes = AttributeVector(Attribute("dim", dim))
        var res_shape = dynamic_result_shape(OP.CONCAT, operands, attributes)[0]
        var res_symbols = self.create_symbols(
            List[TensorShape](res_shape), self.result_trainable(operands)
        )

        var operand_list = List[Symbol]()
        operand_list.reserve(len(operands))
        for operand in operands:
            operand_list.append(operand)

        self.add_node(OP.CONCAT, operand_list, res_symbols, attributes)
        return res_symbols[0]

    fn split(
        inout self, operand: Symbol, sections: List[Int], dim: Int = 0
    ) -> List[Symbol]:
        var attributes = AttributeVector(
            Attribute("sections", TensorShape(sections)), Attribute("dim", dim)
        )
        var res_shapes = dynamic_result_shape(OP.SPLIT, operand, attributes)
        var trainable = self.result_trainable(operand)
        var result_symbols = self.create_symbols(res_shapes, trainable)

        self.add_node(OP.SPLIT, List[Symbol](operand), result_symbols, attributes)
        return result_symbols

    @staticmethod
    fn result_trainable(operands: VariadicList[Symbol]) -> Bool:
        for operand in operands:
            if operand.trainable:
                return True
        return False

    fn json(self) -> String:
        var result: String = '{"graph_name": "basalt", "nodes": ['
        for i in range(len(self.nodes)):
            result += self.nodes[i].json()
            if i < len(self.nodes) - 1:
                result += ", "

        result += '], "inputs": ['
        for i in range(len(self.inputs)):
            result += self.inputs[i].json()
            if i < len(self.inputs) - 1:
                result += ", "

        result += '], "outputs": ['
        for i in range(len(self.outputs)):
            result += self.outputs[i].json()
            if i < len(self.outputs) - 1:
                result += ", "

        if self.loss_out:
            result += '], "loss": ['
            result += self.loss_out.value().json()

        result += '], "params": ['
        for i in range(len(self.params)):
            result += self.params.symbols[i].json()
            if i < len(self.params) - 1:
                result += ", "

        result += "]}"
        return result

    fn render(self, render_type: String = "node") raises:
        Python.add_to_path("./basalt/utils")
        var renderer = Python.import_module("graph_render")
        var json = Python.import_module("json")
        _ = renderer.netron_render(json.loads(self.json()), render_type)

    fn compile(inout self):
        # 0. Sorting the graph
        #    The statically defined graph has an implicit topologically sorted
        #    order because each new operation is added to the list of nodes
        #    after its dependencies have been calculated. This eliminates the
        #    need for explicit topological sorting.

        # Possibilities:
        # - 1. Graph layout transformation (graph rewrite)
        #      - Layer pruning (removing nodes that have no effect - with common sub-tree identification)
        #      - Eliminate redundant intermediate data copies
        #      - Operator replacement (e.g. replacing (combination of) costly ops with more efficient ones)
        #      - (example of graph rewrite: https://dl.acm.org/doi/pdf/10.1145/3453483.3454083 - Table 4)
        #      - Other intra-block optimizations: (e.g. data layout transformation BCHW -> BHWC, etc.)
        # - 2. Operator fusion (combining ops without materializing intermediate results)
        #      - Fusion plan exploration
        #      - Fusion plan generation (with subsequent intra-block optimizations)
        #      - (example fusion plan algorithm: https://dl.acm.org/doi/pdf/10.1145/3453483.3454083 - Listing 1)
        # - 3. Fusion code generation (behaviour)
        #      - Code generation for planned fusion blocks
        #      - Other inter-block optimizations (e.g. data layout transformation BCHW -> BHWC, etc.)
        # - 4. Auto-tuning (of vectorization-, parallelization-, tiling-, unrolling-parameters)
        #      - (Might only work when memory is initialized)

        # Other considerations:
        # - Efficient memory management:
        #      - Memory reuse (in-place operations)
        #      - Data layout from BCHW (batch, channel, height, width) to BHWC can lead to better utilization and efficiency
        # - VJP, JVP (for automatic differentiation)
        pass
basalt/basalt/autograd/graph.mojo
from collections.optional import Optional
from utils.variant import Variant

from basalt.autograd import Symbol
from basalt.autograd.ops import OP

from .attributes import AttributeVector


@value
struct Node(CollectionElement, Stringable):
    var operator: OP
    var inputs: List[Symbol]
    var outputs: List[Symbol]
    var attributes: AttributeVector

    fn __init__(
        inout self,
        operator: OP,
        inputs: List[Symbol],
        outputs: List[Symbol],
        attributes: AttributeVector = AttributeVector(),
    ):
        self.operator = operator
        self.inputs = inputs
        self.outputs = outputs
        self.attributes = attributes

    fn __str__(self) -> String:
        return self.json()

    fn json(self) -> String:
        var s: String = '{"operator": "' + str(self.operator.name) + '", "inputs": ['
        for i in range(len(self.inputs)):
            s += self.inputs[i].json()
            if i < len(self.inputs) - 1:
                s += ", "
        s += '], "outputs": ['
        for i in range(len(self.outputs)):
            s += self.outputs[i].json()
            if i < len(self.outputs) - 1:
                s += ", "
        s += '], "attributes": ['
        for i in range(len(self.attributes)):
            s += self.attributes[i].json()
            if i < len(self.attributes) - 1:
                s += ", "
        s += "]}"
        return s
basalt/basalt/autograd/node.mojo
from collections.optional import Optional

from basalt import dtype
from basalt import Tensor, TensorShape

from .symbol import Symbol
from .attributes import Attribute


@value
struct Param(CollectionElement, Stringable):
    var data: Optional[List[Scalar[dtype]]]
    var initializer: Optional[Attribute]

    fn __init__(inout self):
        self.data = None
        self.initializer = None

    fn __init__(inout self, data: List[Scalar[dtype]]):
        self.data = data
        self.initializer = None

    fn __init__(inout self, data: Scalar[dtype]):
        self.data = List[Scalar[dtype]](data)
        self.initializer = None

    fn __init__(inout self, initializer: String, *args: Scalar[dtype]):
        # Supported initializers:
        #   "random_uniform", lower_bound, upper_bound
        #   "random_normal", mean, std
        #   TODO: "kaiming_uniform", mode, nonlinearity
        #   TODO: "kaiming_normal", mode, nonlinearity
        self.initializer = Attribute("initializer", initializer)
        var data = List[Scalar[dtype]]()
        for arg in args:
            data.append(arg)
        self.data = data

    fn __getitem__(self, i: Int) -> Optional[Scalar[dtype]]:
        if self.data:
            return self.data.value()[][i]
        else:
            return None

    fn __str__(self) -> String:
        var s: String = ""
        if self.data:
            var data = self.data.value()
            s += "["
            for i in range(len(data[])):
                s += str(data[][i])
                if i < len(data[]) - 1:
                    s += ", "
            s += "]"
        return s


@value
struct ParamDict(Sized):
    var symbols: List[Symbol]
    var values: List[Param]

    fn __init__(inout self):
        self.symbols = List[Symbol]()
        self.values = List[Param]()

    fn put(inout self, param_id: Symbol, value: Param = Param()):
        self.symbols.append(param_id)
        self.values.append(value)

    fn get_tensor(self, idx: Int) -> Tensor[dtype]:
        # May only be called at runtime.
        var num = self.symbols[idx].shape.num_elements()
        var t = DTypePointer[dtype].alloc(num)
        for i in range(num):
            t[i] = self.values[idx][i].value()[]
        return Tensor[dtype](t, self.symbols[idx].shape)

    fn __len__(self) -> Int:
        return len(self.symbols)
basalt/basalt/autograd/params.mojo
from basalt import Tensor, TensorShape


@value
@register_passable("trivial")
struct Symbol(CollectionElement, Stringable, EqualityComparable):
    var name: UInt32
    var dtype: DType
    var shape: TensorShape
    var trainable: Bool

    fn __eq__(self, other: Self) -> Bool:
        return self.name == other.name

    fn __ne__(self, other: Self) -> Bool:
        return self.name != other.name

    fn __str__(self) -> String:
        return self.json()

    fn json(self) -> String:
        return (
            '{"name": "'
            + str(self.name)
            + '", "dtype": "'
            + str(self.dtype)
            + '", "shape": "'
            + str(self.shape)
            + '", "trainable": "'
            + str(self.trainable)
            + '"}'
        )
basalt/basalt/autograd/symbol.mojo
from .symbol import Symbol
from .graph import Graph
from .ops import OP
basalt/basalt/autograd/__init__.mojo
from math import add, sub, mul, div, log, exp
from algorithm import vectorize
from memory import memcpy

from basalt import Tensor, TensorShape
from basalt.nn.tensor import MAX_RANK
from basalt.utils.tensorutils import *
from basalt.autograd.attributes import Attribute, AttributeVector
from basalt.autograd.ops.matmul import dot, dot_transpose_t1, dot_transpose_t2

"""
Implement forward and backward operations for basic tensor manipulations.
"""


@value
struct ADD:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, t2_shape: TensorShape) -> TensorShape:
        return broadcast_shapes(t1_shape, t2_shape)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
        """
        Forward pass of the add operation.
        """
        elwise_op[t1_shape, t2_shape, add](res, t1, t2)

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of element wise addition."""
        # d(x + y) / dx = d(x + y) / dy = 1
        return ug


@value
struct SUB:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, t2_shape: TensorShape) -> TensorShape:
        return broadcast_shapes(t1_shape, t2_shape)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
        """
        Forward pass of the subtraction operation.
        """
        elwise_op[t1_shape, t2_shape, sub](res, t1, t2)

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of element wise subtraction."""
        # d(x - y) / dx = 1
        # d(x - y) / dy = -1
        @parameter
        if tensor_id == 0:
            return ug
        else:
            var res_grad = Tensor[dtype](ug_shape)
            elwise_op[mul](res_grad, ug, -1.0)
            return res_grad^


@value
struct MUL:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, t2_shape: TensorShape) -> TensorShape:
        return broadcast_shapes(t1_shape, t2_shape)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
        """
        Forward pass of the multiplication operation.
        """
        elwise_op[t1_shape, t2_shape, mul](res, t1, t2)

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of element wise multiplication."""
        # d(x * y) / dx = y
        # d(x * y) / dy = x
        @parameter
        if tensor_id == 0:
            var res_grad = Tensor[dtype](ug_shape)
            elwise_op[ug_shape, t2_shape, mul](res_grad, ug, t2)
            return res_grad^
        else:
            var res_grad = Tensor[dtype](ug_shape)
            elwise_op[ug_shape, t1_shape, mul](res_grad, ug, t1)
            return res_grad^


@value
struct DIV:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, t2_shape: TensorShape) -> TensorShape:
        return broadcast_shapes(t1_shape, t2_shape)

    @staticmethod
    fn forward[
        t1_shape: TensorShape, t2_shape: TensorShape
    ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
        """
        Forward operation of element wise division.
        """
        elwise_op[t1_shape, t2_shape, div](res, t1, t2)

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of element wise division."""
        # d(x/y) / dx = 1/y
        # d(x/y) / dy = -x/y^2
        @parameter
        if tensor_id == 0:
            var res_grad = Tensor[dtype](ug_shape)
            elwise_op[ug_shape, t2_shape, div](res_grad, ug, t2)
            return res_grad^
        else:
            alias broadcast = (t1_shape != t2_shape)
            alias is_scalar = (t2_shape == TensorShape(1))
            var res_grad = Tensor[dtype](ug_shape)

            @parameter
            if is_scalar:
                var factor: Scalar[dtype] = -1.0 / (t2[0] ** 2)

                @parameter
                fn vec_div_bw_scalar[nelts: Int](i: Int):
                    res_grad.store[nelts](
                        i, factor * t1.load[nelts](i) * ug.load[nelts](i)
                    )

                vectorize[vec_div_bw_scalar, nelts](ug_shape.num_elements())
            elif broadcast and not is_scalar:
                alias size = ug_shape.rank()
                alias strides1 = broadcast_calculate_strides[size, t1_shape, ug_shape]()
                alias strides2 = broadcast_calculate_strides[size, t2_shape, ug_shape]()

                @parameter
                fn vec_div_bw_broadcast[_nelts: Int](i: Int):
                    var index1 = get_real_index[size, strides1, ug_shape](i)
                    var index2 = get_real_index[size, strides2, ug_shape](i)
                    res_grad.store[nelts](
                        i,
                        -t1.load[nelts](index1)
                        / (t2.load[nelts](index2) ** 2)
                        * ug.load[nelts](i),
                    )

                vectorize[vec_div_bw_broadcast, 1](ug_shape.num_elements())
            else:

                @parameter
                fn vec_div_bw[nelts: Int](i: Int):
                    res_grad.store[nelts](
                        i,
                        -t1.load[nelts](i)
                        / (t2.load[nelts](i) ** 2)
                        * ug.load[nelts](i),
                    )

                vectorize[vec_div_bw, nelts](ug_shape.num_elements())

            return res_grad^


@value
struct DOT:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, t2_shape: TensorShape) -> TensorShape:
        return TensorShape(t1_shape[0], t2_shape[1])

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
        """
        Forward pass of the dot operation.
        """
        dot[t1_shape, t2_shape](res, t1, t2)

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of dot product."""
        @parameter
        if tensor_id == 0:
            # dot(ug, t2.T)
            var res_grad = Tensor[dtype](t1_shape)
            dot_transpose_t2[ug_shape, t2_shape](res_grad, ug, t2)
            return res_grad^
        else:
            # dot(t1.T, ug)
            var res_grad = Tensor[dtype](t2_shape)
            dot_transpose_t1[t1_shape, ug_shape](res_grad, t1, ug)
            return res_grad^


@value
struct EXP:
    @staticmethod
    fn result_shape(t1_shape: TensorShape) -> TensorShape:
        return t1_shape

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        """Forward operation of exp."""
        elwise_transform[exp](res, t1)

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t1_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of exp."""
        # d(exp(x)) / dx = exp(x)
        var res_grad = Tensor[dtype](ug_shape)

        @parameter
        fn vec_exp_bw[nelts: Int](i: Int):
            res_grad.store[nelts](i, exp(t1.load[nelts](i)) * ug.load[nelts](i))

        vectorize[vec_exp_bw, nelts](ug_shape.num_elements())
        return res_grad^


@value
struct LOG:
    @staticmethod
    fn result_shape(t1_shape: TensorShape) -> TensorShape:
        return t1_shape

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        """Forward operation of log."""
        elwise_transform[log](res, t1)

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t1_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of log."""
        # d(log(x)) / dx = 1 / x
        var res_grad = Tensor[dtype](ug_shape)
        elwise_op[ug_shape, t1_shape, div](res_grad, ug, t1)
        return res_grad^


struct POW:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, t2_shape: TensorShape) -> TensorShape:
        # t2_shape == TensorShape(1)
        return t1_shape

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
        """Forward operation of element wise pow."""
        # t2_shape is a graph scalar
        elwise_pow(res, t1, int(t2[0]))

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of element wise pow."""
        # d(x^y) / dx = y * x^(y-1)
        # d(x^y) / dy = sum( x^y * log(x) )
        var res_grad: Tensor[dtype]
        var a = int(t2[0])

        @parameter
        if tensor_id == 0:
            res_grad = Tensor[dtype](t1_shape)

            @parameter
            fn vec_pow_bw_x[nelts: Int](i: Int):
                res_grad.store[nelts](
                    i,
                    a * ((t1.load[nelts](i) + epsilon) ** (a - 1)) * ug.load[nelts](i),
                )

            vectorize[vec_pow_bw_x, nelts](t1_shape.num_elements())
        else:
            res_grad = Tensor[dtype](t2_shape)  # t2_shape == TensorShape(1)

            @parameter
            fn vec_pow_bw_y[nelts: Int](i: Int):
                res_grad[0] += (
                    (t1.load[nelts](i) ** a)
                    * log(t1.load[nelts](i))
                    * ug.load[nelts](i)
                ).reduce_add()

            vectorize[vec_pow_bw_y, nelts](ug_shape.num_elements())

        return res_grad^


struct SUM:
    @staticmethod
    fn result_shape(t_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        var axis = attributes["axis"]
        if axis:
            return get_reduce_shape(t_shape, axis.value().to_int())
        else:
            return TensorShape(1)

    @staticmethod
    fn forward[
        t_shape: TensorShape, attributes: AttributeVector
    ](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the sum operation.
        """
        alias axis = attributes["axis"]

        @parameter
        if axis:
            tsum(res, t, axis.value().to_int())
        else:
            res[0] = tsum(t)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape, attributes: AttributeVector
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of sum."""
        return Self.backward[ug_shape, t_shape](ug, t)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of sum."""
        var res_grad = Tensor[dtype](t_shape)
        fill(res_grad, 1.0)
        elwise_op[t_shape, ug_shape, mul](res_grad, res_grad, ug)
        return res_grad^


@value
struct MEAN:
    @staticmethod
    fn result_shape(t_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        var axis = attributes["axis"]
        if axis:
            return get_reduce_shape(t_shape, axis.value().to_int())
        else:
            return TensorShape(1)

    @staticmethod
    fn forward[
        t_shape: TensorShape, attributes: AttributeVector
    ](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the mean operation.
        """
        alias axis = attributes["axis"]

        @parameter
        if axis:
            tmean(res, t, axis.value().to_int())
        else:
            res[0] = tmean(t)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape, attributes: AttributeVector
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of mean."""
        alias axis = attributes["axis"]

        @parameter
        if axis:
            return Self.backward[ug_shape, t_shape](ug, t, axis.value().to_int())
        else:
            return Self.backward[ug_shape, t_shape](ug, t)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of mean."""
        # d(mean(t)) / dt = 1 / t.num_elements()
        var res_grad = Tensor[dtype](t_shape)
        var grad: Scalar[dtype] = 1.0 / t_shape.num_elements()
        # ug is a tensor of size 1 when mean is used without an axis
        grad = grad * ug[0]

        @parameter
        fn v_mean_d[nelts: Int](i: Int):
            res_grad.store[nelts](i, grad)

        vectorize[v_mean_d, nelts](t_shape.num_elements())
        return res_grad^

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype], axis: Int) -> Tensor[dtype]:
        """Backward operation of mean."""
        # d(mean(t)) / dt = 1 / t.dim(axis)
        var res_grad = Tensor[dtype](t_shape)
        var grad: Scalar[dtype] = 1.0 / t_shape[axis]
        fill(res_grad, grad)
        elwise_op[t_shape, ug_shape, mul](res_grad, res_grad, ug)
        return res_grad^


struct MAX:
    @staticmethod
    fn result_shape(t_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        var axis = attributes["axis"]
        if axis:
            return get_reduce_shape(t_shape, axis.value().to_int())
        else:
            return TensorShape(1)

    @staticmethod
    fn forward[
        t_shape: TensorShape, attributes: AttributeVector
    ](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the max operation.
        """
        alias axis = attributes["axis"]

        @parameter
        if axis:
            tmax(res, t, axis.value().to_int())
        else:
            res[0] = tmax(t)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape, attributes: AttributeVector
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of max."""
        alias axis = attributes["axis"]

        @parameter
        if axis:
            return Self.backward[ug_shape, t_shape](ug, t, axis.value().to_int())
        else:
            return Self.backward[ug_shape, t_shape](ug, t)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of max."""
        # This could be changed to something like in tinygrad:
        # max_1s = CMPEQ(original_tensor, expanded(max_tensor), axis=axis)
        # sum_max_1s = SUM(max_1s)
        # div_sum_max_1s = DIV(max_1, sum_max_1s)

        # The selected element gradient is 1.0, the others are 0.0. And if there are
        # multiple max values, the gradient is divided by the number of max
        # values (1/n) for each max value.
        var res_grad = Tensor[dtype](t_shape)

        # ug_shape size is 1
        var max_res = tmax(t)
        var sum_eq: Scalar[dtype] = 0
        for i in range(t.num_elements()):
            if t[i] == max_res:
                sum_eq += 1

        var factor = 1 / sum_eq
        for i in range(res_grad.num_elements()):
            if t[i] == max_res:
                res_grad[i] = factor * ug[0]

        return res_grad^

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype], axis: Int) -> Tensor[dtype]:
        """Backward operation of max."""
        # The selected element gradient is 1.0, the others are 0.0. And if there are
        # multiple max values, the gradient is divided by the number of max
        # values (1/n) for each max value.
        var res_grad = Tensor[dtype](t_shape)
        var max_res = Tensor[dtype](ug_shape)
        alias strides = t_shape.strides()

        # To not calculate this again we could receive the result of the
        # forward pass as a parameter.
        tmax(max_res, t, axis)

        for i in range(max_res.num_elements()):
            var index_base = (i % strides[axis]) + (i // strides[axis]) * (
                strides[axis] * t.dim(axis)
            )

            var count_1s: Scalar[dtype] = 0
            # Count the number of values equal to max_res
            for j in range(t.dim(axis)):
                var index = index_base + j * strides[axis]
                if t[index] == max_res[i]:
                    count_1s += 1
            # Divide 1.0 by the number of max values (n) and multiply by upper gradient value
            var factor = 1 / count_1s

            for j in range(t.dim(axis)):
                var index = index_base + j * strides[axis]
                if t[index] == max_res[i]:
                    res_grad[index] = factor * ug[i]

        return res_grad^


struct TRANSPOSE:
    @staticmethod
    fn result_shape(t_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        var axes = attributes["axes"]  # axes to be permuted
        var rank = t_shape.rank()
        var shape = StaticIntTuple[MAX_RANK]()

        if axes:
            # NOTE: axis has to be the size of rank of the tensor
            var axes_shape = axes.value().to_shape()
            for i in range(rank):
                shape[i] = t_shape[axes_shape[i]]
        else:
            for i in range(rank):
                shape[i] = t_shape[rank - i - 1]

        return TensorShape(rank=rank, shape=shape)

    @staticmethod
    fn forward[
        t_shape: TensorShape, attributes: AttributeVector
    ](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the transpose operation.
        """
        alias axes = attributes["axes"]

        @parameter
        if axes:
            var axes_shape = axes.value().to_shape()
            transpose(res, t, axes_shape)
        else:

            fn create_transpose_axes() -> TensorShape:
                var rank = t_shape.rank()
                var axes = StaticIntTuple[MAX_RANK]()
                for i in range(rank):
                    axes[i] = rank - i - 1
                return TensorShape(rank=rank, shape=axes)

            alias axes_shape = create_transpose_axes()
            transpose(res, t, axes_shape)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape, attributes: AttributeVector
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of transpose."""
        # No local gradient. Transpose is its own inverse.
        alias axes = attributes["axes"]
        var res_grad = Tensor[dtype](t_shape)

        @parameter
        if axes:

            fn create_inverse_axes() -> TensorShape:
                var axes_shape = axes.value().to_shape()
                var rank = axes_shape.rank()
                var axes_shape_inv = StaticIntTuple[MAX_RANK]()
                for i in range(rank):
                    axes_shape_inv[axes_shape[i]] = i
                return TensorShape(rank=rank, shape=axes_shape_inv)

            alias axes_shape_inv = create_inverse_axes()
            transpose(res_grad, ug, axes_shape_inv)
        else:

            fn create_transpose_axes() -> TensorShape:
                var rank = t_shape.rank()
                var axes = StaticIntTuple[MAX_RANK]()
                for i in range(rank):
                    axes[i] = rank - i - 1
                return TensorShape(rank=rank, shape=axes)

            alias axes_shape_inv = create_transpose_axes()
            transpose(res_grad, ug, axes_shape_inv)

        return res_grad^


struct FLATTEN:
    @staticmethod
    fn result_shape(t_shape: TensorShape) -> TensorShape:
        return TensorShape(t_shape.num_elements())

    @staticmethod
    fn forward[t_shape: TensorShape](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the flatten operation.
        """
        memcpy(res.data(), t.data(), t_shape.num_elements())

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of flatten."""
        var res_grad = Tensor[dtype](t_shape)
        memcpy(res_grad.data(), ug.data(), ug_shape.num_elements())
        return res_grad^


struct RESHAPE:
    @staticmethod
    fn result_shape(t_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        var new_shape = attributes["shape"]
        return new_shape.value().to_shape()

    @staticmethod
    fn forward[t_shape: TensorShape](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the reshape operation.
        """
        memcpy(res.data(), t.data(), t_shape.num_elements())

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of reshape."""
        var res_grad = Tensor[dtype](t_shape)
        memcpy(res_grad.data(), ug.data(), ug_shape.num_elements())
        return res_grad^


struct FMA:
    @staticmethod
    fn result_shape(
        t1_shape: TensorShape, t2_shape: TensorShape, t3_shape: TensorShape
    ) -> TensorShape:
        # FMA assumes: t1_shape == t2_shape == t3_shape
        # TODO: Error handling, constraints in API
        return t1_shape

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        t2_shape: TensorShape,
        t3_shape: TensorShape,
    ](
        inout res: Tensor[dtype],
        t1: Tensor[dtype],
        t2: Tensor[dtype],
        t3: Tensor[dtype],
    ):
        """
        Forward pass of the fma operation.
        """

        @parameter
        fn vec_fma[nelts: Int](i: Int):
            res.store[nelts](
                i, t1.load[nelts](i).fma(t2.load[nelts](i), t3.load[nelts](i))
            )

        vectorize[vec_fma, nelts, size = t1_shape.num_elements()]()

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
        t3_shape: TensorShape,
    ](
        ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype], t3: Tensor[dtype]
    ) -> Tensor[dtype]:
        """Backward operation of fma."""
        # d(x * y + z) / dx = y
        # d(x * y + z) / dy = x
        # d(x * y + z) / dz = 1
        @parameter
        if tensor_id == 0:
            var res_grad = Tensor[dtype](ug_shape)
            elwise_op[ug_shape, t2_shape, mul](res_grad, ug, t2)
            return res_grad^
        elif tensor_id == 1:
            var res_grad = Tensor[dtype](ug_shape)
            elwise_op[ug_shape, t1_shape, mul](res_grad, ug, t1)
            return res_grad^
        else:
            return ug
basalt/basalt/autograd/ops/basics.mojo
false
from basalt import Tensor, TensorShape
from basalt.autograd.attributes import AttributeVector

from algorithm import parallelize, vectorize, tile
from math import divmod
from utils.loop import unroll


@always_inline
fn get_result_shape(
    input_shape: TensorShape,
    kernel_shape: TensorShape,
    padding: StaticIntTuple[2],
    stride: StaticIntTuple[2],
    dilation: StaticIntTuple[2],
) -> StaticIntTuple[2]:
    """
    Calculates the X and Y dimensions of the resulting convolution.
    Dimensions X and Y are at the end of the shape (..., X, Y):
    dimension X at index -2, dimension Y at index -1.
    """

    var result_x_dim = (
        (input_shape[-2] + (2 * padding[0]) - dilation[0] * (kernel_shape[-2] - 1) - 1)
        // stride[0]
    ) + 1
    var result_y_dim = (
        (input_shape[-1] + (2 * padding[1]) - dilation[1] * (kernel_shape[-1] - 1) - 1)
        // stride[1]
    ) + 1

    return StaticIntTuple[2](result_x_dim, result_y_dim)


struct CONV2D:
    @staticmethod
    fn result_shape(
        input_shape: TensorShape,
        kernel_shape: TensorShape,
        bias_shape: TensorShape,
        attributes: AttributeVector,
    ) -> TensorShape:
        # Output shape = [batch, out_channels, oX, oY]
        var padding = attributes["padding"].value().to_static[2]()
        var stride = attributes["stride"].value().to_static[2]()
        var dilation = attributes["dilation"].value().to_static[2]()

        var res = get_result_shape(input_shape, kernel_shape, padding, stride, dilation)

        return TensorShape(input_shape[0], kernel_shape[0], res[0], res[1])

    @staticmethod
    fn forward[
        input_shape: TensorShape,
        kernel_shape: TensorShape,
        bias_shape: TensorShape,
        attributes: AttributeVector,
    ](
        inout outputs: Tensor[dtype],
        inputs: Tensor[dtype],
        kernel: Tensor[dtype],
        bias: Tensor[dtype],
    ):
        """
        Performs a 2D convolution on the input tensor using the kernel and bias.
            inputs.shape     [batch, in_channels, iX, iY]
            kernel.shape     [out_channels, in_channels, kX, kY] (or weights)
            bias.shape       [out_channels].
            output.shape     [batch, out_channels, oX, oY].
""" alias padding = attributes["padding"].value().to_static[2]() alias stride = attributes["stride"].value().to_static[2]() alias dilation = attributes["dilation"].value().to_static[2]() alias padding_x = padding[0] alias padding_y = padding[1] alias stride_x = stride[0] alias stride_y = stride[1] alias dilation_x = dilation[0] alias dilation_y = dilation[1] alias batch_size = input_shape[0] alias in_channels = input_shape[1] alias in_x = input_shape[2] alias in_y = input_shape[3] alias out_channels = kernel_shape[0] alias k_x = kernel_shape[2] alias k_y = kernel_shape[3] alias out_x = output_shape[2] alias out_y = output_shape[3] alias col_x = out_x alias col_y = out_y alias col_shape = TensorShape( batch_size, col_x * col_y, in_channels * k_x * k_y ) # [batch, colX * colY, in_channels * kX * kY] alias output_shape = Self.result_shape( input_shape, kernel_shape, bias_shape, attributes ) alias col_shape_stripped = TensorShape(in_channels * k_x * k_y, col_x, col_y) alias inputs_strides = input_shape.strides() alias kernel_strides = kernel_shape.strides() alias outputs_strides = output_shape.strides() alias col_strides = col_shape.strides() var col_ptr = DTypePointer[dtype].alloc(col_shape.num_elements()) memset_zero(col_ptr, col_shape.num_elements()) @parameter fn im2col(batch: Int): for ux in range(out_x): for uy in range(out_y): for in_ch in range(in_channels): for kx in range(k_x): for ky in range(k_y): var ix = ux * stride_x - padding_x + kx * dilation_x var iy = uy * stride_y - padding_y + ky * dilation_y if ix < 0 or iy < 0 or ix >= in_x or iy >= in_y: continue var col_index = ( batch * col_strides[0] + (ux * col_y + uy) * col_strides[1] + (in_ch * k_x * k_y + kx * k_y + ky) ) var input_index = ( batch * inputs_strides[0] + in_ch * inputs_strides[1] + ix * inputs_strides[2] + iy ) col_ptr[col_index] = inputs[input_index] parallelize[im2col](batch_size) @parameter fn conv(batch: Int): for out_ch in range(out_channels): for ux in range(out_x): for uy in range(out_y): var result: SIMD[dtype, nelts] = 0 @parameter fn v_im2col[_nelts: Int](in_ch_kx_ky: Int): var col_index = ( batch * col_strides[0] + (ux * col_y + uy) * col_strides[1] + in_ch_kx_ky ) var kernel_index = ( out_ch * kernel_strides[0] + in_ch_kx_ky ) @parameter if _nelts == nelts: result += col_ptr.load[width=nelts]( col_index ) * kernel.load[nelts](kernel_index) else: result[0] += ( col_ptr.load[width=_nelts](col_index) * kernel.load[_nelts](kernel_index) ).reduce_add() vectorize[v_im2col, nelts](in_channels * k_x * k_y) var output_index = ( batch * outputs_strides[0] + out_ch * outputs_strides[1] + ux * outputs_strides[2] + uy ) outputs[output_index] = result.reduce_add() + bias[out_ch] parallelize[conv](batch_size) col_ptr.free() @staticmethod fn backward[ tensor_id: Int, ug_shape: TensorShape, input_shape: TensorShape, kernel_shape: TensorShape, bias_shape: TensorShape, attributes: AttributeVector, ]( ug: Tensor[dtype], inputs: Tensor[dtype], kernel: Tensor[dtype], bias: Tensor[dtype], ) -> Tensor[dtype]: """ Backward operation of 2D convolution. Upper gradient of shape: [batch, out_channels, uX, uY]. 
""" alias padding = attributes["padding"].value().to_static[2]() alias stride = attributes["stride"].value().to_static[2]() alias dilation = attributes["dilation"].value().to_static[2]() alias padding_0 = padding[0] alias padding_1 = padding[1] alias stride_0 = stride[0] alias stride_1 = stride[1] alias dilation_0 = dilation[0] alias dilation_1 = dilation[1] alias inputs_strides = input_shape.strides() alias kernel_strides = kernel_shape.strides() alias ug_strides = ug_shape.strides() alias inputs_strides_0 = inputs_strides[0] alias inputs_strides_1 = inputs_strides[1] alias inputs_strides_2 = inputs_strides[2] alias kernel_strides_0 = kernel_strides[0] alias kernel_strides_1 = kernel_strides[1] alias kernel_strides_2 = kernel_strides[2] alias ug_strides_0 = ug_strides[0] alias ug_strides_1 = ug_strides[1] alias ug_strides_2 = ug_strides[2] alias input_shape_0 = input_shape[0] alias input_shape_1 = input_shape[1] alias input_shape_2 = input_shape[2] alias input_shape_3 = input_shape[3] alias kernel_shape_2 = kernel_shape[2] alias kernel_shape_3 = kernel_shape[3] alias ug_shape_0 = ug_shape[0] alias ug_shape_1 = ug_shape[1] alias ug_shape_2 = ug_shape[2] alias ug_shape_3 = ug_shape[3] var res: Tensor[dtype] @parameter if tensor_id == 0: # Inputs # Sum of upper gradient over batch, X, Y dimensions res = Tensor[dtype](input_shape) @parameter fn input_grad(batch: Int): for out_ch in range(ug_shape_1): for ux in range(ug_shape_2): for uy in range(ug_shape_3): # For all the element of ug var ix_base = ux * stride_0 - padding_0 var iy_base = uy * stride_1 - padding_1 var ug_val = ug[ batch * ug_strides_0 + out_ch * ug_strides_1 + ux * ug_strides_2 + uy ] for in_ch in range(input_shape_1): for kx in range(kernel_shape_2): for ky in range(kernel_shape_3): var ix = ix_base + kx * dilation_0 var iy = iy_base + ky * dilation_1 if ( ix < 0 or iy < 0 or ix >= input_shape_2 or iy >= input_shape_3 ): continue var kernel_index = ( out_ch * kernel_strides_0 + in_ch * kernel_strides_1 + kx * kernel_strides_2 + ky ) var input_index = ( batch * inputs_strides_0 + in_ch * inputs_strides_1 + ix * inputs_strides_2 + iy ) res[input_index] += ( kernel[kernel_index] * ug_val ) parallelize[input_grad](input_shape_0) elif tensor_id == 1: # Kernel # Sum of upper gradient over batch and X, Y dimensions res = Tensor[dtype](kernel_shape) @parameter fn kernel_grad(out_ch: Int): var channel_offset = out_ch * kernel_strides_0 for k in range(input_shape_1 * kernel_shape_2 * kernel_shape_3): var in_ch_kx_ky = divmod(k, kernel_shape_3) var in_ch = k // (kernel_shape_2 * kernel_shape_3) var kx = in_ch_kx_ky[0] % kernel_shape_2 var ky = in_ch_kx_ky[1] # TODO: Cant vectorize since you are going different directions across input and upper grad # But theoretically could transpose or split somehow var result: Scalar[dtype] = 0 for batch in range(input_shape_0): for ux in range(ug_shape_2): for uy in range(ug_shape_3): var ix = ux * stride_0 - padding_0 + kx * dilation_0 var iy = uy * stride_1 - padding_1 + ky * dilation_1 if ( ix < 0 or iy < 0 or ix >= input_shape_2 or iy >= input_shape_3 ): continue var input_index = batch * inputs_strides_0 + in_ch * inputs_strides_1 + ix * inputs_strides_2 + iy var ug_index = batch * ug_strides_0 + out_ch * ug_strides_1 + ux * ug_strides_2 + uy result += inputs[input_index] * ug[ug_index] var kernel_index = channel_offset + k res[kernel_index] = result parallelize[kernel_grad](ug_shape_1) else: # Bias # Sum of upper gradient over batch and X, Y dimensions # out_channels == ug_shape[1] == 
bias_shape[0] res = Tensor[dtype](bias_shape) # Psuedocode # For every channel in the bias tensor, # Iterate over the upper gradient across the batch # For each batch, sum the upper gradient across X, Y dimensions # Add the sum to the bias tensor @parameter fn bias_grad(out_ch: Int): var channel_offset = out_ch * ug_strides_1 var sum: Scalar[dtype] = 0 for batch in range(ug_shape_0): var batch_offset = batch * ug_strides_0 + channel_offset @parameter fn vec_sum[Nelts: Int](ux_uy: Int): sum += ug.load[Nelts](batch_offset + ux_uy).reduce_add() vectorize[vec_sum, nelts, size = ug_shape_2 * ug_shape_3]() res[out_ch] = sum parallelize[bias_grad](ug_shape_1) return res
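
# Illustrative sketch (added for clarity, not part of the original source):
# exercising get_result_shape from the top of this file. A 32x32 input with a
# 3x3 kernel, padding 1, stride 1 and dilation 1 keeps its spatial size:
# (32 + 2*1 - 1*(3 - 1) - 1) // 1 + 1 = 32. The function name is hypothetical.
fn _example_conv_result_shape() -> StaticIntTuple[2]:
    return get_result_shape(
        TensorShape(1, 3, 32, 32),  # [batch, in_channels, iX, iY]
        TensorShape(8, 3, 3, 3),  # [out_channels, in_channels, kX, kY]
        StaticIntTuple[2](1, 1),  # padding
        StaticIntTuple[2](1, 1),  # stride
        StaticIntTuple[2](1, 1),  # dilation
    )  # returns (32, 32)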
basalt/basalt/autograd/ops/conv.mojo
false
<filename>basalt/basalt/autograd/ops/dynamics.mojo
from basalt import Symbol
from basalt.nn.model import Parameters

from ..attributes import AttributeVector


struct CONCAT:
    @staticmethod
    fn result_shape(
        input_shapes: List[TensorShape], attributes: AttributeVector
    ) -> List[TensorShape]:
        # Assumptions: all tensors have the same shape, except for the concatenating dimension
        var dim = attributes["dim"].value().to_int() if attributes["dim"] else 0

        var concat_size: Int = 0
        for i in range(len(input_shapes)):
            concat_size += input_shapes[i][dim]

        var res_shape = input_shapes[0]
        res_shape[dim] = concat_size

        return List[TensorShape](res_shape)

    @staticmethod
    fn calc_chunks(shape: TensorShape, dim: Int) -> Int:
        # Number of chunks up to the concatenating dimension
        # Assuming tensors of equal shape, except for the concatenating dimension
        var chunks = 1
        for i in range(dim):
            chunks *= shape[i]
        return chunks

    @staticmethod
    fn forward[attributes: AttributeVector](
        inputs: List[Symbol],
        outputs: List[Symbol],
        parameters: Parameters,
    ):
        alias dim = attributes["dim"].value().to_int() if attributes["dim"] else 0

        var n_chunks = Self.calc_chunks(inputs[0].shape, dim)

        var chunks = List[Int]()
        var chunk_offsets = List[Int](0)
        for i in range(len(inputs)):
            chunks.append(inputs[i].shape.num_elements() // n_chunks)
            chunk_offsets.append(chunk_offsets[i] + chunks[i])

        for i in range(n_chunks):
            for j in range(len(inputs)):
                memcpy(
                    parameters.tensors[outputs[0]].data()
                    + i * chunk_offsets[len(inputs)]
                    + chunk_offsets[j],
                    parameters.tensors[inputs[j]].data() + i * chunks[j],
                    chunks[j],
                )

    @staticmethod
    fn backward[input_id: Int, attributes: AttributeVector](
        inputs: List[Symbol],
        outputs: List[Symbol],
        parameters: Parameters,
    ) -> Tensor[dtype]:
        alias dim = attributes["dim"].value().to_int() if attributes["dim"] else 0

        var n_chunks = Self.calc_chunks(inputs[0].shape, dim)

        var chunks = List[Int]()
        var chunk_offsets = List[Int](0)
        for i in range(len(inputs)):
            chunks.append(inputs[i].shape.num_elements() // n_chunks)
            chunk_offsets.append(chunk_offsets[i] + chunks[i])

        var res_grad = Tensor[dtype](inputs[input_id].shape)

        for i in range(n_chunks):
            memcpy(
                res_grad.data() + i * chunks[input_id],
                parameters.grads[outputs[0]].data()
                + i * chunk_offsets[len(inputs)]
                + chunk_offsets[input_id],
                chunks[input_id],
            )

        return res_grad ^


struct SPLIT:
    @staticmethod
    fn result_shape(
        input_shapes: List[TensorShape], attributes: AttributeVector
    ) -> List[TensorShape]:
        # Assuming the sum of the sections is equal to the total size in the dim dimension.
        # E.g. sections = [5, 5, 2] -> shape (., 12, ., .) for dim = 1
        var dim = attributes["dim"].value().to_int() if attributes["dim"] else 0
        var sections = attributes["sections"].value().to_shape()

        var res_shapes = List[TensorShape]()
        for i in range(sections.rank()):
            var new_shape = input_shapes[0]
            new_shape[dim] = sections[i]
            res_shapes.append(new_shape)

        return res_shapes

    @staticmethod
    fn calc_chunks(shape: TensorShape, dim: Int) -> Int:
        # Number of chunks up to the split dimension
        # Assuming tensors of equal shape, except for the split dimension
        var chunks = 1
        for i in range(dim):
            chunks *= shape[i]
        return chunks

    @staticmethod
    fn forward[attributes: AttributeVector](
        inputs: List[Symbol],
        outputs: List[Symbol],
        parameters: Parameters,
    ):
        alias dim = attributes["dim"].value().to_int() if attributes["dim"] else 0
        alias sections = attributes["sections"].value().to_shape()

        var n_chunks = Self.calc_chunks(inputs[0].shape, dim)

        var chunks = List[Int]()
        var chunk_offsets = List[Int](0)
        for i in range(len(outputs)):
            chunks.append(outputs[i].shape.num_elements() // n_chunks)
            chunk_offsets.append(chunk_offsets[i] + chunks[i])

        for i in range(n_chunks):
            for j in range(len(outputs)):
                memcpy(
                    parameters.tensors[outputs[j]].data() + i * chunks[j],
                    parameters.tensors[inputs[0]].data()
                    + i * chunk_offsets[len(outputs)]
                    + chunk_offsets[j],
                    chunks[j],
                )

    @staticmethod
    fn backward[input_id: Int, attributes: AttributeVector](
        inputs: List[Symbol],
        outputs: List[Symbol],
        parameters: Parameters,
    ) -> Tensor[dtype]:
        alias dim = attributes["dim"].value().to_int() if attributes["dim"] else 0
        alias sections = attributes["sections"].value().to_shape()

        var n_chunks = Self.calc_chunks(inputs[0].shape, dim)

        var chunks = List[Int]()
        var chunk_offsets = List[Int](0)
        for i in range(len(outputs)):
            chunks.append(outputs[i].shape.num_elements() // n_chunks)
            chunk_offsets.append(chunk_offsets[i] + chunks[i])

        var res_grad = Tensor[dtype](inputs[input_id].shape)

        for i in range(n_chunks):
            for j in range(len(outputs)):
                memcpy(
                    res_grad.data()
                    + i * chunk_offsets[len(outputs)]
                    + chunk_offsets[j],
                    parameters.grads[outputs[j]].data() + i * chunks[j],
                    chunks[j],
                )

        return res_grad ^
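
# Illustrative sketch (added for clarity, not part of the original source): the
# chunk arithmetic shared by CONCAT and SPLIT. For a (2, 3, 4) tensor and
# dim = 1 there are 2 chunks (the product of the dimensions before dim), and a
# section of size s along dim covers s * 4 contiguous elements per chunk. The
# function name is hypothetical.
fn _example_calc_chunks() -> Int:
    return SPLIT.calc_chunks(TensorShape(2, 3, 4), 1)  # returns 2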
basalt/basalt/autograd/ops/dynamics.mojo
false
<filename>basalt/basalt/autograd/ops/matmul.mojo from basalt.utils.tensorutils import transpose_2D from algorithm import vectorize, parallelize @always_inline fn calculate_block[ M: Int, N: Int, K: Int, BLOCK_M: Int, BLOCK_N: Int, nelts: Int ]( res: DTypePointer[dtype], t1: DTypePointer[dtype], t2: DTypePointer[dtype], bm: Int, bn: Int, ): # Compute tile var acc = stack_allocation[BLOCK_M * BLOCK_N, dtype]() memset_zero[dtype](acc, BLOCK_M * BLOCK_N) for k in range(K): @unroll for m in range(BLOCK_M): @parameter fn inner_n[nelts: Int](n: Int): acc.store[width=nelts]( m * BLOCK_N + n, SIMD[dtype, nelts] .splat(t1[(bm + m) * K + k]) .fma( t2.load[width=nelts](k * N + (bn + n)), acc.load[width=nelts](m * BLOCK_N + n), ), ) vectorize[inner_n, nelts](BLOCK_N) # Store tile for m in range(BLOCK_M): @parameter fn vec_store[nelts: Int](n: Int): res.store[width=nelts]( (bm + m) * N + (bn + n), acc.load[width=nelts](m * BLOCK_N + n) ) vectorize[vec_store, nelts](BLOCK_N) @parameter @always_inline fn dot[ t1_shape: TensorShape, t2_shape: TensorShape ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]): dot[t1_shape, t2_shape](res.data(), t1.data(), t2.data()) @parameter @always_inline fn dot[ t1_shape: TensorShape, t2_shape: TensorShape ](res: DTypePointer[dtype], t1: DTypePointer[dtype], t2: DTypePointer[dtype]): alias M = t1_shape[0] # t1[0] alias K = t1_shape[1] # t1[1], t2[0] alias N = t2_shape[1] # t2[1] # simdwidthof[dtype]() = 8 for float32 alias nelts = simdwidthof[dtype]() alias BLOCK_N = 8 * 2 alias BLOCK_M = 6 alias THREADS = 6 # num_logical_cores() alias BLOCK_N_REMAINDER = N % BLOCK_N alias BLOCK_M_REMAINDER = M % BLOCK_M @parameter fn bm_par(m_outer: Int): var bm = m_outer * BLOCK_M for n_outer in range(0, N // BLOCK_N): var bn = n_outer * BLOCK_N calculate_block[M, N, K, BLOCK_M, BLOCK_N, nelts](res, t1, t2, bm, bn) # Handle the remainder of N @parameter if BLOCK_N_REMAINDER > 0: var bn = N - BLOCK_N_REMAINDER calculate_block[M, N, K, BLOCK_M, BLOCK_N_REMAINDER, nelts]( res, t1, t2, bm, bn ) parallelize[bm_par](M // BLOCK_M, M // BLOCK_M) # Handle the remainder of M @parameter if BLOCK_M_REMAINDER > 0: var bm = M - BLOCK_M_REMAINDER for n_outer in range(0, N // BLOCK_N): var bn = n_outer * BLOCK_N calculate_block[M, N, K, BLOCK_M_REMAINDER, BLOCK_N, nelts]( res, t1, t2, bm, bn ) # Handle corner remainder @parameter if BLOCK_N_REMAINDER > 0: var bn = N - BLOCK_N_REMAINDER calculate_block[M, N, K, BLOCK_M_REMAINDER, BLOCK_N_REMAINDER, nelts]( res, t1, t2, bm, bn ) fn dot_transpose_t2[ A_shape: TensorShape, B_shape: TensorShape ](inout C: DTypePointer[dtype], A: DTypePointer[dtype], B: DTypePointer[dtype]): dot[A_shape, TensorShape(B_shape[1], B_shape[0])](C, A, transpose_2D[B_shape](B)) fn dot_transpose_t2[ A_shape: TensorShape, B_shape: TensorShape ](inout C: Tensor[dtype], A: Tensor[dtype], B: Tensor[dtype]): memset_zero[dtype](C.data(), C.num_elements()) dot[A_shape, TensorShape(B_shape[1], B_shape[0])](C, A, transpose_2D[B_shape](B)) # @parameter # fn calc_row(i: Int): # for j in range(B_shape[0]): # @parameter # fn calc_row_A_B[nelts: Int](k: Int): # var A_pos = i * A.dim(1) + k # var B_pos = j * A.dim(1) + k # var t_new_pos = i * C.dim(1) + j # C[t_new_pos] += ( # A.load[nelts](A_pos) * B.load[nelts](B_pos) # ).reduce_add() # vectorize[calc_row_A_B, nelts, size=A_shape[1]]() # parallelize[calc_row](A_shape[0], 1) fn dot_transpose_t1[ A_shape: TensorShape, B_shape: TensorShape ](inout C: Tensor[dtype], A: Tensor[dtype], B: Tensor[dtype]): memset_zero[dtype](C.data(), 
C.num_elements()) dot[TensorShape(A_shape[1], A_shape[0]), B_shape](C, transpose_2D[A_shape](A), B) # @parameter # fn calc_row(i: Int): # for j in range(A_shape[0]): # @parameter # fn calc_row_t_new_B[nelts: Int](k: Int): # var A_pos = j * A.dim(1) + i # var B_pos = j * B.dim(1) + k # var t_new_pos = i * C.dim(1) + k # C.store[nelts]( # t_new_pos, # C.load[nelts](t_new_pos) # + A[A_pos] * B.load[nelts](B_pos), # ) # vectorize[calc_row_t_new_B, nelts, size=B_shape[1]]() # parallelize[calc_row](A_shape[1], 1)
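
# Illustrative sketch (added for clarity, not part of the original source): how
# the tiled `dot` above is invoked. The shapes are compile-time parameters, so
# the BLOCK_M x BLOCK_N tiling and remainder handling are resolved statically.
# The function name and shapes are hypothetical; `res` must be preallocated
# with shape (M, N).
fn _example_dot(inout res: Tensor[dtype], a: Tensor[dtype], b: Tensor[dtype]):
    # a: (4, 3), b: (3, 5) -> res: (4, 5)
    dot[TensorShape(4, 3), TensorShape(3, 5)](res, a, b)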
basalt/basalt/autograd/ops/matmul.mojo
false
from algorithm import vectorize, parallelize
from math import exp, pow, max, min, abs
from math.limit import min_finite, max_finite

from basalt import Tensor, TensorShape
from basalt.utils.tensorutils import elwise_transform
from basalt.autograd.attributes import Attribute, AttributeVector


@value
struct SIGMOID:
    @staticmethod
    fn result_shape(t1_shape: TensorShape) -> TensorShape:
        return t1_shape

    @staticmethod
    @always_inline
    fn sigmoid[
        type: DType, simd_width: Int
    ](x: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
        return 1 / (1 + exp(-x))

    @staticmethod
    @always_inline
    fn sigmoid_bw[
        type: DType, simd_width: Int
    ](x: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
        return Self.sigmoid(x) * (1 - Self.sigmoid(x))

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        """Forward operation of sigmoid."""
        elwise_transform[Self.sigmoid](res, t1)

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t1_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of sigmoid."""
        # d(sigmoid(x))/dx = sigmoid(x) * (1 - sigmoid(x))
        var res_grad = Tensor[dtype](ug_shape)

        @parameter
        fn vec_sigmoid_bw[nelts: Int](idx: Int):
            res_grad.store[nelts](
                idx,
                Self.sigmoid_bw(t1.load[nelts](idx)) * ug.load[nelts](idx),
            )

        vectorize[vec_sigmoid_bw, nelts](ug_shape.num_elements())

        return res_grad ^


struct RELU:
    @staticmethod
    fn result_shape(t1_shape: TensorShape) -> TensorShape:
        return t1_shape

    @staticmethod
    @always_inline
    fn relu[
        type: DType, simd_width: Int
    ](x: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
        # x if x > 0 else 0
        return (x > 0).select(x, 0)

    @staticmethod
    @always_inline
    fn relu_bw[
        type: DType, simd_width: Int
    ](x: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
        # 1 if x > 0 else 0
        return (x > 0).select[type](1, 0)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        """Forward operation of relu."""
        elwise_transform[Self.relu](res, t1)

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t1_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of relu."""
        # d(relu(x))/dx = 1 if x > 0 else 0. The gradient at x = 0 is defined as 0 (the true derivative is undefined there).
var res_grad = Tensor[dtype](ug_shape) @parameter fn vec_relu_bw[nelts: Int](idx: Int): res_grad.store[nelts]( idx, Self.relu_bw(t1.load[nelts](idx)) * ug.load[nelts](idx) ) vectorize[vec_relu_bw, nelts](ug_shape.num_elements()) return res_grad ^ struct TANH: @staticmethod fn result_shape(t1_shape: TensorShape) -> TensorShape: return t1_shape @staticmethod @always_inline fn tanh[ type: DType, simd_width: Int ](x: SIMD[type, simd_width]) -> SIMD[type, simd_width]: return (exp(x) - exp(-x)) / (exp(x) + exp(-x)) @staticmethod @always_inline fn tanh_bw[ type: DType, simd_width: Int ](x: SIMD[type, simd_width]) -> SIMD[type, simd_width]: return 1 - pow(Self.tanh(x), 2) @staticmethod fn forward[ t1_shape: TensorShape, ](inout res: Tensor[dtype], t1: Tensor[dtype]): """Forward operation of tanh.""" elwise_transform[Self.tanh](res, t1) @staticmethod fn backward[ ug_shape: TensorShape, t1_shape: TensorShape, ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]: """Backward operation of tanh.""" # d(tanh(x))/dx = 1 - tanh(x) ** 2 var res_grad = Tensor[dtype](ug_shape) @parameter fn vec_tanh_bw[nelts: Int](idx: Int): res_grad.store[nelts]( idx, Self.tanh_bw(t1.load[nelts](idx)) * ug.load[nelts](idx) ) vectorize[vec_tanh_bw, nelts](ug_shape.num_elements()) return res_grad ^ struct CLIP: @staticmethod fn result_shape(t_shape: TensorShape) -> TensorShape: return t_shape @staticmethod fn forward[ t_shape: TensorShape, attributes: AttributeVector ](inout res: Tensor[dtype], t: Tensor[dtype]): """ Forward pass of the clip operation. """ alias min_attr = attributes["min"] alias max_attr = attributes["max"] var min_val = min_attr.value().to_scalar[dtype]() if min_attr else min_finite[ dtype ]() var max_val = max_attr.value().to_scalar[dtype]() if max_attr else max_finite[ dtype ]() @parameter fn vec_clip[nelts: Int](i: Int): res.store[nelts](i, t.load[nelts](i).min(max_val).max(min_val)) vectorize[vec_clip, nelts, size = t_shape.num_elements()]() @staticmethod fn backward[ ug_shape: TensorShape, t_shape: TensorShape, attributes: AttributeVector = AttributeVector(), ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]: """Backward operation of clip.""" alias min_attr = attributes["min"] alias max_attr = attributes["max"] var min_val = min_attr.value().to_scalar[dtype]() if min_attr else min_finite[ dtype ]() var max_val = max_attr.value().to_scalar[dtype]() if max_attr else max_finite[ dtype ]() var res_grad = Tensor[dtype](t_shape) @parameter fn vec_clip_bw[nelts: Int](i: Int): var val = t.load[nelts](i) res_grad.store[nelts]( i, ((val >= min_val) * (val <= max_val)).select(ug.load[nelts](i), 0), ) vectorize[vec_clip_bw, nelts, size = t_shape.num_elements()]() return res_grad ^ struct SQUEEZE: @staticmethod fn result_shape(t1_shape: TensorShape, attributes: AttributeVector) -> TensorShape: var dim = attributes["dims"] var dims_to_squeeze = dim.value().to_shape() if dim else TensorShape() var new_shape = List[Int]() for i in range(t1_shape.rank()): if (not dim and t1_shape[i] == 1) or ( i in dims_to_squeeze and t1_shape[i] == 1 ): continue new_shape.append(t1_shape[i]) return TensorShape(new_shape) @staticmethod fn forward[ t1_shape: TensorShape, attributes: AttributeVector, ](inout res: Tensor[dtype], t1: Tensor[dtype]): memcpy(res.data(), t1.data(), t1.num_elements()) @staticmethod fn backward[ ug_shape: TensorShape, t1_shape: TensorShape, ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]: var res_grad = Tensor[dtype](t1_shape) memcpy(res_grad.data(), ug.data(), ug.num_elements()) return 
res_grad ^ struct UNSQUEEZE: @staticmethod fn result_shape(t1_shape: TensorShape, attributes: AttributeVector) -> TensorShape: var dim = attributes["dims"] var dims_to_squeeze = dim.value().to_shape() if dim else TensorShape() # Position in the expanded dims where the new dim (or dims) is placed. var new_rank = t1_shape.rank() + dims_to_squeeze.rank() var new_shape = List[Int]() var j = 0 for i in range(new_rank): if i in dims_to_squeeze or i - new_rank in dims_to_squeeze: new_shape.append(1) else: new_shape.append(t1_shape[j]) j += 1 return TensorShape(new_shape) @staticmethod fn forward[ t1_shape: TensorShape, attributes: AttributeVector, ](inout res: Tensor[dtype], t1: Tensor[dtype]): memcpy(res.data(), t1.data(), t1.num_elements()) @staticmethod fn backward[ ug_shape: TensorShape, t1_shape: TensorShape, ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]: var res_grad = Tensor[dtype](t1_shape) memcpy(res_grad.data(), ug.data(), ug.num_elements()) return res_grad ^ struct SLICE: @staticmethod fn adjust_boundary(slice: Int, dim_size: Int) -> Int: # Adjust negative indices & ensure they are within bounds. var s = slice if slice >= 0 else dim_size + slice return max(min(s, dim_size), 0) @staticmethod fn default_starts(shape: TensorShape) -> List[Int]: var starts = List[Int]() for i in range(shape.rank()): starts.append(0) return starts^ @staticmethod fn default_ends(shape: TensorShape) -> List[Int]: var ends = List[Int]() for i in range(shape.rank()): ends.append(shape[i]) return ends^ @staticmethod fn default_steps(shape: TensorShape) -> List[Int]: var steps = List[Int]() for i in range(shape.rank()): steps.append(1) return steps^ @staticmethod fn default_axes(shape: TensorShape) -> List[Int]: # NOTE: axes can't be negative var axes = List[Int]() for i in range(shape.rank()): axes.append(i) return axes^ @staticmethod fn result_shape(t1_shape: TensorShape, attributes: AttributeVector) -> TensorShape: # NOTE: Starts and ends have to be of the same size # NOTE: If axes not provided, starts and ends have to be of the same size as t1_shape var starts = attributes["starts"].value().to_shape() var ends = attributes["ends"].value().to_shape() var steps = attributes["steps"].value().to_shape() if attributes["steps"] else Self.default_steps(starts) var axes = attributes["axes"].value().to_shape() if attributes["axes"] else Self.default_axes(t1_shape) var new_shape = t1_shape for i in range(starts.rank()): var axis = axes[i] new_shape[axis] = len(range( start = Self.adjust_boundary(starts[i], t1_shape[axis]), end = Self.adjust_boundary(ends[i], t1_shape[axis]), step = steps[i] )) return new_shape @staticmethod fn reorder_positions[id: Int](original: TensorShape, axes: TensorShape, t1_shape: TensorShape) -> List[Int]: # Reorder the starts (id=0), ends (id=1) or steps (id=2) to match the order of the axes var updated: List[Int] @parameter if id == 0: updated = Self.default_starts(t1_shape) elif id == 1: updated = Self.default_ends(t1_shape) else: updated = Self.default_steps(t1_shape) for i in range(axes.rank()): var axis = axes[i] updated[axis] = original[i] if id == 2 else Self.adjust_boundary(original[i], t1_shape[axis]) return updated^ # NOTE: For now you can't have recursive function as parameter functions. # NOTE: From testing it seems a recursive function is almost the same speed as doing multiple nested for loops. 
@staticmethod fn recursive_iters_slice[ shape: TensorShape, original_shape: TensorShape, steps: List[Int], starts: List[Int], ends: List[Int], backward_op: Bool = False ]( inout res: Tensor[dtype], t1: Tensor[dtype], last_dims: Int, position: Int, last_position: Int, idx: Int, idx_original: Int, ): alias strides = shape.strides() alias t1_strides = original_shape.strides() var idx_temp = idx var idx_original_temp = starts[position] * t1_strides[position] + idx_original if position == last_position + 1: # Work on the last dimensions alias position = shape.rank() - 1 alias stride = t1_strides[position] * steps[position] @parameter fn v_slice[nelts: Int](k : Int): @parameter if not backward_op: @parameter if steps[position] == 1: res.store[nelts](idx_temp + k, t1.load[nelts](idx_original_temp)) else: res.store[nelts]( idx_temp + k, t1.data().offset(idx_original_temp).simd_strided_load[nelts](stride) ) else: @parameter if steps[position] == 1: res.store[nelts](idx_original_temp, t1.load[nelts](idx_temp + k)) else: res.data().offset(idx_original_temp).simd_strided_store[nelts]( t1.load[nelts](idx_temp + k), stride ) idx_original_temp += stride * nelts vectorize[v_slice, nelts](last_dims) return for _ in range(shape[position]): Self.recursive_iters_slice[shape, original_shape, steps, starts, ends, backward_op]( res, t1, last_dims, position + 1, last_position, idx_temp, idx_original_temp ) idx_temp += strides[position] idx_original_temp += steps[position] * t1_strides[position] @staticmethod fn slice_kernel[ res_shape: TensorShape, original_shape: TensorShape, steps: List[Int], starts: List[Int], ends: List[Int], backward_op: Bool = False ](inout res: Tensor[dtype], t1: Tensor[dtype]): alias strides = original_shape.strides() # Get the dimensions for vectorization var last_dims = 1 var positions_to_skip = 0 for i in range(res_shape.rank() - 1, -1, -1): if steps[i] != 1 and i != res_shape.rank() - 1: break last_dims *= res_shape[i] positions_to_skip += 1 if starts[i] != 0 or ends[i] != original_shape[i] or steps[i] != 1: break # Get the dimensions for the first loop var first_dims = 1 var start_position = 0 for i in range(res_shape.rank() - positions_to_skip): if steps[i] != 1 or starts[i] != 0 or ends[i] != original_shape[i]: break first_dims *= res_shape[i] start_position += 1 var middle_dims = res_shape.num_elements() // last_dims // first_dims @parameter fn p_slice(i: Int): Self.recursive_iters_slice[ res_shape, original_shape, steps, starts, ends, backward_op ]( res, t1, last_dims, start_position, res_shape.rank() - 1 - positions_to_skip, i * middle_dims * last_dims, i * strides[start_position - 1] ) parallelize[p_slice](first_dims) @staticmethod fn forward[ t1_shape: TensorShape, attributes: AttributeVector, ](inout res: Tensor[dtype], t1: Tensor[dtype]): alias axes = attributes["axes"].value().to_shape() if attributes["axes"] else Self.default_axes(t1_shape) alias starts = Self.reorder_positions[0](attributes["starts"].value().to_shape(), axes, t1_shape) alias ends = Self.reorder_positions[1](attributes["ends"].value().to_shape(), axes, t1_shape) alias steps = Self.reorder_positions[2](attributes["steps"].value().to_shape(), axes, t1_shape) if attributes["steps"] else Self.default_steps(t1_shape) alias res_shape = Self.result_shape(t1_shape, attributes) Self.slice_kernel[res_shape, t1_shape, steps, starts, ends, False](res, t1) @staticmethod fn backward[ ug_shape: TensorShape, t1_shape: TensorShape, attributes: AttributeVector = AttributeVector(), ](ug: Tensor[dtype], t1: Tensor[dtype]) -> 
Tensor[dtype]: alias axes = attributes["axes"].value().to_shape() if attributes["axes"] else Self.default_axes(t1_shape) alias starts = Self.reorder_positions[0](attributes["starts"].value().to_shape(), axes, t1_shape) alias ends = Self.reorder_positions[1](attributes["ends"].value().to_shape(), axes, t1_shape) alias steps = Self.reorder_positions[2](attributes["steps"].value().to_shape(), axes, t1_shape) if attributes["steps"] else Self.default_steps(t1_shape) var res_grad = Tensor[dtype](t1_shape) Self.slice_kernel[ug_shape, t1_shape, steps, starts, ends, True](res_grad, ug) return res_grad ^
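
# Illustrative sketch (added for clarity, not part of the original source): the
# scalar activation helpers above are plain SIMD functions and can be checked
# directly. sigmoid(0) = 0.5, so sigmoid_bw(0) = 0.5 * (1 - 0.5) = 0.25. The
# function name is hypothetical.
fn _example_sigmoid_bw() -> SIMD[DType.float32, 1]:
    return SIGMOID.sigmoid_bw(SIMD[DType.float32, 1](0))  # 0.25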
basalt/basalt/autograd/ops/mlops.mojo
false
from .basics import ( ADD, SUB, MUL, DIV, EXP, LOG, POW, DOT, SUM, MEAN, MAX, FLATTEN, RESHAPE, TRANSPOSE, FMA, ) from .mlops import SIGMOID, RELU, TANH, CLIP, SQUEEZE, UNSQUEEZE, SLICE from .dynamics import CONCAT, SPLIT from .conv import CONV2D from .pool import MAXPOOL2D from basalt import Tensor, TensorShape from basalt.nn.model import Parameters from basalt.utils.bytes import Bytes from basalt.utils.tensorutils import broadcast_shapes, accumulate_grad from ..attributes import AttributeVector # Define operators as named parameter expression @value @register_passable("trivial") struct OP(Stringable): """ Compile time Operators list. """ alias ADD = OP(0, "ADD") alias SUB = OP(1, "SUB") alias MUL = OP(2, "MUL") alias DIV = OP(3, "DIV") alias EXP = OP(4, "EXP") alias LOG = OP(5, "LOG") alias POW = OP(6, "POW") alias DOT = OP(7, "DOT") alias SUM = OP(8, "SUM") alias MEAN = OP(9, "MEAN") alias MAX = OP(10, "MAX") alias FLATTEN = OP(11, "FLATTEN") alias RESHAPE = OP(12, "RESHAPE") alias SIGMOID = OP(13, "SIGMOID") alias RELU = OP(14, "RELU") alias TANH = OP(15, "TANH") alias CONV2D = OP(16, "CONV2D") alias TRANSPOSE = OP(17, "TRANSPOSE") alias MAXPOOL2D = OP(18, "MAXPOOL2D") alias FMA = OP(19, "FMA") alias CLIP = OP(20, "CLIP") alias SQUEEZE = OP(21, "SQUEEZE") alias UNSQUEEZE = OP(22, "UNSQUEEZE") alias CONCAT = OP(23, "CONCAT", dynamic=True) alias SPLIT = OP(24, "SPLIT", dynamic=True) alias SLICE = OP(25, "SLICE") var id: UInt8 var name: Bytes[16] var dynamic: Bool fn __init__(inout self, id: UInt8, name: String, dynamic: Bool = False): self.id = id self.name = Bytes[16](name) self.dynamic = dynamic fn __eq__(self, other: OP) -> Bool: return self.id == other.id fn __str__(self) -> String: return str(self.name) fn static_result_shape( op: OP, operands: VariadicList[Symbol], attributes: AttributeVector ) -> TensorShape: """ Static result shape for operators. """ if len(operands) == 1: return static_result_shape(op, operands[0].shape, attributes) elif len(operands) == 2: return static_result_shape(op, operands[0].shape, operands[1].shape, attributes) elif len(operands) == 3: return static_result_shape( op, operands[0].shape, operands[1].shape, operands[2].shape, attributes ) else: print("Error: Invalid number of operands") return TensorShape() fn static_result_shape( op: OP, t1_shape: TensorShape, attributes: AttributeVector ) -> TensorShape: """ Static result shape for unary operators. 
""" if op == OP.EXP: return EXP.result_shape(t1_shape) elif op == OP.LOG: return LOG.result_shape(t1_shape) elif op == OP.SUM: return SUM.result_shape(t1_shape, attributes) elif op == OP.MEAN: return MEAN.result_shape(t1_shape, attributes) elif op == OP.MAX: return MAX.result_shape(t1_shape, attributes) elif op == OP.FLATTEN: return FLATTEN.result_shape(t1_shape) elif op == OP.RESHAPE: return RESHAPE.result_shape(t1_shape, attributes) elif op == OP.SIGMOID: return SIGMOID.result_shape(t1_shape) elif op == OP.RELU: return RELU.result_shape(t1_shape) elif op == OP.TANH: return TANH.result_shape(t1_shape) elif op == OP.TRANSPOSE: return TRANSPOSE.result_shape(t1_shape, attributes) elif op == OP.MAXPOOL2D: return MAXPOOL2D.result_shape(t1_shape, attributes) elif op == OP.CLIP: return CLIP.result_shape(t1_shape) elif op == OP.SQUEEZE: return SQUEEZE.result_shape(t1_shape, attributes) elif op == OP.UNSQUEEZE: return UNSQUEEZE.result_shape(t1_shape, attributes) elif op == OP.SLICE: return SLICE.result_shape(t1_shape, attributes) else: print("[ERROR] Operator not found.") return TensorShape(-1) fn static_result_shape( op: OP, t1_shape: TensorShape, t2_shape: TensorShape, attributes: AttributeVector, ) -> TensorShape: """ Static result shape for binary operators. """ if op == OP.ADD: return ADD.result_shape(t1_shape, t2_shape) elif op == OP.SUB: return SUB.result_shape(t1_shape, t2_shape) elif op == OP.MUL: return MUL.result_shape(t1_shape, t2_shape) elif op == OP.DIV: return DIV.result_shape(t1_shape, t2_shape) elif op == OP.POW: return POW.result_shape(t1_shape, t2_shape) elif op == OP.DOT: return DOT.result_shape(t1_shape, t2_shape) else: # We can't print at compile time (at least for now it crashes at comp time with an error) print("[ERROR] Operator not found.") return TensorShape(-1, -1) fn static_result_shape( op: OP, t1_shape: TensorShape, t2_shape: TensorShape, t3_shape: TensorShape, attributes: AttributeVector, ) -> TensorShape: """ Static result shape for ternary operators. """ if op == OP.CONV2D: return CONV2D.result_shape(t1_shape, t2_shape, t3_shape, attributes) elif op == OP.FMA: return FMA.result_shape(t1_shape, t2_shape, t3_shape) else: print("[ERROR] Operator not found.") return TensorShape(-1, -1) fn dynamic_result_shape( op: OP, operands: VariadicList[Symbol], attributes: AttributeVector, ) -> List[TensorShape]: """ Static result shape for dynamic operators. """ # Unknown number of inputs and outputs. var input_shapes = List[TensorShape]() for operand in operands: input_shapes.append(operand.shape) if op == OP.CONCAT: return CONCAT.result_shape(input_shapes, attributes) elif op == OP.SPLIT: return SPLIT.result_shape(input_shapes, attributes) else: print("[ERROR] Operator not found.") return List[TensorShape](TensorShape(-1)) fn forward_op[ op: OP, t1_shape: TensorShape, attributes: AttributeVector ](inout res: Tensor[dtype], t1: Tensor[dtype]): """ Forward pass for unary operators. 
""" @parameter if op == OP.EXP: EXP.forward[t1_shape](res, t1) elif op == OP.LOG: LOG.forward[t1_shape](res, t1) elif op == OP.SUM: SUM.forward[t1_shape, attributes](res, t1) elif op == OP.MEAN: MEAN.forward[t1_shape, attributes](res, t1) elif op == OP.MAX: MAX.forward[t1_shape, attributes](res, t1) elif op == OP.FLATTEN: FLATTEN.forward[t1_shape](res, t1) elif op == OP.RESHAPE: RESHAPE.forward[t1_shape](res, t1) elif op == OP.SIGMOID: SIGMOID.forward[t1_shape](res, t1) elif op == OP.RELU: RELU.forward[t1_shape](res, t1) elif op == OP.TANH: TANH.forward[t1_shape](res, t1) elif op == OP.TRANSPOSE: TRANSPOSE.forward[t1_shape, attributes](res, t1) elif op == OP.MAXPOOL2D: MAXPOOL2D.forward[t1_shape, attributes](res, t1) elif op == OP.CLIP: CLIP.forward[t1_shape, attributes](res, t1) elif op == OP.SQUEEZE: SQUEEZE.forward[t1_shape, attributes](res, t1) elif op == OP.UNSQUEEZE: UNSQUEEZE.forward[t1_shape, attributes](res, t1) elif op == OP.SLICE: SLICE.forward[t1_shape, attributes](res, t1) else: print("[ERROR] Operator not found.") fn forward_op[ op: OP, t1_shape: TensorShape, t2_shape: TensorShape, attributes: AttributeVector ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]): """ Forward pass for binary operators. """ @parameter if op == OP.ADD: ADD.forward[t1_shape, t2_shape](res, t1, t2) elif op == OP.SUB: SUB.forward[t1_shape, t2_shape](res, t1, t2) elif op == OP.MUL: MUL.forward[t1_shape, t2_shape](res, t1, t2) elif op == OP.DIV: DIV.forward[t1_shape, t2_shape](res, t1, t2) elif op == OP.POW: POW.forward[t1_shape, t2_shape](res, t1, t2) elif op == OP.DOT: DOT.forward[t1_shape, t2_shape](res, t1, t2) else: print("[ERROR] Operator not found.") fn forward_op[ op: OP, t1_shape: TensorShape, t2_shape: TensorShape, t3_shape: TensorShape, attributes: AttributeVector, ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype], t3: Tensor[dtype]): """ Forward pass for ternary operators. """ @parameter if op == OP.CONV2D: CONV2D.forward[t1_shape, t2_shape, t3_shape, attributes](res, t1, t2, t3) elif op == OP.FMA: FMA.forward[t1_shape, t2_shape, t3_shape](res, t1, t2, t3) else: print("[ERROR] Operator not found.") fn forward_op[ op: OP, attributes: AttributeVector, ]( inputs: List[Symbol], outputs: List[Symbol], parameters: Parameters, ): """ Forward pass for dynamic operators. """ if op == OP.CONCAT: CONCAT.forward[attributes](inputs, outputs, parameters) elif op == OP.SPLIT: SPLIT.forward[attributes](inputs, outputs, parameters) else: print("[ERROR] Operator not found.") fn backward_op[ tensor_id: Int, op: OP, ug_shape: TensorShape, t1_shape: TensorShape, attributes: AttributeVector, ](ug: Tensor[dtype], t1: Tensor[dtype], inout grad: Tensor[dtype]): """ Backward pass for unary operators. 
""" var res_grad: Tensor[dtype] @parameter if op == OP.EXP: res_grad = EXP.backward[ug_shape, t1_shape](ug, t1) elif op == OP.LOG: res_grad = LOG.backward[ug_shape, t1_shape](ug, t1) elif op == OP.SUM: res_grad = SUM.backward[ug_shape, t1_shape, attributes](ug, t1) elif op == OP.MEAN: res_grad = MEAN.backward[ug_shape, t1_shape, attributes](ug, t1) elif op == OP.MAX: res_grad = MAX.backward[ug_shape, t1_shape, attributes](ug, t1) elif op == OP.FLATTEN: res_grad = FLATTEN.backward[ug_shape, t1_shape](ug, t1) elif op == OP.RESHAPE: res_grad = RESHAPE.backward[ug_shape, t1_shape](ug, t1) elif op == OP.SIGMOID: res_grad = SIGMOID.backward[ug_shape, t1_shape](ug, t1) elif op == OP.RELU: res_grad = RELU.backward[ug_shape, t1_shape](ug, t1) elif op == OP.TANH: res_grad = TANH.backward[ug_shape, t1_shape](ug, t1) elif op == OP.TRANSPOSE: res_grad = TRANSPOSE.backward[ug_shape, t1_shape, attributes](ug, t1) elif op == OP.MAXPOOL2D: res_grad = MAXPOOL2D.backward[ug_shape, t1_shape, attributes](ug, t1) elif op == OP.CLIP: res_grad = CLIP.backward[ug_shape, t1_shape, attributes](ug, t1) elif op == OP.SQUEEZE: res_grad = SQUEEZE.backward[ug_shape, t1_shape](ug, t1) elif op == OP.UNSQUEEZE: res_grad = UNSQUEEZE.backward[ug_shape, t1_shape](ug, t1) elif op == OP.SLICE: res_grad = SLICE.backward[ug_shape, t1_shape, attributes](ug, t1) else: print("[ERROR] Operator not found.") res_grad = Tensor[dtype](-1) accumulate_grad(grad, res_grad) fn backward_op[ tensor_id: Int, op: OP, ug_shape: TensorShape, t1_shape: TensorShape, t2_shape: TensorShape, attributes: AttributeVector, ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype], inout grad: Tensor[dtype]): """ Backward pass for binary operators. """ var res_grad: Tensor[dtype] @parameter if op == OP.ADD: res_grad = ADD.backward[tensor_id, ug_shape, t1_shape, t2_shape](ug, t1, t2) elif op == OP.SUB: res_grad = SUB.backward[tensor_id, ug_shape, t1_shape, t2_shape](ug, t1, t2) elif op == OP.MUL: res_grad = MUL.backward[tensor_id, ug_shape, t1_shape, t2_shape](ug, t1, t2) elif op == OP.DIV: res_grad = DIV.backward[tensor_id, ug_shape, t1_shape, t2_shape](ug, t1, t2) elif op == OP.POW: res_grad = POW.backward[tensor_id, ug_shape, t1_shape, t2_shape](ug, t1, t2) elif op == OP.DOT: res_grad = DOT.backward[tensor_id, ug_shape, t1_shape, t2_shape](ug, t1, t2) else: print("[ERROR] Operator not found.") res_grad = Tensor[dtype](-1, -1) fn broadcastable(op: OP) -> Bool: return op == OP.ADD or op == OP.SUB or op == OP.MUL or op == OP.DIV @parameter if broadcastable(op): accumulate_grad[ grad_shape = t1_shape if tensor_id == 0 else t2_shape, res_grad_shape = broadcast_shapes(t1_shape, t2_shape), ](grad, res_grad) else: accumulate_grad(grad, res_grad) fn backward_op[ tensor_id: Int, op: OP, ug_shape: TensorShape, t1_shape: TensorShape, t2_shape: TensorShape, t3_shape: TensorShape, attributes: AttributeVector, ]( ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype], t3: Tensor[dtype], inout grad: Tensor[dtype], ): """ Backward pass for ternary operators. 
""" var res_grad: Tensor[dtype] @parameter if op == OP.CONV2D: res_grad = CONV2D.backward[ tensor_id, ug_shape, t1_shape, t2_shape, t3_shape, attributes ](ug, t1, t2, t3) elif op == OP.FMA: res_grad = FMA.backward[tensor_id, ug_shape, t1_shape, t2_shape, t3_shape]( ug, t1, t2, t3 ) else: print("[ERROR] Operator not found.") res_grad = Tensor[dtype](-1, -1) accumulate_grad(grad, res_grad) fn backward_op[ input_id: Int, op: OP, attributes: AttributeVector, ]( inputs: List[Symbol], outputs: List[Symbol], inout grad: Tensor[dtype], parameters: Parameters, ): """ Backward pass for dynamic operators. """ var res_grad: Tensor[dtype] if op == OP.CONCAT: res_grad = CONCAT.backward[input_id, attributes](inputs, outputs, parameters) elif op == OP.SPLIT: res_grad = SPLIT.backward[input_id, attributes](inputs, outputs, parameters) else: print("[ERROR] Operator not found.") res_grad = Tensor[dtype](-1, -1) accumulate_grad(grad, res_grad)
basalt/basalt/autograd/ops/ops.mojo
false
<filename>basalt/basalt/autograd/ops/pool.mojo from math.limit import neginf from basalt import Tensor, TensorShape from basalt.autograd.attributes import AttributeVector from basalt.autograd.ops.conv import get_result_shape struct MAXPOOL2D: @staticmethod fn result_shape( input_shape: TensorShape, attributes: AttributeVector ) -> TensorShape: var kernel_size = attributes["kernel_size"].value().to_static[2]() var padding = attributes["padding"].value().to_static[2]() var stride = attributes["stride"].value().to_static[2]() var dilation = attributes["dilation"].value().to_static[2]() var res = get_result_shape( input_shape, TensorShape(kernel_size[0], kernel_size[1]), padding, stride, dilation, ) return TensorShape(input_shape[0], input_shape[1], res[0], res[1]) @staticmethod fn forward[ input_shape: TensorShape, attributes: AttributeVector ](inout outputs: Tensor[dtype], inputs: Tensor[dtype]): """ Returns the max value of each kernel in the input tensor. inputs.shape [batch_size, channels, iX, iY] with kernel_size = (kX, kY) outputs.shape [batch_size, channels, oX, oY]. """ alias kernel_size = attributes["kernel_size"].value().to_static[2]() alias padding = attributes["padding"].value().to_static[2]() alias stride = attributes["stride"].value().to_static[2]() alias dilation = attributes["dilation"].value().to_static[2]() alias inputs_strides = input_shape.strides() alias output_shape = Self.result_shape(input_shape, attributes) alias outputs_strides = output_shape.strides() for batch in range(input_shape[0]): for in_ch in range(input_shape[1]): for x in range(output_shape[2]): for y in range(output_shape[3]): var max_val: Scalar[dtype] = neginf[dtype]() var ix_base = x * stride[0] - padding[0] var iy_base = y * stride[1] - padding[1] for kx in range(kernel_size[0]): for ky in range(kernel_size[1]): var ix = ix_base + kx * dilation[0] var iy = iy_base + ky * dilation[1] if ( ix < 0 or iy < 0 or ix >= input_shape[2] or iy >= input_shape[3] ): continue var idx = ( batch * inputs_strides[0] + in_ch * inputs_strides[1] + ix * inputs_strides[2] + iy ) var val = inputs[idx] if val > max_val: max_val = val var out_idx = ( batch * outputs_strides[0] + in_ch * outputs_strides[1] + x * outputs_strides[2] + y ) outputs[out_idx] = max_val @staticmethod fn backward[ ug_shape: TensorShape, input_shape: TensorShape, attributes: AttributeVector ](ug: Tensor[dtype], inputs: Tensor[dtype]) -> Tensor[dtype]: """ Backward operation of MAXPOOL2D. 
Upper gradient of shape: [batch_size, channels, uX, uY] """ alias kernel_size = attributes["kernel_size"].value().to_static[2]() alias padding = attributes["padding"].value().to_static[2]() alias stride = attributes["stride"].value().to_static[2]() alias dilation = attributes["dilation"].value().to_static[2]() alias ug_strides = ug_shape.strides() alias inputs_strides = input_shape.strides() var res = Tensor[dtype](input_shape) for batch in range(input_shape[0]): for in_ch in range(input_shape[1]): for x in range(ug_shape[2]): for y in range(ug_shape[3]): var max_val: Scalar[dtype] = neginf[dtype]() var max_idx: Int = -1 var ix_base = x * stride[0] - padding[0] var iy_base = y * stride[1] - padding[1] for kx in range(kernel_size[0]): for ky in range(kernel_size[1]): var ix = ix_base + kx * dilation[0] var iy = iy_base + ky * dilation[1] if ( ix < 0 or iy < 0 or ix >= input_shape[2] or iy >= input_shape[3] ): continue var idx = ( batch * inputs_strides[0] + in_ch * inputs_strides[1] + ix * inputs_strides[2] + iy ) var val = inputs[idx] if val > max_val: max_val = val max_idx = idx var ug_idx = ( batch * ug_strides[0] + in_ch * ug_strides[1] + x * ug_strides[2] + y ) res[max_idx] += ug[ug_idx] return res
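
# Illustrative sketch (added for clarity, not part of the original source):
# MAXPOOL2D reuses get_result_shape with the window size as a pseudo kernel
# shape. A 32x32 input with a 2x2 window and stride 2 halves the spatial dims:
# (32 + 0 - 1*(2 - 1) - 1) // 2 + 1 = 16. The function name is hypothetical.
fn _example_pool_result_shape() -> StaticIntTuple[2]:
    return get_result_shape(
        TensorShape(1, 8, 32, 32),  # [batch, channels, iX, iY]
        TensorShape(2, 2),  # pooling window (kX, kY)
        StaticIntTuple[2](0, 0),  # padding
        StaticIntTuple[2](2, 2),  # stride
        StaticIntTuple[2](1, 1),  # dilation
    )  # returns (16, 16)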
basalt/basalt/autograd/ops/pool.mojo
false
<filename>basalt/basalt/autograd/ops/__init__.mojo from .ops import ( OP, static_result_shape, dynamic_result_shape, forward_op, backward_op, )
basalt/basalt/autograd/ops/__init__.mojo
false
<filename>basalt/basalt/nn/activations.mojo from basalt import Tensor, TensorShape from basalt import Graph, Symbol, OP from basalt.autograd.attributes import Attribute, AttributeVector # '''Activation functions.''' fn ReLU(inout g: Graph, input: Symbol) -> Symbol: return g.op(OP.RELU, input) fn Sigmoid(inout g: Graph, input: Symbol) -> Symbol: return g.op(OP.SIGMOID, input) fn Tanh(inout g: Graph, input: Symbol) -> Symbol: return g.op(OP.TANH, input) fn Softmax(inout g: Graph, input: Symbol, axis: Int) -> Symbol: # softmax: exp(x_i) / sum(exp(x_j)) # stable softmax: exp(x_i - max(x_j)) / sum(exp(x_j - max(x_j))) var max_values = g.op( OP.MAX, input, attributes=AttributeVector(Attribute("axis", axis)) ) var input_minus_max = g.op(OP.SUB, input, max_values) var exp_values = g.op(OP.EXP, input_minus_max) var sum_values = g.op( OP.SUM, exp_values, attributes=AttributeVector(Attribute("axis", axis)) ) return g.op(OP.DIV, exp_values, sum_values) fn LogSoftmax(inout g: Graph, input: Symbol, axis: Int) -> Symbol: # stable logsoftmax: log(exp(x_i - max(x_j)) / sum(exp(x_j - max(x_j)))) # stable logsoftmax: x_i - max(x_j) - log(sum(exp(x_j - max(x_j)))) var max_values = g.op( OP.MAX, input, attributes=AttributeVector(Attribute("axis", axis)) ) var input_minus_max = g.op(OP.SUB, input, max_values) var exp_values = g.op(OP.EXP, input_minus_max) var sum_values = g.op( OP.SUM, exp_values, attributes=AttributeVector(Attribute("axis", axis)) ) var log_values = g.op(OP.LOG, sum_values) return g.op(OP.SUB, input_minus_max, log_values)
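
# Illustrative sketch (added for clarity, not part of the original source):
# activations are graph builders, not eager ops; chaining them only appends
# nodes to the Graph. The function name is hypothetical.
fn _example_mlp_head(inout g: Graph, input: Symbol) -> Symbol:
    var hidden = ReLU(g, input)
    return LogSoftmax(g, hidden, axis=1)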
basalt/basalt/nn/activations.mojo
false
from math import sqrt from basalt import dtype from basalt import Tensor, TensorShape from basalt.utils.rand_utils import rand_normal, rand_uniform fn initialize_tensor( shape: TensorShape, type: String, data: List[Scalar[dtype]] ) -> Tensor[dtype]: if type == "random_uniform": var low = data[0] var high = data[1] var t = Tensor[dtype](shape) rand_uniform(t, low=low, high=high) return t elif type == "random_normal": var mean = data[0].cast[DType.float64]() var std = data[1].cast[DType.float64]() var t = Tensor[dtype](shape) rand_normal(t, mean=mean, std=std) return t # elif type == "kaiming_uniform": # # mode, nonlinearity # var mode_id = data[0] # var mode = "fan_in" if mode_id == 0 else "fan_out" # return kaiming_uniform(shape, mode = mode) # elif type == "kaiming_normal": # # mode, nonlinearity # var mode_id = data[0] # var mode = "fan_in" if mode_id == 0 else "fan_out" # return kaiming_normal(shape, mode = mode) else: print("[ERROR] Unsupported initialization type: " + type) return Tensor[dtype]() fn calculate_fan(shape: TensorShape, mode: String) -> Scalar[dtype]: """ Calculate the fan-in and fan-out of any tensor. """ # NOTE: shape.rank() should be > 2 # mode: "fan_in" or "fan_out" if shape.rank() < 2: print( "[ERROR] Fan in and fan out can not be calculated for tensor with less than" " 2 dimensions" ) var num_input_fmaps = shape[1] var num_output_fmaps = shape[0] var receptive_field_size = 1 if shape.rank() > 2: for i in range(2, shape.rank()): receptive_field_size *= shape[i] var fan_in = num_input_fmaps * receptive_field_size var fan_out = num_output_fmaps * receptive_field_size if mode == "fan_in": return fan_in else: return fan_out # # TODO: https://pytorch.org/docs/stable/_modules/torch/nn/init.html # fn kaiming_uniform(shape: TensorShape, mode: String = "fan_in", nonlinearity: String = "leaky_relu") -> Tensor[dtype]: # var fan = calculate_fan(shape, mode) # # TODO: add support for other gains: https://github.com/pytorch/pytorch/blob/main/torch/nn/init.py#L68 # # Gain for linear and conv layers is 1 # var gain = 1 # var std = gain / sqrt(fan) # # var bound = sqrt(3) * std.cast[dtype]() # var bound = std.cast[dtype]() # # print("Shape", shape, "Fan", fan, "Bound", bound) # var t = Tensor[dtype](shape) # rand_uniform(t, low = -bound, high = bound) # return t^ # # TODO: https://pytorch.org/docs/stable/_modules/torch/nn/init.html # fn kaiming_normal(shape: TensorShape, mode: String = "fan_in", nonlinearity: String = "leaky_relu") -> Tensor[dtype]: # var fan = calculate_fan(shape, mode) # # TODO: add support for other gains: https://github.com/pytorch/pytorch/blob/main/torch/nn/init.py#L68 # # Gain for linear and conv layers is 1 # var gain = 1 # var std = gain / sqrt(fan) # var t = Tensor[dtype](shape) # rand_normal(t, mean = 0, std = std.cast[DType.float64]()) # return t^
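
# Illustrative sketch (added for clarity, not part of the original source):
# fan values for a conv kernel of shape [8, 3, 3, 3]. The receptive field is
# 3 * 3 = 9, so fan_in = 3 * 9 = 27 and fan_out = 8 * 9 = 72. The function
# name is hypothetical.
fn _example_fan_in() -> Scalar[dtype]:
    return calculate_fan(TensorShape(8, 3, 3, 3), "fan_in")  # 27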
basalt/basalt/nn/initializers.mojo
false
<filename>basalt/basalt/nn/loss.mojo import basalt.nn as nn from basalt import Tensor, TensorShape from basalt import Graph, Symbol, OP fn MSELoss( inout g: Graph, y_pred: Symbol, y_true: Symbol, ) -> Symbol: # 1/N * sum( (outputs - targets)^2 ) var diff = g.op(OP.SUB, y_true, y_pred) var loss = g.op(OP.POW, diff, 2) var mean_loss = g.op(OP.MEAN, loss) return mean_loss fn CrossEntropyLoss( inout g: Graph, y_pred: Symbol, y_true: Symbol, ) -> Symbol: # -1/N * sum( targets * log_softmax(outputs) ) var log_softmax = nn.LogSoftmax(g, y_pred, axis=1) # CrossEntropy (reduction Mean) var targets_log_softmax = g.op(OP.MUL, y_true, log_softmax) var ret = g.op(OP.SUM, targets_log_softmax) var negDivN = g.op(OP.MUL, ret, -1.0 / y_pred.shape[0]) return negDivN
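
# Illustrative sketch (added for clarity, not part of the original source):
# losses are composed into the graph like any other op; the returned Symbol is
# what a model would use as its loss output. The function name and shapes are
# hypothetical.
fn _example_mse_graph(inout g: Graph) -> Symbol:
    var y_pred = g.input(TensorShape(4, 1))
    var y_true = g.input(TensorShape(4, 1))
    return MSELoss(g, y_pred, y_true)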
basalt/basalt/nn/loss.mojo
false
<filename>basalt/basalt/nn/model.mojo
from collections.optional import Optional, OptionalReg
from pathlib import Path
from sys import env_get_int

from basalt import Graph, Symbol, Tensor, TensorShape
from basalt.autograd.ops import forward_op, backward_op
from basalt.utils.collection import Collection
from basalt.utils.tensorutils import fill
from .initializers import initialize_tensor
from basalt.utils.perf_utils import PerfMetrics
from basalt.utils.onnx_utils import load_onnx_model, export_onnx_model

# When running mojo -D DEBUG=1 -I . file, a crash happens at some point at runtime, seemingly because of a linking error (because of using -I .).
# For now it seems one has to change this variable manually to be able to run the model with performance metrics.
alias DEBUG = env_get_int["DEBUG", 0]()


# TODO: remove when ability to concatenate graphs (modules)
fn dv_contains(dv: List[Symbol], symbol: Symbol) -> Bool:
    for i in range(len(dv)):
        if dv[i] == symbol:
            return True
    return False


# TODO: remove when ability to concatenate graphs (modules)
fn n_inference_nodes(g: Graph) -> OptionalReg[Int]:
    """
    Calculate the index of the node up to which the forward pass should be executed for a model inference.
    When looping in reverse: Equals the first index on which the node output is also a graph output.
    The number of inference nodes is that index + 1.
    """
    for i in range(len(g.nodes) - 1, -1, -1):
        for j in range(len(g.nodes[i].outputs)):
            if dv_contains(g.outputs, g.nodes[i].outputs[j]):
                return i + 1
    return None


@value
struct Parameters:
    var tensors: Collection
    var grads: Collection

    fn __init__(inout self):
        self.tensors = Collection()
        self.grads = Collection()


struct Model[
    g: Graph,
    n_inference_nodes: OptionalReg[Int] = n_inference_nodes(g),
]():
    var parameters: Parameters
    var perf_metrics: PerfMetrics

    fn __init__(inout self, inference_only: Bool = False):
        self.parameters = Parameters()

        @parameter
        if DEBUG == 1:
            self.perf_metrics = PerfMetrics(g)
        else:
            self.perf_metrics = PerfMetrics()

        self.allocate_tensor_memory()
        self.allocate_grad_memory()

        # TODO: remove this when ability to concatenate graphs (modules)
        # NOTE: inference_only is only used for suppressing the warning.
        if not inference_only and not g.loss_out:
            print("\n\n[WARNING]: No loss defined, model.forward() unavailable!\n\n")
        if not n_inference_nodes:
            print(
                "\n\n[WARNING]: No graph out defined, model.inference()"
                " unavailable!\n\n"
            )

    # TODO: remove when ability to concatenate graphs (modules)
    # Removes the need for splitting in forward and inference mode
    fn forward(inout self, *t_inputs: Tensor[dtype]) -> Tensor[dtype]:
        # NOTE: Important detail here is that the order of the inputs must be the same as the order the inputs were defined in the graph.
        # Example: If you were to define the y_true before the x when creating the graph
        #
        #   var g = Graph()
        #   var y_true = g.input(TensorShape(batch_size, n_outputs))
        #   var x = g.input(TensorShape(batch_size, n_inputs))
        #
        # Then the order of the inputs in the forward call must be the same:
        #
        #   model.forward(batch.labels, batch.inputs)

        # 1. Execute a full forward pass (model inference + loss)
        self.execute[g.nodes.size](t_inputs ^)

        # 2. Return loss from allocated output memory
        # TODO: known copy (reference?)
        return self.parameters.tensors[g.loss_out.value()]

    fn inference(inout self, *t_inputs: Tensor[dtype]) -> List[Tensor[dtype]]:
        # 1. Execute forward pass up to model out
        self.execute[n_inference_nodes.value()](t_inputs)

        # 2. Return outputs from allocated output memory
        # TODO: known copies (reference?)
var outputs = List[Tensor[dtype]]() for i in range(len(g.outputs)): outputs.append(self.parameters.tensors[g.outputs[i]]) return outputs ^ fn execute[num_nodes: Int](inout self, t_input: VariadicListMem[Tensor[dtype]]): # 1. Write inputs to allocated input memory for i in range(len(g.inputs)): self.parameters.tensors[g.inputs[i]] = t_input[i] # 2. Loop over all nodes and execute forward operations @parameter fn fw_unroll[i: Int](): alias op = g.nodes[i].operator alias attrs = g.nodes[i].attributes # Save start time for performance metrics @parameter if DEBUG == 1: self.perf_metrics.start_forward_pass() @parameter if op.dynamic: forward_op[op, attrs]( g.nodes[i].inputs, g.nodes[i].outputs, self.parameters, ) else: # Statically known shapes and number of operands alias num_operands = len(g.nodes[i].inputs) alias t1 = g.nodes[i].inputs[0] alias out = g.nodes[i].outputs[0] @parameter if num_operands == 1: # Unary operator forward_op[op, t1.shape, attrs]( self.parameters.tensors[out], self.parameters.tensors[t1] ) elif num_operands == 2: # Binary operator alias t2 = g.nodes[i].inputs[1] forward_op[op, t1.shape, t2.shape, attrs]( self.parameters.tensors[out], self.parameters.tensors[t1], self.parameters.tensors[t2], ) elif num_operands == 3: # Ternary operator alias t2 = g.nodes[i].inputs[1] alias t3 = g.nodes[i].inputs[2] forward_op[op, t1.shape, t2.shape, t3.shape, attrs]( self.parameters.tensors[out], self.parameters.tensors[t1], self.parameters.tensors[t2], self.parameters.tensors[t3], ) # Save end time for performance metrics @parameter if DEBUG == 1: self.perf_metrics.end_forward_pass(i) unroll[fw_unroll, num_nodes]() fn backward(inout self, *upper_grads: Tensor[dtype]): """ Main entrypoint of backward pass. """ # 1. Initialize output gradient at the beginning of the backward pass if len(upper_grads) == 0: # TODO remove loss_out tag fill(self.parameters.grads[g.loss_out.value()], 1.0) else: var node_outputs = g.nodes[g.nodes.size - 1].outputs if len(upper_grads) != node_outputs.size: print( "[WARNING] Number of upper grads does not match number of node" " outputs!" ) for i in range(node_outputs.size): self.parameters.grads[node_outputs[i]] = upper_grads[i] # 2. 
Loop over all nodes in reverse order and execute backward operations @parameter fn bw_unroll[i: Int](): alias reverse_i = g.nodes.size - i - 1 alias op = g.nodes[reverse_i].operator alias attrs = g.nodes[reverse_i].attributes alias num_operands = len(g.nodes[reverse_i].inputs) # Save start time for performance metrics @parameter if DEBUG == 1: self.perf_metrics.start_backward_pass() @parameter if op.dynamic: @parameter fn unroll_dynamic[j: Int](): @parameter if g.nodes[reverse_i].inputs[j].trainable: backward_op[j, op, attrs]( g.nodes[reverse_i].inputs, g.nodes[reverse_i].outputs, self.parameters.grads[g.nodes[reverse_i].inputs[j]], self.parameters, ) unroll[unroll_dynamic, num_operands]() else: # Statically known shapes and number of operands alias out = g.nodes[reverse_i].outputs[0] # or upper_grad symbol alias t1 = g.nodes[reverse_i].inputs[0] @parameter if num_operands == 1: # Unary operator @parameter if t1.trainable: backward_op[0, op, out.shape, t1.shape, attrs]( self.parameters.grads[out], self.parameters.tensors[t1], self.parameters.grads[t1], # grad to be updated: inputs[0] ) elif num_operands == 2: # Binary operator alias t2 = g.nodes[reverse_i].inputs[1] @parameter if t1.trainable: backward_op[0, op, out.shape, t1.shape, t2.shape, attrs]( self.parameters.grads[out], self.parameters.tensors[t1], self.parameters.tensors[t2], self.parameters.grads[t1], # grad to be updated: inputs[0] ) @parameter if t2.trainable: backward_op[1, op, out.shape, t1.shape, t2.shape, attrs]( self.parameters.grads[out], self.parameters.tensors[t1], self.parameters.tensors[t2], self.parameters.grads[t2], # grad to be updated: inputs[1] ) elif num_operands == 3: # Ternary operator alias t2 = g.nodes[reverse_i].inputs[1] alias t3 = g.nodes[reverse_i].inputs[2] @parameter if t1.trainable: backward_op[ 0, op, out.shape, t1.shape, t2.shape, t3.shape, attrs ]( self.parameters.grads[out], self.parameters.tensors[t1], self.parameters.tensors[t2], self.parameters.tensors[t3], self.parameters.grads[t1], # grad to be updated: inputs[0] ) @parameter if t2.trainable: backward_op[ 1, op, out.shape, t1.shape, t2.shape, t3.shape, attrs ]( self.parameters.grads[out], self.parameters.tensors[t1], self.parameters.tensors[t2], self.parameters.tensors[t3], self.parameters.grads[t2], # grad to be updated: inputs[1] ) @parameter if t3.trainable: backward_op[ 2, op, out.shape, t1.shape, t2.shape, t3.shape, attrs ]( self.parameters.grads[out], self.parameters.tensors[t1], self.parameters.tensors[t2], self.parameters.tensors[t3], self.parameters.grads[t3], # grad to be updated: inputs[2] ) # Save end time for performance metrics @parameter if DEBUG == 1: self.perf_metrics.end_backward_pass(i) unroll[bw_unroll, g.nodes.size]() fn allocate_tensor_memory(inout self): for i in range(len(g.inputs)): self.parameters.tensors.append( Tensor[dtype](g.inputs[i].shape), g.inputs[i] ) for i in range(len(g.params)): var p = g.params.symbols[i] var p_init = g.params.values[i] var par: Tensor[dtype] if p_init.initializer: # 1. Specific parameter initialization defined var initializer_attr = p_init.initializer.value()[] par = initialize_tensor( shape=p.shape, type=initializer_attr.to_string(), data=p_init.data.value()[], ) elif p_init.data: # 2. 
Parameter initialized with data only
                # Data is assumed to contain the tensor
                par = g.params.get_tensor(i)
            else:
                # Default parameter initialization to zero
                par = Tensor[dtype](p.shape)
            self.parameters.tensors.append(par ^, p)

        for i in range(len(g.nodes)):
            # Assumption: An input or a param cannot be an output of a node
            for j in range(len(g.nodes[i].outputs)):
                self.parameters.tensors.append(
                    Tensor[dtype](g.nodes[i].outputs[j].shape), g.nodes[i].outputs[j]
                )

    fn allocate_grad_memory(inout self):
        # Gradients have the same shape as their tensors
        for i in range(len(g.inputs)):
            if g.inputs[i].trainable:
                self.parameters.grads.append(
                    Tensor[dtype](g.inputs[i].shape), g.inputs[i]
                )

        for i in range(len(g.params)):
            var grad = g.params.symbols[i]
            if grad.trainable:
                self.parameters.grads.append(Tensor[dtype](grad.shape), grad)

        for i in range(len(g.nodes)):
            for j in range(len(g.nodes[i].outputs)):
                var out = g.nodes[i].outputs[j]
                if out.trainable:
                    self.parameters.grads.append(Tensor[dtype](out.shape), out)

    fn print_perf_metrics(self, time_format: String = "ns", print_shape: Bool = False):
        self.perf_metrics.print_forward_perf_metrics(time_format, print_shape)
        self.perf_metrics.print_backward_perf_metrics(time_format, print_shape)

    fn load_model_data(inout self, model_path: String):
        var path = Path(model_path)
        print("Loading model data from:", path)

        try:
            if path.suffix() == ".onnx":
                load_onnx_model(model_path, self.parameters, self.g)
            else:
                print("Model file format not supported:", path.suffix())
        except e:
            print("Error loading model data:", e)

    fn export_model(self, model_path: String):
        var path = Path(model_path)
        print("Exporting model to:", path)

        try:
            if path.suffix() == ".onnx":
                export_onnx_model(model_path, self.parameters, self.g)
            else:
                print("Model file format not supported:", path.suffix())
        except e:
            print("Error exporting model:", e)
basalt/basalt/nn/model.mojo
false
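For orientation, a minimal end-to-end sketch of driving the Model struct above. This is hedged: `g.out(...)` and `g.loss(...)` are assumed graph methods (only `g.outputs` and `g.loss_out` are visible in this file), and the batch/feature sizes are illustrative.

from basalt import dtype, Graph, Tensor, TensorShape
from basalt.nn import Model, Linear, MSELoss

fn create_graph() -> Graph:
    var g = Graph()
    # Input order fixes the argument order of model.forward(...); see the NOTE above
    var x = g.input(TensorShape(32, 13))
    var y_true = g.input(TensorShape(32, 1))
    var y_pred = Linear(g, x, n_outputs=1)
    g.out(y_pred)                       # assumed API: registers a graph output, enables model.inference()
    g.loss(MSELoss(g, y_pred, y_true))  # assumed API: sets g.loss_out, enables model.forward()
    return g ^

fn main():
    alias graph = create_graph()
    var model = Model[graph]()
    var loss = model.forward(Tensor[dtype](32, 13), Tensor[dtype](32, 1))
    print(str(loss))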
<filename>basalt/basalt/nn/optim.mojo
from math import add, mul, div, sqrt, sub
from algorithm import vectorize, parallelize

from .model import Parameters
from basalt import Graph, Symbol, Tensor, TensorShape
from basalt.utils.collection import Collection


fn get_trainable_parameters(g: Graph) -> List[Symbol]:
    """
    Get all symbols of trainable parameters.
    """

    var trainable_parameters = List[Symbol]()

    for i in range(len(g.params)):
        if g.params.symbols[i].trainable:
            trainable_parameters.append(g.params.symbols[i])

    return trainable_parameters ^


struct Adam[
    g: Graph,
    mutability: __mlir_type.i1,
    lifetime: AnyLifetime[mutability].type,
    trainable_parameters: List[Symbol] = get_trainable_parameters(g),
]:
    var parameters: Reference[Parameters, mutability, lifetime]

    var lr: Scalar[dtype]
    var beta1: Scalar[dtype]
    var beta2: Scalar[dtype]
    var epsilon: Scalar[dtype]
    var iter: Int

    var rms_grads: Collection
    var momentum_grads: Collection

    fn __init__(
        inout self,
        parameters: Reference[Parameters, mutability, lifetime],
        lr: Scalar[dtype] = 0.001,
        beta1: Scalar[dtype] = 0.9,
        beta2: Scalar[dtype] = 0.999,
        epsilon: Scalar[dtype] = 1e-8,
    ):
        self.parameters = parameters
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.iter = 0

        # Capacity of the collections should be the number of trainable parameters
        self.rms_grads = Collection(capacity=len(trainable_parameters))
        self.momentum_grads = Collection(capacity=len(trainable_parameters))

        self.allocate_rms_and_momentum()

    fn zero_grad(inout self):
        """Set all gradients to zero."""
        self.parameters[].grads.set_zero()

    fn step(inout self):
        """Update model parameters."""
        self.iter += 1

        # Loop over all trainable parameters
        @parameter
        fn p_step(i: Int):
            var param = trainable_parameters[i]

            @parameter
            fn v_step[nelts: Int](j: Int):
                var momentum_grads = self.momentum_grads[param].load[nelts](j)
                var rms_grads = self.rms_grads[param].load[nelts](j)
                var grads = self.parameters[].grads[param].load[nelts](j)
                var params = self.parameters[].tensors[param].load[nelts](j)

                # Momentum beta 1
                # f1 = beta1 * momentum + (1 - beta1) * grad
                momentum_grads = self.beta1 * momentum_grads + (1 - self.beta1) * grads
                self.momentum_grads[param].store[nelts](j, momentum_grads)

                # Bias correction
                # f2 = f1 / (1 - beta1 ** iter)
                momentum_grads = momentum_grads / (1 - self.beta1**self.iter)

                # RMS beta 2
                # f1 = beta2 * rms + (1 - beta2) * grad ** 2
                rms_grads = self.beta2 * rms_grads + (1 - self.beta2) * grads * grads
                self.rms_grads[param].store[nelts](j, rms_grads)

                # Bias correction
                # f2 = f1 / (1 - beta2 ** iter)
                rms_grads = rms_grads / (1 - self.beta2**self.iter)

                # tensor = tensor - lr * (f2 / (sqrt(rms) + epsilon))
                params = params - self.lr * (
                    momentum_grads / (sqrt(rms_grads) + self.epsilon)
                )
                self.parameters[].tensors[param].store[nelts](j, params)

            vectorize[v_step, 1](param.shape.num_elements())

        parallelize[p_step](len(trainable_parameters))

    fn allocate_rms_and_momentum(inout self):
        # They are initialized to zero
        # Loop over all trainable parameters
        for i in range(len(trainable_parameters)):
            var param = trainable_parameters[i]
            self.rms_grads.append(Tensor[dtype](param.shape), param)
            self.momentum_grads.append(Tensor[dtype](param.shape), param)
basalt/basalt/nn/optim.mojo
false
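A hedged training-step sketch for the optimizer above, reusing the hypothetical graph-builder from the Model sketch earlier. The `Reference(model.parameters)` construction mirrors the `parameters` field type; the import path is an assumption, since optim.mojo is not re-exported in nn/__init__.mojo.

from basalt import dtype, Tensor
from basalt.nn import Model
from basalt.nn.optim import Adam

fn main():
    alias graph = create_graph()  # hypothetical graph-builder as in the Model sketch
    var model = Model[graph]()
    var optim = Adam[graph](Reference(model.parameters), lr=0.001)

    for step in range(100):
        var x_batch = Tensor[dtype](32, 13)  # illustrative data
        var y_batch = Tensor[dtype](32, 1)
        optim.zero_grad()                           # zero all parameter grads
        var loss = model.forward(x_batch, y_batch)  # full pass incl. loss
        model.backward()                            # seeds the loss grad with 1.0, backprops
        optim.step()                                # bias-corrected Adam update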
from math import min from testing import assert_true from algorithm import vectorize from tensor import Tensor as _Tensor from tensor import TensorShape as _TensorShape alias MAX_RANK = 8 @register_passable("trivial") struct TensorShape(Stringable): var _rank: Int var _shape: StaticIntTuple[MAX_RANK] @always_inline("nodebug") fn __init__(inout self, *shape: Int): self._rank = len(shape) self._shape = StaticIntTuple[MAX_RANK]() for i in range(min(self._rank, MAX_RANK)): self._shape[i] = shape[i] @always_inline("nodebug") fn __init__(inout self, shapes: VariadicList[Int]): self._rank = len(shapes) self._shape = StaticIntTuple[MAX_RANK]() for i in range(min(self._rank, MAX_RANK)): self._shape[i] = shapes[i] @always_inline("nodebug") fn __init__(inout self, shape: List[Int]): self._rank = len(shape) self._shape = StaticIntTuple[MAX_RANK]() for i in range(min(self._rank, MAX_RANK)): self._shape[i] = shape[i] @always_inline("nodebug") fn __init__[num: Int](inout self, shape: StaticIntTuple[num]): self._rank = num self._shape = StaticIntTuple[MAX_RANK]() for i in range(min(self._rank, MAX_RANK)): self._shape[i] = shape[i] @always_inline("nodebug") fn __init__(inout self, rank: Int, shape: StaticIntTuple[MAX_RANK]): self._rank = rank self._shape = shape @always_inline("nodebug") fn __init__(inout self, owned shape: _TensorShape): self._rank = shape.rank() self._shape = StaticIntTuple[MAX_RANK]() for i in range(min(self._rank, MAX_RANK)): self._shape[i] = shape[i] @always_inline("nodebug") fn __getitem__(self, index: Int) -> Int: return self._shape[index if index >= 0 else self._rank + index] @always_inline("nodebug") fn __setitem__(inout self, index: Int, value: Int): self._shape[index if index >= 0 else self._rank + index] = value @always_inline("nodebug") fn rank(self) -> Int: return self._rank @always_inline("nodebug") fn num_elements(self) -> Int: var result = 1 for i in range(self._rank): result *= self._shape[i] return result @always_inline("nodebug") fn strides(self) -> StaticIntTuple[MAX_RANK]: var result = StaticIntTuple[MAX_RANK](0) result[self._rank - 1] = 1 for i in range(self._rank - 2, -1, -1): result[i] = result[i + 1] * self._shape[i + 1] return result @always_inline("nodebug") fn _std_shape(self) -> _TensorShape: var s = List[Int](capacity=self.rank()) for i in range(self.rank()): s.append(self[i]) return _TensorShape(s) @always_inline("nodebug") fn __str__(self) -> String: return str(self._std_shape()) @always_inline("nodebug") fn __eq__(self, other: TensorShape) -> Bool: if self.rank() != other.rank(): return False for i in range(self.rank()): if self[i] != other[i]: return False return True @always_inline("nodebug") fn __ne__(self, other: TensorShape) -> Bool: return not self.__eq__(other) @always_inline("nodebug") fn __contains__(self, value: Int) -> Bool: for i in range(self.rank()): if self[i] == value: return True return False struct Tensor[dtype: DType](Stringable, Movable, CollectionElement): var _data: DTypePointer[dtype] var _shape: TensorShape @always_inline("nodebug") fn __init__(inout self, *dims: Int): self._shape = TensorShape(dims) self._data = DTypePointer[dtype].alloc(self._shape.num_elements()) memset_zero(self._data, self._shape.num_elements()) @always_inline("nodebug") fn __init__(inout self, owned shape: TensorShape): self._data = DTypePointer[dtype].alloc(shape.num_elements()) memset_zero(self._data, shape.num_elements()) self._shape = shape @always_inline("nodebug") fn __init__( inout self, owned data: DTypePointer[dtype], owned shape: TensorShape ): # 
NOTE: Remember to write `_ = your_tensor` after passing it here, so the original is kept alive and this constructor does not exhibit unexpected behavior.
        self._data = DTypePointer[dtype].alloc(shape.num_elements())
        self._shape = shape
        memcpy(self._data, data, self._shape.num_elements())
        _ = data

    @always_inline("nodebug")
    fn __init__(inout self, owned tensor: _Tensor[dtype]):
        self._data = DTypePointer[dtype].alloc(tensor.num_elements())
        self._shape = tensor.shape()
        memcpy(self._data, tensor.data(), self._shape.num_elements())
        _ = tensor

    @always_inline("nodebug")
    fn __moveinit__(inout self, owned other: Tensor[dtype]):
        self._data = other._data
        self._shape = other._shape

    @always_inline("nodebug")
    fn __copyinit__(inout self, other: Tensor[dtype]):
        # print("[WARNING] Copying tensor")
        self._data = DTypePointer[dtype].alloc(other._shape.num_elements())
        memcpy(self._data, other._data, other.num_elements())
        self._shape = other._shape

    @always_inline("nodebug")
    fn __getitem__(self, index: Int) -> Scalar[dtype]:
        return self._data[index]

    @always_inline("nodebug")
    fn __setitem__(self, index: Int, value: Scalar[dtype]):
        self._data[index] = value

    @always_inline("nodebug")
    fn data(self) -> DTypePointer[dtype]:
        return self._data

    @always_inline("nodebug")
    fn shape(self) -> TensorShape:
        return self._shape

    @always_inline("nodebug")
    fn load[simd_width: Int](self, index: Int) -> SIMD[dtype, simd_width]:
        return self._data.load[width=simd_width](index)

    @always_inline("nodebug")
    fn store[simd_width: Int](self, index: Int, value: SIMD[dtype, simd_width]):
        self._data.store[width=simd_width](index, value)

    @always_inline("nodebug")
    fn strides(self) -> StaticIntTuple[MAX_RANK]:
        return self._shape.strides()

    @always_inline("nodebug")
    fn rank(self) -> Int:
        return self._shape.rank()

    @always_inline("nodebug")
    fn num_elements(self) -> Int:
        return self._shape.num_elements()

    @always_inline("nodebug")
    fn dim(self, index: Int) -> Int:
        return self._shape[index]

    @always_inline("nodebug")
    fn zero(self):
        memset_zero(self._data, self.num_elements())

    @always_inline("nodebug")
    fn ireshape(inout self, new_shape: TensorShape) raises:
        # NOTE: Consider not raising on error
        assert_true(self.num_elements() == new_shape.num_elements())
        self._shape = new_shape

    @always_inline("nodebug")
    fn __str__(self) -> String:
        var new_data = DTypePointer[dtype].alloc(self.num_elements())
        var std_shape = self._shape._std_shape()
        memcpy(new_data, self._data, self.num_elements())
        return str(_Tensor[dtype](ptr=new_data, shape=std_shape))

    @always_inline("nodebug")
    fn __del__(owned self):
        self._data.free()
basalt/basalt/nn/tensor.mojo
false
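A small sanity-check sketch for the row-major stride computation above: strides are built back-to-front, so the last dim has stride 1 and each earlier stride multiplies by the dim to its right.

from basalt import TensorShape

fn main():
    var shape = TensorShape(2, 3, 4)
    var s = shape.strides()
    print(s[0], s[1], s[2])      # 12 4 1, since stride[1] = 1 * 4 and stride[0] = 4 * 3
    print(shape.num_elements())  # 2 * 3 * 4 = 24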
<filename>basalt/basalt/nn/__init__.mojo from .tensor import Tensor, TensorShape from .model import Model from .layers.linear import Linear from .layers.conv import Conv2d from .layers.pool import MaxPool2d from .loss import MSELoss, CrossEntropyLoss from .activations import Softmax, LogSoftmax, ReLU, Sigmoid, Tanh
basalt/basalt/nn/__init__.mojo
false
from basalt import Graph, Symbol, OP
from basalt import Tensor, TensorShape
from basalt.utils import q_sqrt
from basalt.autograd.params import Param
from basalt.autograd.attributes import AttributeVector, Attribute


fn Conv2d(
    inout g: Graph,
    inputs: Symbol,
    out_channels: Int,
    kernel_size: StaticIntTuple[2],
    padding: StaticIntTuple[2] = 0,
    stride: StaticIntTuple[2] = 1,
    dilation: StaticIntTuple[2] = 1,
) -> Symbol:
    """
    A 2D Convolution Layer.

    Parameters:
        inputs.shape   [batch, in_channels, iX, iY].
        kernel.shape   [out_channels, in_channels, kX, kY] (or weights).
        bias.shape     [out_channels].
        output.shape   [batch, out_channels, oX, oY].
    """

    var in_channels: Int = inputs.shape[1]
    var fan_in: Scalar[dtype] = in_channels * kernel_size[0] * kernel_size[1]
    var bound = q_sqrt(fan_in)
    var weights = g.param(
        TensorShape(out_channels, in_channels, kernel_size[0], kernel_size[1]),
        init=Param("random_uniform", -bound, bound)
        # init=Param("kaiming_uniform", 0)
    )
    var bias = g.param(
        TensorShape(out_channels), init=Param("random_uniform", -bound, bound)
    )

    return g.op(
        OP.CONV2D,
        inputs,
        weights,
        bias,
        attributes=AttributeVector(
            Attribute("padding", padding),
            Attribute("stride", stride),
            Attribute("dilation", dilation),
        ),
    )
basalt/basalt/nn/layers/conv.mojo
false
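The docstring above leaves oX/oY implicit. A sketch of the standard convolution output-size arithmetic, which is assumed (not verified here) to match what OP.CONV2D computes:

fn conv2d_out_dim(in_dim: Int, kernel: Int, padding: Int, stride: Int, dilation: Int) -> Int:
    # Dilation stretches the effective kernel extent; padding is added on both sides
    return (in_dim + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

fn main():
    # A 28x28 input with a 3x3 kernel, padding 1, stride 1, dilation 1 keeps its size
    print(conv2d_out_dim(28, 3, 1, 1, 1))  # 28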
<filename>basalt/basalt/nn/layers/linear.mojo
from basalt import Tensor, TensorShape
from basalt import Graph, Symbol, OP
from basalt.utils import q_sqrt
from basalt.autograd.params import Param


fn Linear(
    inout g: Graph,
    inputs: Symbol,
    n_outputs: Int,
) -> Symbol:
    """
    A fully connected layer.
    """

    var fan_in: Scalar[dtype] = inputs.shape[1]
    var bound = q_sqrt(fan_in)
    var weights = g.param(
        TensorShape(inputs.shape[1], n_outputs),
        init=Param("random_uniform", -bound, bound)
        # init=Param("random_uniform", 1)
        # NOTE: mode fan_out is required as the weights are defined transposed
    )
    var b = g.param(TensorShape(n_outputs), init=Param("random_uniform", -bound, bound))

    var res = g.op(OP.DOT, inputs, weights)

    return g.op(OP.ADD, res, b)
basalt/basalt/nn/layers/linear.mojo
false
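A hedged wiring sketch for the layer above; graph construction follows the pattern used in this repo's other files, and the sizes are illustrative.

from basalt import Graph, TensorShape
from basalt.nn import Linear

fn build(inout g: Graph):
    var x = g.input(TensorShape(32, 13))  # [batch, n_inputs]
    var out = Linear(g, x, n_outputs=1)   # [batch, 1]: DOT(x, weights) then ADD bias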
from basalt import Tensor, TensorShape from collections.optional import Optional from basalt import Graph, Symbol, OP from basalt.autograd.attributes import AttributeVector, Attribute fn set_static_stride( kernel_size: StaticIntTuple[2], stride: Optional[Int] = None ) -> StaticIntTuple[2]: if stride: return StaticIntTuple[2](stride.value()[], stride.value()[]) else: return kernel_size fn MaxPool2d( inout g: Graph, inputs: Symbol, kernel_size: StaticIntTuple[2], stride: Optional[Int] = None, padding: StaticIntTuple[2] = 0, dilation: StaticIntTuple[2] = 1, ) -> Symbol: """ A 2D Max Pooling Layer. Kernel is unaware of the in_channels and out_channels of the input tensor. kernel.size (kX, kY) """ # TODO: assert padding <= kernel_size / 2 (at compile time) var stride_temp = set_static_stride(kernel_size, stride) return MaxPool2d(g, inputs, kernel_size, stride_temp, padding, dilation) fn MaxPool2d( inout g: Graph, inputs: Symbol, kernel_size: StaticIntTuple[2], stride: StaticIntTuple[2], # stride should be 1 or more padding: StaticIntTuple[2] = 0, dilation: StaticIntTuple[2] = 1, ) -> Symbol: """ A 2D Max Pooling Layer. Kernel is unaware of the in_channels and out_channels of the input tensor. kernel.size (kX, kY) """ # TODO: assert padding <= kernel_size / 2 (at compile time) return g.op( OP.MAXPOOL2D, inputs, attributes=AttributeVector( Attribute("kernel_size", kernel_size), Attribute("padding", padding), Attribute("stride", stride), Attribute("dilation", dilation), ), ) # # TODO
basalt/basalt/nn/layers/pool.mojo
false
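A short sketch of the stride-defaulting overload above: when stride is omitted, set_static_stride falls back to the kernel size, giving the common non-overlapping pooling windows.

from basalt import Graph, Symbol
from basalt.nn import MaxPool2d

fn build(inout g: Graph, x: Symbol) -> Symbol:
    # stride omitted -> stride == kernel_size == (2, 2)
    return MaxPool2d(g, x, kernel_size=StaticIntTuple[2](2, 2))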
from math import nan from math.limit import inf alias ScalarBytes = DType.uint64.sizeof() @register_passable("trivial") struct Bytes[capacity: Int](Stringable, CollectionElement, EqualityComparable): """ Static sequence of bytes. """ var data: StaticTuple[UInt8, capacity] @always_inline("nodebug") fn __init__(inout self): var data = StaticTuple[UInt8, capacity]() @unroll for i in range(capacity): data[i] = 0 self.data = data @always_inline("nodebug") fn __init__(inout self, s: String): var data = StaticTuple[UInt8, capacity]() var length = len(s) @unroll for i in range(capacity): data[i] = ord(s[i]) if i < length else 0 self.data = data @always_inline("nodebug") fn __len__(self) -> Int: return capacity @always_inline("nodebug") fn __setitem__(inout self, index: Int, value: UInt8): self.data[index] = value @always_inline("nodebug") fn __getitem__(self, index: Int) -> UInt8: return self.data[index] @always_inline("nodebug") fn __eq__(self, other: Self) -> Bool: @unroll for i in range(capacity): if self[i] != other[i]: return False return True @always_inline("nodebug") fn __ne__(self, other: Self) -> Bool: @unroll for i in range(capacity): if self[i] != other[i]: return True return False @always_inline("nodebug") fn __str__(self) -> String: var result: String = "" @unroll for i in range(capacity): var val = self[i] if val != 0: result += chr(int(val)) return result fn scalar_to_bytes[ dtype: DType, Size: Int = ScalarBytes ](value: Scalar[dtype]) -> Bytes[Size]: constrained[Size >= ScalarBytes, "Size must be at least ${ScalarBytes}"]() var bits = bitcast[DType.uint64](value.cast[expand_type[dtype]()]()) var data = Bytes[Size]() @unroll for i in range(ScalarBytes): data[i] = (bits >> (i << 3)).cast[DType.uint8]() return data fn bytes_to_scalar[dtype: DType](data: Bytes) -> Scalar[dtype]: constrained[data.capacity >= ScalarBytes, "Size must be at least ${ScalarBytes}"]() var bits: UInt64 = 0 @unroll for i in range(ScalarBytes): bits |= data[i].cast[DType.uint64]() << (i << 3) return bitcast[expand_type[dtype]()](bits).cast[dtype]() fn expand_type[dtype: DType]() -> DType: @parameter if dtype.is_floating_point(): return DType.float64 elif dtype.is_signed(): return DType.int64 elif dtype.is_integral(): return DType.uint64 constrained[False, "Type must be numeric"]() return DType.invalid
basalt/basalt/utils/bytes.mojo
false
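A round-trip sketch for the helpers above: the byte loop stores the low byte first, so the layout is little-endian, and scalars are widened (e.g. float32 -> float64) before being bit-cast.

from basalt.utils.bytes import scalar_to_bytes, bytes_to_scalar

fn main():
    var v: Scalar[DType.float32] = 3.5
    var b = scalar_to_bytes[DType.float32](v)     # Bytes[8]
    var back = bytes_to_scalar[DType.float32](b)  # 3.5 again
    print(back)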
<filename>basalt/basalt/utils/collection.mojo from math import max, divmod from memory.unsafe_pointer import UnsafePointer, initialize_pointee_move, destroy_pointee from basalt import Tensor, Symbol struct Collection(CollectionElement, Sized): """ A collection of tensors with associated symbols. """ var size: Int var capacity: Int var data: UnsafePointer[Tensor[dtype]] var symbols: DTypePointer[DType.uint32] @always_inline("nodebug") fn __init__(inout self, *, capacity: Int = 0): """ Initializes a new Collection with the given capacity. """ self.size = 0 self.capacity = capacity self.data = UnsafePointer[Tensor[dtype]].alloc(capacity) self.symbols = DTypePointer[DType.uint32].alloc(capacity) @always_inline("nodebug") fn __moveinit__(inout self, owned existing: Self): """ Move initializes a Collection from an existing one. """ self.size = existing.size self.capacity = existing.capacity self.data = existing.data self.symbols = existing.symbols @always_inline("nodebug") fn __copyinit__(inout self, existing: Self): """ Copy initializes a Collection from an existing one. """ self.capacity = existing.capacity self.size = existing.size self.data = UnsafePointer[Tensor[dtype]].alloc(existing.capacity) self.symbols = DTypePointer[DType.uint32].alloc(existing.capacity) memcpy(self.symbols, existing.symbols, existing.capacity) for i in range(existing.size): initialize_pointee_move((self.data + i), (existing.data + i)[]) @always_inline("nodebug") fn __del__(owned self): """ Destructor for the Collection. """ for i in range(self.size): destroy_pointee((self.data + i)) if self.data: self.data.free() if self.symbols: self.symbols.free() @always_inline("nodebug") fn __len__(self) -> Int: """ Returns the number of elements in the Collection. """ return self.size @always_inline("nodebug") fn _realloc(inout self, new_capacity: Int): """ Reallocates the Collection to the new capacity. """ var new_data = UnsafePointer[Tensor[dtype]].alloc(new_capacity) var new_symbols = DTypePointer[DType.uint32].alloc(new_capacity) for i in range(self.size): initialize_pointee_move((new_data + i), (self.data + i)[]) new_symbols[i] = self.symbols[i] self.data.free() self.symbols.free() self.data = new_data self.symbols = new_symbols self.capacity = new_capacity @always_inline("nodebug") fn append(inout self, owned value: Tensor[dtype], symbol: Symbol): """ Appends a tensor and its associated symbol to the Collection. """ self.append(value ^, symbol.name) @always_inline("nodebug") fn append(inout self, owned value: Tensor[dtype], symbol_name: UInt32): """ Appends a tensor and its associated symbol name to the Collection. """ if self.size >= self.capacity: self._realloc(max(1, self.capacity * 2)) initialize_pointee_move((self.data + self.size), value ^) self.symbols[self.size] = symbol_name self.size += 1 @always_inline("nodebug") fn get_index(self, symbol_name: UInt32) -> Int: """ Returns the index of the tensor with the given symbol name. 
""" alias factor = 8 # 2 -> 5.32s MNIST # 4 -> 4.95s MNIST # 8 -> 4.85s MNIST # 16 -> 5.19s MNIST # NOTE: This ideally should just be a hashmap for i in range(0, self.size, factor): var elems = self.symbols.load[width=factor](i) == symbol_name for j in range(factor): if elems[j]: return i + j var split = divmod(self.size, factor) for i in range(split[1]): var index = split[0] + i if self.symbols[index] == symbol_name: return index return -1 @always_inline("nodebug") fn __refitem__[ mutability: __mlir_type.i1, lifetime: AnyLifetime[mutability].type, ]( self: Reference[Self, mutability, lifetime]._mlir_type, symbol: Symbol, ) -> Reference[Tensor[dtype], mutability, lifetime]: """ Returns a reference to the tensor with the given symbol. """ var index = Reference(self)[].get_index(symbol.name) return (Reference(self)[].data + index)[] @always_inline("nodebug") fn clear(inout self): """ Clears the Collection, removing all tensors and symbols. """ for i in range(self.size): destroy_pointee((self.data + i)) memset_zero(self.symbols, self.capacity) self.size = 0 @always_inline("nodebug") fn set_zero(self): """ Zeroes out all the tensors in the collection. """ for i in range(self.size): self.data[i].zero()
basalt/basalt/utils/collection.mojo
false
from testing import assert_equal
from math import min
from memory import memcpy

from basalt import dtype, nelts
from basalt import Tensor, TensorShape


@value
struct Batch[dtype: DType](CollectionElement):
    var data: Tensor[dtype]
    var labels: Tensor[dtype]

    fn __init__(inout self, batch_data: Tensor[dtype], batch_labels: Tensor[dtype]):
        self.data = batch_data
        self.labels = batch_labels

    fn __init__(
        inout self,
        df_data: Tensor[dtype],
        df_labels: Tensor[dtype],
        start: Int,
        batch_data_shape: TensorShape,
        batch_labels_shape: TensorShape,
    ):
        # TODO: find a better way to do this
        # Links to the copies of the input tensors in model.forward()
        self.data = Tensor[dtype](batch_data_shape)
        self.labels = Tensor[dtype](batch_labels_shape)
        memcpy(
            self.data.data(),
            df_data.data().offset(start * batch_data_shape.strides()[0]),
            batch_data_shape.num_elements(),
        )
        memcpy(
            self.labels.data(),
            df_labels.data().offset(start * batch_labels_shape.strides()[0]),
            batch_labels_shape.num_elements(),
        )

    fn __getitem__(self, index: Int) -> Tensor[dtype]:
        if index == 0:
            return self.data
        elif index == 1:
            return self.labels
        else:
            print("[ERROR] Batch.__getitem__(): Index out of bounds")
            return Tensor[dtype]()


@value
struct DataLoader:
    var data: Tensor[dtype]
    var labels: Tensor[dtype]
    var batch_size: Int
    var _current_index: Int
    var _num_batches: Int
    var _data_batch_shape: TensorShape
    var _label_batch_shape: TensorShape

    fn __init__(
        inout self,
        data: Tensor[dtype],
        labels: Tensor[dtype],
        batch_size: Int,
    ):
        self.data = data
        self.labels = labels
        self.batch_size = batch_size

        # Number of batches to iterate over. NOTE: ignore the remainder for now
        # var remainder = 1 if self.data.dim(0) % self.batch_size != 0 else 0
        self._current_index = 0
        self._num_batches = self.data.dim(0) // self.batch_size  # + remainder

        # Batch shapes
        self._data_batch_shape = self.data.shape()
        self._label_batch_shape = self.labels.shape()
        self._data_batch_shape[0] = self.batch_size
        self._label_batch_shape[0] = self.batch_size

    @always_inline
    fn __len__(self) -> Int:
        """
        Returns the number of batches left in the dataset.
        """
        return self._num_batches

    fn __iter__(self) -> Self:
        # TODO: Starting the iterator requires returning (COPYING!) the whole dataloader, which contains the whole dataset.
        # Does this mean that the whole dataset is copied every epoch?!
        return self

    fn __next__(inout self) -> Batch[dtype]:
        # NOTE: ignore the remainder for now
        # var end = min(self._current_index + self.batch_size, self.data.dim(0))
        # self._data_shape[0] = end - self._current_index
        # self._label_shape[0] = end - self._current_index

        var temp_current_index = self._current_index
        self._current_index += self.batch_size
        self._num_batches -= 1

        return Batch[dtype](
            self.data,
            self.labels,
            temp_current_index,
            self._data_batch_shape,
            self._label_batch_shape,
        )
basalt/basalt/utils/dataloader.mojo
false
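A hedged epoch sketch for the DataLoader above; the for-loop relies on the __iter__/__len__/__next__ protocol this struct implements, and the dataset tensors are illustrative.

from basalt import dtype, Tensor
from basalt.utils.dataloader import DataLoader

fn main():
    var data = Tensor[dtype](100, 13)
    var labels = Tensor[dtype](100, 1)
    var loader = DataLoader(data, labels, batch_size=32)

    # 100 // 32 = 3 batches; the 4-sample remainder is dropped (see the NOTEs above)
    for batch in loader:
        print(str(batch.data.shape()), str(batch.labels.shape()))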
from algorithm import vectorize from math import div from basalt import dtype from basalt import Tensor, TensorShape from basalt.utils.tensorutils import elwise_op, tmean, tstd struct BostonHousing: alias n_inputs = 13 var data: Tensor[dtype] var labels: Tensor[dtype] fn __init__(inout self, file_path: String) raises: var s = read_file(file_path) # Skip the first and last lines # This does assume your last line in the file has a newline at the end var list_of_lines = s.split("\n")[1:-1] # Length is number of lines var N = len(list_of_lines) self.data = Tensor[dtype](N, self.n_inputs) # All columns except the last one self.labels = Tensor[dtype](N, 1) # Only the last column (MEDV) var line: List[String] = List[String]() # Load data in Tensor for item in range(N): line = list_of_lines[item].split(",") self.labels[item] = cast_string[dtype](line[-1]) for n in range(self.n_inputs): self.data[item * self.n_inputs + n] = cast_string[dtype](line[n]) # Normalize data # TODO: redo when tensorutils tmean2 and tstd2 are implemented alias nelts = simdwidthof[dtype]() var col = Tensor[dtype](N) for j in range(self.n_inputs): for k in range(N): col[k] = self.data[k * self.n_inputs + j] for i in range(N): self.data[i * self.n_inputs + j] = (self.data[i * self.n_inputs + j] - tmean(col)) / tstd(col) struct MNIST: var data: Tensor[dtype] var labels: Tensor[dtype] fn __init__(inout self, file_path: String) raises: var s = read_file(file_path) # Skip the first and last lines # This does assume your last line in the file has a newline at the end var list_of_lines = s.split("\n")[1:-1] # Length is number of lines var N = len(list_of_lines) self.data = Tensor[dtype](N, 1, 28, 28) self.labels = Tensor[dtype](N) var line: List[String] = List[String]() # Load data in Tensor for item in range(N): line = list_of_lines[item].split(",") self.labels[item] = atol(line[0]) for i in range(self.data.shape()[2]): for j in range(self.data.shape()[3]): self.data[item * 28 * 28 + i * 28 + j] = atol(line[i * 28 + j + 1]) # Normalize data alias nelts = simdwidthof[dtype]() @parameter fn vecdiv[nelts: Int](idx: Int): self.data.store[nelts](idx, div(self.data.load[nelts](idx), 255.0)) vectorize[vecdiv, nelts](self.data.num_elements()) fn read_file(file_path: String) raises -> String: var s: String with open(file_path, "r") as f: s = f.read() return s fn find_first(s: String, delimiter: String) -> Int: for i in range(len(s)): if s[i] == delimiter: return i return -1 fn cast_string[dtype: DType](s: String) raises -> Scalar[dtype]: """ Cast a string with decimal to a SIMD vector of dtype. """ var idx = find_first(s, delimiter=".") var x: Scalar[dtype] = -1 if idx == -1: # No decimal point x = atol(s) return x else: var c_int: Scalar[dtype] var c_frac: Scalar[dtype] c_int = atol(s[:idx]) c_frac = atol(s[idx + 1 :]) x = c_int + c_frac / (10 ** len(s[idx + 1 :])) return x
basalt/basalt/utils/datasets.mojo
false
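A small sketch exercising cast_string above. One observation (not from the source): negative decimals come out wrong, because the positive fraction is added to the negative integer part.

from basalt.utils.datasets import cast_string

fn main() raises:
    print(cast_string[DType.float64]("42"))    # 42.0 (no decimal point: plain atol path)
    print(cast_string[DType.float64]("3.14"))  # 3 + 14 / 10**2 = 3.14
    # cast_string[DType.float64]("-3.14") would yield -3 + 0.14 = -2.86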