content
stringlengths 32
91.6k
| path
stringlengths 14
91
| fimified
bool 2
classes |
---|---|---|
<filename>arrow.mojo/arrow/util.mojo
"""
Arrow buffers are recommended to have an alignment and padding of 64 bytes
https://arrow.apache.org/docs/format/Columnar.html#buffer-alignment-and-padding.
"""
alias PADDING = 64
alias ALIGNMENT = 64


fn get_num_bytes_with_padding(num_bytes: Int) -> Int:
    """Rounds `num_bytes` up to the next multiple of PADDING (64 bytes).

    Arrow recommends 64-byte alignment and padding for buffers; see the
    module docstring for the spec link.
    """
    var remainder = num_bytes % PADDING
    if remainder == 0:
        return num_bytes
    return num_bytes + PADDING - remainder
| arrow.mojo/arrow/util.mojo | false |
from arrow.physical_layout.arrow import (
ArrowFixedWidthVector,
ArrowIntVector,
)
from arrow.array.bool_array import ArrowBooleanArray
from arrow.buffer.bitmap import Bitmap
from arrow.buffer.offset import OffsetBuffer32, OffsetBuffer64
from arrow.physical_layout.varbinary import ArrowStringVector
| arrow.mojo/arrow/__init__.mojo | false |
from arrow.buffer.bitmap import Bitmap
struct ArrowBooleanArray:
    """Arrow boolean array: a bit-packed value buffer plus a validity bitmap.

    `length` is the logical element count, `null_count` the number of
    missing entries, and `mem_used` the total padded bytes of both bitmaps.
    """

    var length: Int
    var null_count: Int
    var _validity: Bitmap  # bit i set => element i is non-null
    var _buffer: Bitmap  # bit i holds the boolean value of element i
    var mem_used: Int

    fn __init__(inout self, values: List[Bool]):
        """Builds a fully-valid array from concrete booleans."""
        self.length = len(values)
        self.null_count = 0
        self._validity = Bitmap(List(True) * len(values))
        self._buffer = Bitmap(values)
        self.mem_used = self._validity.mem_used + self._buffer.mem_used

    fn __init__(inout self, length: Int):
        """Builds a fully-valid, all-False array of the given length."""
        self.length = length
        self.null_count = 0
        self._validity = Bitmap(List(True) * length)
        self._buffer = Bitmap(length)
        self.mem_used = self._validity.mem_used + self._buffer.mem_used

    fn __init__(inout self, values: List[Optional[Bool]]):
        """Builds an array from optionals; None entries become nulls."""
        self.length = len(values)
        self.null_count = 0
        var validity_list = List[Bool](capacity=len(values))
        var value_list = List[Bool](capacity=len(values))
        for i in range(len(values)):
            if values[i] is None:
                validity_list.append(False)
                self.null_count += 1
            else:
                validity_list.append(True)
                # Fix: the Optional must be unwrapped before appending to a
                # List[Bool]; the previous code appended the Optional itself.
                value_list.append(values[i].value())
        self._validity = Bitmap(validity_list)
        self._buffer = Bitmap(value_list)
        self.mem_used = self._validity.mem_used + self._buffer.mem_used

    fn __len__(self) -> Int:
        return self.length

    fn __getitem__(self, index: Int) raises -> Optional[Bool]:
        """Returns the element at `index`, or None if it is null."""
        if index < 0 or index >= self.length:
            # Fix: error text now names this type (was "ArrowBoolVector").
            raise Error("index out of range for ArrowBooleanArray")
        if self._validity._unsafe_getitem(index):
            return self._buffer._unsafe_getitem(index)
        return None
| arrow.mojo/arrow/array/bool_array.mojo | false |
<filename>arrow.mojo/arrow/buffer/binary.mojo
from arrow.util import ALIGNMENT, get_num_bytes_with_padding
@value
struct BinaryBuffer:
    """Flat, 64-byte-aligned byte buffer backing variable-size binary data.

    The allocation is padded up to a multiple of 64 bytes per the Arrow
    buffer alignment/padding recommendation (see arrow.util).
    NOTE(review): @value would synthesize lifecycle methods, but explicit
    __moveinit__/__copyinit__/__del__ are also defined below — confirm the
    decorator is still required.
    """

    alias _ptr_type = DTypePointer[DType.uint8]
    var _buffer: Self._ptr_type  # owned allocation; freed in __del__
    var length: Int  # bytes requested by the caller (unpadded)
    var mem_used: Int  # bytes actually allocated (length padded to 64)

    fn __init__(inout self, length_unpadded: Int):
        """Allocates a zeroed, aligned buffer of at least `length_unpadded` bytes."""
        self.length = length_unpadded
        self.mem_used = get_num_bytes_with_padding(length_unpadded)
        self._buffer = Self._ptr_type.alloc(self.mem_used, alignment=ALIGNMENT)
        memset_zero(self._buffer, self.mem_used)

    fn __init__(inout self, values: List[UInt8]):
        """Allocates a buffer sized for `values` and copies them in."""
        self = Self(len(values))
        self._unsafe_set_sequence(0, values)

    @always_inline
    fn _unsafe_setitem(self, index: Int, value: UInt8):
        # No bounds check — caller must guarantee 0 <= index < length.
        self._buffer[index] = value

    fn __setitem__(self, index: Int, value: UInt8) raises:
        if index < 0 or index >= self.length:
            raise Error("index out of range for BinaryBuffer")
        self._unsafe_setitem(index, value)

    @always_inline
    fn _unsafe_getitem(self, index: Int) -> UInt8:
        # No bounds check — caller must guarantee 0 <= index < length.
        return self._buffer[index]

    fn __getitem__(self, index: Int) raises -> UInt8:
        if index < 0 or index >= self.length:
            raise Error("index out of range for BinaryBuffer")
        return self._unsafe_getitem(index)

    fn _unsafe_set_sequence(self, start: Int, values: List[UInt8]):
        # Writes values[0..n) to buffer[start..start+n) without bounds checks.
        for i in range(len(values)):
            self._unsafe_setitem(start + i, values[i])

    fn set_sequence(self, start: Int, values: List[UInt8]) raises:
        """Copies `values` into the buffer starting at `start` (bounds-checked)."""
        if start < 0 or start + len(values) > self.length:
            raise Error("index out of range for BinaryBuffer")
        self._unsafe_set_sequence(start, values)

    fn _unsafe_get_sequence(self, start: Int, length: Int) -> List[UInt8]:
        # Reads buffer[start..start+length) into a new list; no bounds checks.
        var values = List[UInt8](capacity=length)
        for i in range(length):
            values.append(self._unsafe_getitem(start + i))
        return values

    fn get_sequence(self, start: Int, length: Int) raises -> List[UInt8]:
        """Returns `length` bytes starting at `start` (bounds-checked)."""
        if start < 0 or start + length > self.length:
            raise Error("index out of range for BinaryBuffer")
        return self._unsafe_get_sequence(start, length)

    fn __len__(self) -> Int:
        # Logical length in bytes, not the padded allocation size.
        return self.length

    # Lifecycle methods
    fn __moveinit__(inout self, owned existing: BinaryBuffer):
        # Steals the pointer; `existing` is consumed so no double free occurs.
        self._buffer = existing._buffer
        self.length = existing.length
        self.mem_used = existing.mem_used

    fn __copyinit__(inout self, existing: BinaryBuffer):
        # Deep copy: fresh aligned allocation, then byte-by-byte copy.
        self.length = existing.length
        self.mem_used = existing.mem_used
        self._buffer = Self._ptr_type.alloc(self.mem_used, alignment=ALIGNMENT)
        for i in range(self.mem_used):
            self._buffer[i] = existing._buffer[i]

    fn __del__(owned self):
        self._buffer.free()
| arrow.mojo/arrow/buffer/binary.mojo | false |
from memory.unsafe import Pointer
from memory import memset_zero
from arrow.util import PADDING, ALIGNMENT, get_num_bytes_with_padding
struct Bitmap(StringableRaising):
    """Bitmap according to the Apache Arrow specification which can found here.
    Source: https://arrow.apache.org/docs/format/Columnar.html#validity-bitmaps
    The source provides this pseudo code:
    ```
    is_valid[j] -> bitmap[j / 8] & (1 << (j % 8))
    ```
    And the following explanation:
    > We use least-significant bit (LSB) numbering (also known as bit-endianness). This means that within a group of 8 bits, we read right-to-left:
    ```
    values = [0, 1, null, 2, null, 3]
    bitmap
    j mod 8   7  6  5  4  3  2  1  0
              0  0  1  0  1  0  1  1
    ```
    """

    alias _ptr_type = DTypePointer[DType.uint8]
    var _buffer: Self._ptr_type  # owned, 64-byte-aligned allocation
    var length: Int  # logical number of bits
    var mem_used: Int  # allocated bytes (padded to 64)
    # TODO maybe buffers shouldn't have length and mem_used, just size.
    # The layouts that use the buffers can keep track of their length.

    fn __init__(inout self, length_unpadded: Int):
        """Creates a new Bitmap that supports at least `length_unpadded` elements.
        Args:
            length_unpadded: The number of elements the Bitmap should support.
                Buffers are typically padded to 32, 64, or 128 bytes but it
                depends on the architecture.
        """
        var num_bytes = (length_unpadded + 7) // 8
        var num_bytes_with_padding = get_num_bytes_with_padding(num_bytes)
        self._buffer = Self._ptr_type.alloc(
            num_bytes_with_padding, alignment=ALIGNMENT
        )
        memset_zero(self._buffer, num_bytes_with_padding)
        self.length = length_unpadded
        self.mem_used = num_bytes_with_padding

    fn __init__(inout self, bools: List[Bool]):
        """Creates a Bitmap with one bit per entry of `bools`."""
        self = Self(len(bools))
        for i in range(len(bools)):
            self._unsafe_setitem(i, bools[i])

    fn _unsafe_setitem(self, index: Int, value: Bool):
        """Sets the bit at `index` to `value`. Doesn't check if index is out of bounds.

        Fix: the previous version only ORed the bit in, so it could set a
        bit but never clear one (it only worked on zeroed memory). This
        version clears the target bit first, then sets it when `value` is
        True, so overwriting an existing 1 with False now works.
        """
        var byte_index = index // 8
        var bitmask = UInt8(1) << (index % 8)
        var byte = self._buffer[byte_index] & ~bitmask  # clear the target bit
        if value:
            byte = byte | bitmask
        self._buffer[byte_index] = byte

    @always_inline
    fn _unsafe_getitem(self, index: Int) -> Bool:
        """Doesn't check if index is out of bounds.
        Follows this pseudo code from the Apache Arrow specification
        `is_valid[j] -> bitmap[j / 8] & (1 << (j % 8))`
        """
        var byte_index = index // 8
        var bitmask: UInt8 = 1 << (index % 8)
        return ((self._buffer[byte_index] & bitmask)).__bool__()

    fn __getitem__(self, index: Int) raises -> Bool:
        if index < 0 or index >= self.length:
            raise Error("index out of range for Bitmap")
        return self._unsafe_getitem(index)

    fn __len__(self) -> Int:
        # Logical bit count, not the padded byte allocation.
        return self.length

    fn __del__(owned self):
        self._buffer.free()

    fn __moveinit__(inout self, owned existing: Bitmap):
        # Steals the allocation; `existing` is consumed so no double free.
        self._buffer = existing._buffer
        self.length = existing.length
        self.mem_used = existing.mem_used

    fn __copyinit__(inout self, existing: Bitmap):
        # Deep copy of the padded byte buffer.
        self._buffer = Self._ptr_type.alloc(
            existing.mem_used, alignment=ALIGNMENT
        )
        for i in range(existing.mem_used):
            self._buffer[i] = existing._buffer[i]
        self.length = existing.length
        self.mem_used = existing.mem_used

    fn __str__(self) raises -> String:
        """Renders the bitmap as e.g. "[True, False, True]"."""
        var output: String = "["
        for i in range(self.length):
            output = output + self[i].__str__()
            if i < self.length - 1:
                output = output + ", "
        return output + "]"

    fn to_list(self) raises -> List[Bool]:
        """Expands the packed bits back into a List[Bool] of `length` entries."""
        var bools = List[Bool](capacity=self.length)
        for i in range(self.length):
            bools.append(self[i])
        return bools
| arrow.mojo/arrow/buffer/bitmap.mojo | false |
<filename>arrow.mojo/arrow/buffer/dtype.mojo
from arrow.util import ALIGNMENT, get_num_bytes_with_padding
struct DTypeBuffer[type: DType]:
    """Typed, 64-byte-aligned buffer of `Scalar[type]` elements.

    `length` is the element count; `mem_used` is the padded allocation size
    in BYTES (a multiple of 64 per the Arrow recommendation).
    """

    alias _ptr_type = DTypePointer[type]
    alias element_type = Scalar[type]
    alias element_byte_width = sizeof[Self.element_type]()

    var _buffer: Self._ptr_type  # owned allocation; freed in __del__
    var length: Int  # number of elements
    var mem_used: Int  # allocated bytes (element bytes padded to 64)

    fn __init__(inout self, length: Int):
        """Allocates a zeroed buffer holding at least `length` elements."""
        self.length = length
        var num_bytes = self.length * Self.element_byte_width
        self.mem_used = get_num_bytes_with_padding(num_bytes)
        # alloc takes an ELEMENT count, so convert bytes back to elements.
        var alloc_count = self.mem_used // Self.element_byte_width
        self._buffer = Self._ptr_type.alloc(alloc_count, alignment=ALIGNMENT)
        memset_zero(self._buffer, alloc_count)

    fn __init__(inout self, values: List[Int]):
        """Allocates a buffer sized for `values` and copies them in."""
        self = Self(len(values))
        for i in range(len(values)):
            self._unsafe_setitem(i, values[i])

    @always_inline
    fn _unsafe_getitem(self, index: Int) -> Self.element_type:
        # No bounds check — caller must guarantee 0 <= index < length.
        return self._buffer[index]

    fn __getitem__(self, index: Int) raises -> Self.element_type:
        if index < 0 or index >= self.length:
            raise Error("index out of range for DTypeBuffer")
        return self._unsafe_getitem(index)

    @always_inline
    fn _unsafe_setitem(self, index: Int, value: Self.element_type):
        # No bounds check — caller must guarantee 0 <= index < length.
        self._buffer[index] = value

    fn __setitem__(self, index: Int, value: Self.element_type) raises:
        if index < 0 or index >= self.length:
            raise Error("index out of range for DTypeBuffer")
        self._unsafe_setitem(index, value)

    fn __len__(self) -> Int:
        return self.length

    fn __moveinit__(inout self, owned existing: Self):
        # Steals the allocation; `existing` is consumed so no double free.
        self._buffer = existing._buffer
        self.length = existing.length
        self.mem_used = existing.mem_used

    fn __copyinit__(inout self, existing: Self):
        self.length = existing.length
        self.mem_used = existing.mem_used
        # Fix: mem_used is a BYTE count, but alloc() and the copy loop work
        # in elements. The old code allocated `mem_used` elements
        # (over-allocating by element_byte_width x) and copied `mem_used`
        # elements, reading past the end of `existing`'s buffer for any
        # dtype wider than one byte. Convert to an element count first.
        var alloc_count = self.mem_used // Self.element_byte_width
        self._buffer = Self._ptr_type.alloc(alloc_count, alignment=ALIGNMENT)
        for i in range(alloc_count):
            self._buffer[i] = existing._buffer[i]

    fn __del__(owned self):
        self._buffer.free()
| arrow.mojo/arrow/buffer/dtype.mojo | false |
<filename>arrow.mojo/arrow/buffer/offset.mojo
from arrow.util import ALIGNMENT, get_num_bytes_with_padding
from arrow.buffer.dtype import DTypeBuffer
# Arrow offset buffers: 32-bit offsets for the standard variable-size
# layouts, 64-bit offsets for the "large" variants.
alias OffsetBuffer32 = DTypeBuffer[DType.int32]
alias OffsetBuffer64 = DTypeBuffer[DType.int64]
| arrow.mojo/arrow/buffer/offset.mojo | false |
<filename>arrow.mojo/arrow/buffer/__init__.mojo
from arrow.buffer.binary import BinaryBuffer
from arrow.buffer.bitmap import Bitmap
from arrow.buffer.offset import OffsetBuffer32, OffsetBuffer64
from arrow.buffer.dtype import DTypeBuffer
| arrow.mojo/arrow/buffer/__init__.mojo | false |
<filename>arrow.mojo/arrow/c_data_interface/c_data_interface.mojo
# Flag bits for ArrowSchema.flags in the Arrow C Data Interface
# (https://arrow.apache.org/docs/format/CDataInterface.html).
alias ARROW_FLAG_DICTIONARY_ORDERED = 1
alias ARROW_FLAG_NULLABLE = 2
alias ARROW_FLAG_MAP_KEYS_SORTED = 4
# @value
# struct ArrowSchema:
# var format: String
# var name: String
# var metadata: String
# var flags: Int64
# var n_children: Int64
# var children: List[Pointer[Self]]
# var dictionary: Pointer[Self]
# var release: Pointer[fn (Pointer[Self]) -> None]
# var private_data: Pointer[UInt8]
# @value
# struct ArrowArray:
# var length: Int64
# var null_count: Int64
# var offset: Int64
# var n_buffers: Int64
# var n_children: Int64
# var buffers: List[Pointer[UInt8]]
# var children: List[Pointer[Self]]
# var dictionary: Pointer[Self]
# var release: Pointer[fn (Pointer[Self]) -> None]
# var private_data: Pointer[UInt8]
| arrow.mojo/arrow/c_data_interface/c_data_interface.mojo | false |
from memory.unsafe import Pointer
from memory import memset_zero
from arrow.util import ALIGNMENT, get_num_bytes_with_padding
from arrow.buffer.bitmap import Bitmap
from arrow.buffer.offset import OffsetBuffer64
struct ArrowFixedWidthVector[T: AnyTrivialRegType]:
    """Fixed-width Arrow layout: one contiguous value buffer plus validity.

    `value` owns the raw byte allocation; `view` is the same memory
    reinterpreted as T for typed loads.
    NOTE(review): no __copyinit__/__moveinit__ are defined while __del__
    frees `value` — copying an instance would double-free; confirm copies
    never happen or add lifecycle methods.
    """

    # TODO: support null values
    var length: Int
    var null_count: Int
    var validity: Bitmap
    var value: Pointer[UInt8]  # owned raw bytes, padded to 64
    var view: Pointer[T]  # typed alias of `value`
    var mem_use: Int

    fn __init__(inout self, values: List[T]):
        """Copies `values` into a fresh aligned, padded allocation."""
        var byte_width = sizeof[T]()
        var num_bytes = len(values) * byte_width
        var num_bytes_with_padding = get_num_bytes_with_padding(num_bytes)
        var ui8_ptr = Pointer[UInt8].alloc(
            num_bytes_with_padding, alignment=ALIGNMENT
        )
        memset_zero(ui8_ptr, num_bytes_with_padding)
        var ptr = ui8_ptr.bitcast[T]()
        # Fix: must pass capacity= (keyword), matching every other buffer in
        # this package; the old positional `List[Bool](len(values))` did not
        # reserve capacity for the validity bits.
        var validity_list = List[Bool](capacity=len(values))
        for i in range(len(values)):
            validity_list.append(True)
            var val = values[i]
            ptr.store(i, val)
        self.value = ui8_ptr
        self.validity = Bitmap(validity_list)
        self.null_count = 0
        self.view = ptr
        self.length = len(values)
        self.mem_use = num_bytes_with_padding

    fn __getitem__(self, index: Int) raises -> T:
        if index < 0 or index >= self.length:
            raise Error("index out of range for ArrowFixedWidthVector")
        return self.view.load(index)

    fn __len__(self) -> Int:
        return self.length

    fn __del__(owned self):
        self.value.free()
struct ArrowIntVector:
    """
    Temporary solution until we can create ArrowFixedWidthVector[Int]
    Depends on https://github.com/modularml/mojo/issues/2956 to be fixed.
    """

    var length: Int
    var null_count: Int
    var validity: Bitmap  # all-valid for now (nulls unsupported)
    var value_buffer: OffsetBuffer64
    var mem_used: Int

    fn __init__(inout self, values: List[Int]):
        """Builds a fully-valid Int64 vector from `values`."""
        self.length = len(values)
        # OffsetBuffer64's List[Int] constructor already copies every
        # element, so the old second write pass (re-storing each value via
        # _unsafe_setitem) was redundant and has been removed.
        self.value_buffer = OffsetBuffer64(values)
        var validity_list = List[Bool](capacity=len(values))
        for i in range(len(values)):
            validity_list.append(True)
        self.validity = Bitmap(validity_list)
        self.null_count = 0
        self.mem_used = self.value_buffer.mem_used + self.validity.mem_used

    fn __getitem__(self, index: Int) raises -> Int64:
        # Bounds checking is delegated to OffsetBuffer64.__getitem__.
        return self.value_buffer[index]

    fn __len__(self) -> Int:
        return self.length
| arrow.mojo/arrow/physical_layout/arrow.mojo | false |
<filename>arrow.mojo/arrow/physical_layout/varbinary.mojo
from arrow.util import ALIGNMENT, get_num_bytes_with_padding
from arrow.arrow import Bitmap
from arrow.buffer import BinaryBuffer, OffsetBuffer32, OffsetBuffer64
struct ArrowStringVector:
    """Arrow variable-size binary layout for strings.

    Bytes of all strings are packed into one BinaryBuffer; `offsets` holds
    len+1 cumulative byte positions so string i spans
    [offsets[i], offsets[i+1]).
    """

    var length: Int
    var null_count: Int
    var validity: Bitmap  # all-valid for now (nulls unsupported)
    var offsets: OffsetBuffer64
    var value_buffer: BinaryBuffer
    var mem_used: Int

    fn __init__(inout self, values: List[String]):
        """Packs `values` into a single contiguous byte buffer."""
        var validity_list = List[Bool](capacity=len(values))
        var offset_list = List[Int](capacity=len(values) + 1)
        # Calculate the size of the buffer and allocate it.
        # Fix: size by the as_bytes() length that is actually written below.
        # The old code summed values[i]._buffer.size, which reaches into
        # String internals and includes the trailing NUL terminator,
        # over-allocating one byte per string.
        var buffer_size = 0
        for i in range(len(values)):
            buffer_size += len(values[i].as_bytes())
        self.value_buffer = BinaryBuffer(buffer_size)
        offset_list.append(0)
        var offset_cursor = 0
        for i in range(len(values)):
            validity_list.append(True)
            var bytes = values[i].as_bytes()
            self.value_buffer._unsafe_set_sequence(offset_cursor, bytes)
            offset_cursor += len(bytes)
            offset_list.append(offset_cursor)
        self.length = len(values)
        self.null_count = 0
        self.validity = Bitmap(validity_list)
        self.offsets = OffsetBuffer64(offset_list)
        self.mem_used = self.value_buffer.mem_used + self.offsets.mem_used

    fn __getitem__(self, index: Int) raises -> String:
        """Reconstructs string `index` from the packed byte buffer."""
        if index < 0 or index >= self.length:
            raise Error("index out of range for ArrowStringVector")
        var start = self.offsets[index]
        var length = self.offsets[index + 1] - start
        var bytes = self.value_buffer._unsafe_get_sequence(
            rebind[Int](start), rebind[Int](length)
        )
        bytes.extend(
            List(UInt8(0))
        )  # TODO: null terminate string without copying
        return String(bytes)

    fn __len__(self) -> Int:
        return self.length
| arrow.mojo/arrow/physical_layout/varbinary.mojo | false |
<filename>arrow.mojo/arrow/physical_layout/varlist.mojo
from arrow.util import ALIGNMENT, get_num_bytes_with_padding
from arrow.arrow import Bitmap
from arrow.buffer import DTypeBuffer, OffsetBuffer32, OffsetBuffer64
struct VariableSizedList[type: DType]:
    """Arrow variable-size list layout: a flat typed value buffer plus
    len+1 offsets; sub-list i spans [offsets[i], offsets[i+1]).
    """

    alias element_type = Scalar[type]
    alias element_byte_width = sizeof[Self.element_type]()

    var length: Int  # number of sub-lists
    var null_count: Int
    var validity: Bitmap  # all-valid for now (nulls unsupported)
    var offsets: OffsetBuffer64
    var value_buffer: DTypeBuffer[type]  # all elements, flattened
    var mem_used: Int

    fn __init__(inout self, values: List[List[Self.element_type]]) raises:
        """Flattens `values` into one value buffer and records offsets."""
        self.length = len(values)
        var validity_list = List[Bool](capacity=len(values))
        var offset_list = List[Int](capacity=len(values) + 1)
        # Calculate the size of the buffer and allocate it
        var buffer_size = 0
        for i in range(len(values)):
            buffer_size += len(values[i])
        self.value_buffer = DTypeBuffer[type](buffer_size)
        offset_list.append(0)
        var offset_cursor: Int = 0
        for i in range(len(values)):
            # TODO: support nulls
            validity_list.append(True)
            var data_list = values[i]
            for value in data_list:
                # value is a list-iterator reference; [] dereferences it.
                self.value_buffer[offset_cursor] = value[]
                offset_cursor += 1
            # One offset per sub-list: the end of sub-list i.
            offset_list.append(offset_cursor)
        self.null_count = 0
        self.validity = Bitmap(validity_list)
        self.offsets = OffsetBuffer64(offset_list)
        self.mem_used = self.value_buffer.mem_used + self.offsets.mem_used

    fn __getitem__(self, index: Int) raises -> List[Self.element_type]:
        """Returns sub-list `index` as a freshly copied List."""
        if index < 0 or index >= self.length:
            # TODO: Sprintf the index into the error
            raise Error("index out of range for ArrowVariableSizedList")
        var ret = List[Self.element_type]()
        var start: Int = int(self.offsets[index])
        var length: Int = int(self.offsets[index + 1] - start)
        for i in range(length):
            ret.append(self.value_buffer[start + i])
        return ret

    fn __len__(self) -> Int:
        return self.length
| arrow.mojo/arrow/physical_layout/varlist.mojo | false |
from testing import assert_equal
from arrow.array.bool_array import ArrowBooleanArray
def test_ArrowBooleanArray():
    """Builds an array with one null and checks length/null_count/mem_used."""
    var bools = List[Optional[Bool]](True, None, False)
    var arr = ArrowBooleanArray(bools)
    for i in range(len(arr)):
        if arr[i] is None:
            print("None")
        else:
            print(arr[i].or_else(False))
    assert_equal(arr.length, 3)
    assert_equal(arr.null_count, 1)
    # 64-byte padded validity bitmap + 64-byte padded value bitmap.
    assert_equal(arr.mem_used, 128)


# Fix: this file had no main(), so `mojo run` never executed the test;
# added for consistency with test_bitmap.mojo and test_arrow.mojo.
def main():
    test_ArrowBooleanArray()
| arrow.mojo/test/array/test_bool_array.mojo | false |
from testing import assert_true
from arrow.buffer.binary import BinaryBuffer
def list_equality(list1: List[UInt8], list2: List[UInt8]) -> Bool:
    """Returns True iff both lists have the same length and contents."""
    if len(list1) != len(list2):
        return False
    for idx in range(len(list1)):
        if list1[idx] != list2[idx]:
            return False
    return True
def test_BinaryBuffer():
    """Round-trips four bytes through BinaryBuffer."""
    var test_case = List(UInt8(0), UInt8(1), UInt8(2), UInt8(3))
    var buffer = BinaryBuffer(test_case)
    var list_from_buffer = buffer.get_sequence(0, len(test_case))
    assert_true(list_equality(test_case, list_from_buffer))
    assert_true(buffer.length == len(test_case))
    assert_true(buffer.mem_used == 64)  # padded up to one 64-byte block


def test_BinaryBuffer_2():
    """Round-trips three bytes through BinaryBuffer."""
    # NOTE(review): UInt8(31985) overflows UInt8 (max 255) and wraps to 241;
    # if truncation is not deliberately under test, use a value <= 255.
    var test_case = List(UInt8(0), UInt8(1), UInt8(31985))
    var buffer = BinaryBuffer(test_case)
    var list_from_buffer = buffer.get_sequence(0, len(test_case))
    assert_true(list_equality(test_case, list_from_buffer))
    assert_true(buffer.length == len(test_case))
    assert_true(buffer.mem_used == 64)


# Fix: this file had no main(), so `mojo run` never executed the tests;
# added for consistency with test_bitmap.mojo and test_arrow.mojo.
def main():
    test_BinaryBuffer()
    test_BinaryBuffer_2()
| arrow.mojo/test/buffer/test_binary.mojo | false |
from arrow import Bitmap
from testing import assert_equal
def check_if_works(bool_list: List[Bool]) -> Bitmap:
    """Round-trips `bool_list` through a Bitmap, asserting each bit survives."""
    var bitmap = Bitmap(bool_list)
    var round_tripped = bitmap.to_list()
    for idx in range(len(bool_list)):
        assert_equal(bool_list[idx], round_tripped[idx])
    return bitmap
def test_Bitmap_0():
    # Single False bit.
    var test_case = List(False)
    var bitmap = check_if_works(test_case)
    assert_equal(bitmap.length, 1)
    assert_equal(bitmap.mem_used, 64)  # always padded to one 64-byte block


def test_Bitmap_1():
    # Single True bit.
    var test_case = List(True)
    var bitmap = check_if_works(test_case)
    assert_equal(bitmap.length, 1)
    assert_equal(bitmap.mem_used, 64)


def test_Bitmap_2():
    # Two False bits.
    var test_case = List(False, False)
    var bitmap = check_if_works(test_case)
    assert_equal(bitmap.length, 2)
    assert_equal(bitmap.mem_used, 64)


def test_Bitmap_3():
    # Mixed: False then True.
    var test_case = List(False, True)
    var bitmap = check_if_works(test_case)
    assert_equal(bitmap.length, 2)
    assert_equal(bitmap.mem_used, 64)


def test_Bitmap_4():
    # Mixed: True then False.
    var test_case = List(True, False)
    var bitmap = check_if_works(test_case)
    assert_equal(bitmap.length, 2)
    assert_equal(bitmap.mem_used, 64)


def test_Bitmap_5():
    # NOTE(review): identical input to test_Bitmap_3 — possibly meant to be
    # List(True, True); confirm intent.
    var test_case = List(False, True)
    var bitmap = check_if_works(test_case)
    assert_equal(bitmap.length, 2)
    assert_equal(bitmap.mem_used, 64)


def main():
    test_Bitmap_0()
    test_Bitmap_1()
    test_Bitmap_2()
    test_Bitmap_3()
    test_Bitmap_4()
    test_Bitmap_5()
| arrow.mojo/test/buffer/test_bitmap.mojo | false |
<filename>arrow.mojo/test/physical_layout/test_arrow.mojo
from arrow import ArrowIntVector
from testing import assert_equal
def test_ArrowIntVector():
    # Five Ints round-tripped through the Int64 vector, including a negative.
    var ints = List[Int]()
    ints.append(-11)
    ints.append(2)
    ints.append(4)
    ints.append(7643)
    ints.append(69)
    var int_arrow_buf = ArrowIntVector(ints)
    assert_equal(int_arrow_buf[0], -11)
    assert_equal(int_arrow_buf[1], 2)
    assert_equal(int_arrow_buf[2], 4)
    assert_equal(int_arrow_buf[3], 7643)
    assert_equal(int_arrow_buf[4], 69)
    assert_equal(len(int_arrow_buf), 5)
    # 64-byte padded value buffer + 64-byte padded validity bitmap.
    assert_equal(int_arrow_buf.mem_used, 128)
    assert_equal(int_arrow_buf.value_buffer.mem_used, 64)


def main():
    test_ArrowIntVector()
| arrow.mojo/test/physical_layout/test_arrow.mojo | false |
from testing import assert_equal
from arrow.physical_layout.varbinary import ArrowStringVector
def test_string_vector():
    """Round-trips eight words through ArrowStringVector."""
    var words = List[String]()
    words.append("hello")
    words.append("world")
    words.append("this")
    words.append("is")
    words.append("a")
    words.append("test")
    words.append("of")
    words.append("strings")
    var string_vec = ArrowStringVector(words)
    # Each stored string must come back byte-identical, in order.
    for idx in range(len(words)):
        assert_equal(string_vec[idx], words[idx])


def main():
    test_string_vector()
| arrow.mojo/test/physical_layout/test_varbinary.mojo | false |
from testing import assert_equal
from arrow.physical_layout.varlist import VariableSizedList
def test_var_list():
    # Four sub-lists of varying length, including an empty one, to exercise
    # the offsets logic of VariableSizedList.
    var list_of_lists = List[List[Int64]](
        List[Int64](1, 2, 3),
        List[Int64](4, 5),
        List[Int64](),
        List[Int64](7, 8),
    )
    var var_list = VariableSizedList(list_of_lists)
    assert_equal(var_list[0][0], 1)
    assert_equal(var_list[0][1], 2)
    assert_equal(var_list[0][2], 3)
    assert_equal(var_list[1][0], 4)
    assert_equal(var_list[1][1], 5)
    assert_equal(len(var_list[2]), 0)  # empty sub-list survives round-trip
    assert_equal(var_list[3][0], 7)
    assert_equal(var_list[3][1], 8)


def main():
    test_var_list()
| arrow.mojo/test/physical_layout/test_varlist.mojo | false |
<filename>basalt/basalt/__init__.mojo
from .autograd import Graph, Symbol, OP
from .nn import Tensor, TensorShape
from basalt.utils.collection import Collection
# Global defaults for the basalt package.
alias dtype = DType.float32  # default tensor element type
alias nelts = 2 * simdwidthof[dtype]()  # SIMD width used by kernels
alias seed = 42  # default RNG seed
alias epsilon = 1e-12  # numerical-stability constant
| basalt/basalt/__init__.mojo | false |
from collections import Optional, OptionalReg
from basalt.nn.tensor import Tensor, TensorShape, MAX_RANK
from basalt.utils.bytes import Bytes, scalar_to_bytes, bytes_to_scalar
# Fixed capacities for the register-passable attribute containers below.
alias MAX_ATTRS = 10  # max attributes per AttributeVector
alias MAX_NAME_CHARS = 16  # max bytes in an attribute name
alias MAX_DATA_BYTES = 32  # max bytes in an attribute payload
@register_passable("trivial")
struct AttributeType(Stringable):
    """Tag describing how an Attribute's payload bytes should be decoded."""

    alias BOOL = AttributeType(0, "BOOL")
    alias INT = AttributeType(1, "INT")
    alias FLOAT = AttributeType(2, "FLOAT")
    alias STRING = AttributeType(3, "STRING")
    alias INTS = AttributeType(4, "INTS")
    alias FLOATS = AttributeType(5, "FLOATS")

    var id: UInt8  # numeric tag; sole basis for equality
    var name: Bytes[MAX_NAME_CHARS]  # human-readable tag name

    fn __init__(inout self, id: UInt8, name: String):
        self.id = id
        self.name = Bytes[MAX_NAME_CHARS](name)

    fn __init__(inout self, type: DType):
        # Maps a DType onto the closest tag: floating point -> FLOAT,
        # bool -> BOOL, everything else (the integer types) -> INT.
        if type.is_floating_point():
            self = AttributeType.FLOAT
        elif type.is_bool():
            self = AttributeType.BOOL
        else:
            self = AttributeType.INT

    fn __eq__(self, other: Self) -> Bool:
        # Compares ids only; names are ignored.
        return self.id == other.id

    fn __str__(self) -> String:
        return str(self.name)
@register_passable("trivial")
struct AttributeVector(Sized, Stringable, CollectionElement):
    """Fixed-capacity (MAX_ATTRS) inline vector of Attributes,
    register-passable so it can travel with graph nodes by value.
    """

    var attributes: StaticTuple[Attribute, MAX_ATTRS]  # inline storage
    var size: Int  # number of slots actually used

    @always_inline("nodebug")
    fn __init__(inout self, *attributes: Attribute):
        # Copies up to MAX_ATTRS variadic attributes into the static tuple.
        self.attributes = StaticTuple[Attribute, MAX_ATTRS]()
        self.size = len(attributes)
        for i in range(self.size):
            self.attributes[i] = attributes[i]

    @always_inline("nodebug")
    fn __len__(self) -> Int:
        return self.size

    @always_inline("nodebug")
    fn __getitem__(self, index: Int) -> Attribute:
        # No bounds check; callers index within [0, size).
        return self.attributes[index]

    @always_inline("nodebug")
    fn __getitem__(self, index: StringLiteral) -> OptionalReg[Attribute]:
        # Linear scan by name; returns None when no attribute matches.
        for i in range(self.size):
            if self.attributes[i].name == Bytes[MAX_NAME_CHARS](index):
                return self.attributes[i]
        return None

    @always_inline("nodebug")
    fn __str__(self) -> String:
        var s: String = "["
        for i in range(self.size):
            s += str(self.attributes[i])
            if i < self.size - 1:
                s += ", "
        return s + "]"
@register_passable("trivial")
struct Attribute(Stringable, CollectionElement):
    """A named, trivially-copyable value attached to a graph node.

    The payload is packed into fixed-size inline storage: scalar/string
    payloads go into `data` (MAX_DATA_BYTES), integer-tuple payloads go
    into `data_shape` (MAX_RANK slots). `type` says which decoding the
    to_* accessors should use; `size` is the element count (or string
    length).
    """

    var data_shape: StaticIntTuple[MAX_RANK]  # INTS payload / scalar rank marker
    var name: Bytes[MAX_NAME_CHARS]
    var data: Bytes[MAX_DATA_BYTES]  # scalar or string payload bytes
    var type: AttributeType
    var size: Int

    @always_inline("nodebug")
    fn __init__(inout self, name: String, value: String):
        # STRING payload: bytes stored in `data`, size = character count.
        self.data_shape = StaticIntTuple[MAX_RANK]()
        self.name = Bytes[MAX_NAME_CHARS](name)
        self.data = Bytes[MAX_DATA_BYTES](value)
        self.type = AttributeType.STRING
        self.size = len(value)

    @always_inline("nodebug")
    fn __init__(inout self, name: String, value: TensorShape):
        # INTS payload: shape dims copied into data_shape, size = rank.
        self.data_shape = StaticIntTuple[MAX_RANK]()
        self.name = Bytes[MAX_NAME_CHARS](name)
        self.data = Bytes[MAX_DATA_BYTES]()
        self.type = AttributeType.INTS
        self.size = value.rank()
        for i in range(self.size):
            self.data_shape[i] = value._shape[i]

    @always_inline("nodebug")
    fn __init__[N: Int](inout self, name: String, value: StaticIntTuple[N]):
        constrained[N < MAX_RANK, "Attribute rank must be less than MAX_RANK."]()
        # INTS payload from a static tuple; N checked at compile time.
        self.data_shape = StaticIntTuple[MAX_RANK]()
        self.name = Bytes[MAX_NAME_CHARS](name)
        self.data = Bytes[MAX_DATA_BYTES]()
        self.type = AttributeType.INTS
        self.size = N
        for i in range(self.size):
            self.data_shape[i] = value[i]

    @always_inline("nodebug")
    fn __init__[dtype: DType](inout self, name: String, value: Scalar[dtype]):
        constrained[dtype.is_numeric(), "Attribute value must be numeric."]()
        # Scalar payload: serialized into `data`; tag derived from dtype.
        self.data_shape = StaticIntTuple[MAX_RANK]()
        self.name = Bytes[MAX_NAME_CHARS](name)
        self.data = scalar_to_bytes[dtype, MAX_DATA_BYTES](value)
        self.type = AttributeType(dtype)
        self.size = 1

    @always_inline("nodebug")
    fn __init__(inout self, name: String, value: Int):
        # Ints are widened to Int64 before byte-packing.
        self.__init__(name, Int64(value))
        self.data_shape[0] = 1

    @always_inline("nodebug")
    fn __init__(inout self, name: String, value: FloatLiteral):
        # Float literals are widened to Float64 before byte-packing.
        self.__init__(name, Float64(value))
        self.data_shape[0] = 1

    @always_inline("nodebug")
    fn __str__(self) -> String:
        # Payload is elided; use json() for a full rendering.
        return "Attribute(" + str(self.name) + ", " + "..." + ")"

    @always_inline("nodebug")
    fn to_string(self) -> String:
        # Valid for STRING attributes; decodes `data` as text.
        return str(self.data)

    @always_inline("nodebug")
    fn to_shape(self) -> TensorShape:
        # Valid for INTS attributes; rebuilds a TensorShape from data_shape.
        return TensorShape(rank=self.size, shape=self.data_shape)

    @always_inline("nodebug")
    fn to_static[N: Int](self) -> StaticIntTuple[N]:
        constrained[N < MAX_RANK, "Attribute rank must be less than MAX_RANK."]()
        # Valid for INTS attributes; copies the first N dims out.
        var result = StaticIntTuple[N]()
        for i in range(N):
            result[i] = int(self.data_shape[i])
        return result

    @always_inline("nodebug")
    fn to_scalar[dtype: DType](self) -> Scalar[dtype]:
        constrained[dtype.is_numeric(), "Attribute value must be numeric."]()
        # Valid for scalar attributes; decodes `data` back into a Scalar.
        return bytes_to_scalar[dtype](self.data)

    @always_inline("nodebug")
    fn to_int(self) -> Int:
        # Scalars from the Int constructor were packed as Int64.
        return int(self.to_scalar[DType.int64]())

    fn json(self) -> String:
        """Renders the attribute as a JSON object string.

        NOTE(review): BOOL and FLOATS attributes fall through to the
        UNKNOWN branch below — confirm whether they need dedicated
        rendering.
        """
        var result = '{"name": "' + str(self.name) + '", '
        var type: String = ""
        var value: String = ""
        if self.type == AttributeType.STRING:
            type = "STRING"
            value = '"' + self.to_string() + '"'
        elif self.type == AttributeType.INTS:
            type = "INTS"
            var value_temp = self.to_shape()
            value = "["
            for i in range(value_temp.rank()):
                value += str(value_temp._shape[i])
                if i < value_temp.rank() - 1:
                    value += ", "
            value += "]"
        elif self.type == AttributeType.FLOAT:
            type = "FLOAT"
            value = str(self.to_scalar[DType.float64]())
        elif self.type == AttributeType.INT:
            type = "INT"
            value = str(self.to_int())
        else:
            type = "UNKNOWN"
            value = "UNKNOWN"
        result += '"type": "' + type + '", ' + '"value": ' + value
        return result + "}"
| basalt/basalt/autograd/attributes.mojo | false |
<filename>basalt/basalt/autograd/graph.mojo
from python.python import Python
from collections.optional import Optional, OptionalReg
from .node import Node
from .attributes import AttributeVector, Attribute
from .symbol import Symbol
from .ops import OP, static_result_shape, dynamic_result_shape
from .params import ParamDict, Param
from basalt import seed, dtype
from basalt import Tensor, TensorShape
struct Graph:
var inputs: List[Symbol]
var params: ParamDict
var nodes: List[Node]
var outputs: List[Symbol]
var loss_out: OptionalReg[Symbol]
var symbol_count: UInt32
fn __init__(inout self):
self.inputs = List[Symbol]()
self.params = ParamDict()
self.nodes = List[Node]()
self.outputs = List[Symbol]()
self.loss_out = None
self.symbol_count = 0
fn __moveinit__(inout self, owned other: Graph):
self.inputs = other.inputs^
self.params = other.params^
self.nodes = other.nodes^
self.outputs = other.outputs^
self.loss_out = other.loss_out
self.symbol_count = other.symbol_count
fn create_symbol(inout self, shape: TensorShape, data: Optional[Param] = None, trainable: Bool = False, is_input: Bool = False) -> Symbol:
var symbol = Symbol(self.symbol_count, dtype, shape, trainable)
self.symbol_count += 1
if data is not None:
self.params.put(symbol, data.take())
else:
self.params.put(symbol)
if is_input:
self.inputs.append(symbol)
return symbol
fn input(inout self, shape: TensorShape, trainable: Bool = False) -> Symbol:
return self.create_symbol(shape, trainable=trainable, is_input=True)
fn param(inout self, shape: TensorShape, init: Param, trainable: Bool = True) -> Symbol:
return self.create_symbol(shape, init, trainable)
fn param(inout self, shape: TensorShape, trainable: Bool = True) -> Symbol:
return self.create_symbol(shape, trainable=trainable)
fn scalar(inout self, value: Scalar[dtype]) -> Symbol:
return self.create_symbol(TensorShape(1), Param(value), trainable=False)
fn constant(inout self, shape: TensorShape, data: List[Scalar[dtype]]) -> Symbol:
return self.create_symbol(shape, Param(data), trainable=False)
fn out(inout self, symbol: Symbol):
self.outputs.append(symbol)
fn loss(inout self, symbol: Symbol):
self.loss_out = symbol
fn op(
inout self,
op: OP,
*operands: Symbol,
attributes: AttributeVector = AttributeVector(),
) -> Symbol:
var res_shape = static_result_shape(op, operands, attributes)
var res = Symbol(self.symbol_count, dtype, res_shape, self.result_trainable(operands))
self.symbol_count += 1
var inputs = List[Symbol]()
inputs.reserve(len(operands))
for operand in operands:
inputs.append(operand)
self.nodes.append(Node(op, inputs, List[Symbol](res), attributes))
return res
fn op(
inout self,
op: OP,
operand_1: Symbol,
operand_2: Float64,
attributes: AttributeVector = AttributeVector(),
) -> Symbol:
return self.op(op, operand_1, self.scalar(operand_2), attributes=attributes)
fn op(
inout self,
op: OP,
operand_1: Float64,
operand_2: Symbol,
attributes: AttributeVector = AttributeVector(),
) -> Symbol:
return self.op(op, self.scalar(operand_1), operand_2, attributes=attributes)
fn create_symbols(inout self, shapes: List[TensorShape], trainable: Bool = False) -> List[Symbol]:
var symbols = List[Symbol]()
symbols.reserve(len(shapes))
for shape in shapes:
symbols.append(Symbol(self.symbol_count, dtype, shape[], trainable))
self.symbol_count += 1
return symbols
fn add_node(inout self, op: OP, inputs: List[Symbol], outputs: List[Symbol], attributes: AttributeVector):
self.nodes.append(Node(op, inputs, outputs, attributes))
fn concat(inout self, *operands: Symbol, dim: Int = 0) -> Symbol:
var attributes = AttributeVector(Attribute("dim", dim))
var res_shape = dynamic_result_shape(OP.CONCAT, operands, attributes)[0]
var res_symbols = self.create_symbols(List[TensorShape](res_shape), self.result_trainable(operands))
var operand_list = List[Symbol]()
operand_list.reserve(len(operands))
for operand in operands:
operand_list.append(operand)
self.add_node(OP.CONCAT, operand_list, res_symbols, attributes)
return res_symbols[0]
    fn split(
        inout self, operand: Symbol, sections: List[Int], dim: Int = 0
    ) -> List[Symbol]:
        """Adds a SPLIT node cutting `operand` into `sections`-sized pieces along `dim`; returns one symbol per section."""
        # Section sizes are smuggled through the attribute system as a TensorShape.
        var attributes = AttributeVector(Attribute("sections", TensorShape(sections)), Attribute("dim", dim))
        var res_shapes = dynamic_result_shape(OP.SPLIT, operand, attributes)
        var trainable = self.result_trainable(operand)
        var result_symbols = self.create_symbols(res_shapes, trainable)
        self.add_node(OP.SPLIT, List[Symbol](operand), result_symbols, attributes)
        return result_symbols
    @staticmethod
    fn result_trainable(operands: VariadicList[Symbol]) -> Bool:
        """Returns True if any operand requires gradients; the result of the op then does too."""
        for operand in operands:
            if operand.trainable:
                return True
        return False
    fn json(self) -> String:
        """Serializes the graph (nodes, inputs, outputs, optional loss, params) to a JSON string for rendering/inspection."""
        var result: String = '{"graph_name": "basalt", "nodes": ['
        for i in range(len(self.nodes)):
            result += self.nodes[i].json()
            if i < len(self.nodes) - 1:
                result += ", "
        result += '], "inputs": ['
        for i in range(len(self.inputs)):
            result += self.inputs[i].json()
            if i < len(self.inputs) - 1:
                result += ", "
        result += '], "outputs": ['
        for i in range(len(self.outputs)):
            result += self.outputs[i].json()
            if i < len(self.outputs) - 1:
                result += ", "
        if self.loss_out:
            # The "loss" array is emitted only when a loss output was registered;
            # the '],' here closes the preceding "outputs" array.
            result += '], "loss": ['
            result += self.loss_out.value().json()
        result += '], "params": ['
        for i in range(len(self.params)):
            result += self.params.symbols[i].json()
            if i < len(self.params) - 1:
                result += ", "
        result += "]}"
        return result
    fn render(self, render_type: String = "node") raises:
        """Renders the graph via the Python `graph_render` helper (netron); `render_type` is forwarded to the script, e.g. "node"."""
        # The helper script lives in the repo, not on the default sys.path.
        Python.add_to_path("./basalt/utils")
        var renderer = Python.import_module("graph_render")
        var json = Python.import_module("json")
        _ = renderer.netron_render(json.loads(self.json()), render_type)
    fn compile(inout self):
        """Placeholder for future graph compilation (rewriting, fusion, codegen, auto-tuning); currently a no-op."""
        # 0. Sorting the graph
        # The statically defined graph has an implicit topologically sorted order because
        # each new operation is added to the list of nodes after its dependencies have been calculated.
        # This eliminates the need for explicit topological sorting.
        # Possibilities:
        # - 1. Graph layout transformation (graph rewrite)
        #       - Layer pruning (removing nodes that have no effect - with common sub-tree identification)
        #       - Eliminate redundant intermediate data copies
        #       - Operator replacement (e.g. replacing (combination of) costly ops with more efficient ones)
        #       - (example of graph rewrite: https://dl.acm.org/doi/pdf/10.1145/3453483.3454083 - Table 4)
        #       - Other intra-block optimizations: (e.g. data layout transformation BCHW -> BHWC, etc.)
        # - 2. Operator fusion (combining ops without materializing intermediate results)
        #       - Fusion plan exploration
        #       - Fusion plan generation (with subsequent intra-block optimizations)
        #       - (example fusion plan algorithm: https://dl.acm.org/doi/pdf/10.1145/3453483.3454083 - Listing 1)
        # - 3. Fusion Code generation (behaviour)
        #       - Code generation for planned fusion blocks
        #       - Other inter-block optimizations (e.g. data layout transformation BCHW -> BHWC, etc.)
        # - 4. Auto-tuning (of vectorization-, parallelization-, tiling-, unrolling-parameters)
        #       - (Might only work when memory is initialized)
        # Other considerations:
        # - Efficient Memory management:
        #       - Memory reuse (in-place operations)
        #       - Data layout from BCHW (batch, channel, height, width) to BHWC can lead to better utilization and efficiency
        # - VJP, JVP (for automatic differentiation)
        pass
| basalt/basalt/autograd/graph.mojo | false |
from collections.optional import Optional
from utils.variant import Variant
from basalt.autograd import Symbol
from basalt.autograd.ops import OP
from .attributes import AttributeVector
@value
struct Node(CollectionElement, Stringable):
    """A single operation in the compute graph: an operator together with its input/output symbols and static attributes."""

    var operator: OP
    var inputs: List[Symbol]
    var outputs: List[Symbol]
    var attributes: AttributeVector

    fn __init__(
        inout self,
        operator: OP,
        inputs: List[Symbol],
        outputs: List[Symbol],
        attributes: AttributeVector = AttributeVector(),
    ):
        self.operator = operator
        self.inputs = inputs
        self.outputs = outputs
        self.attributes = attributes

    fn __str__(self) -> String:
        # The human-readable form is the JSON serialization.
        return self.json()

    fn json(self) -> String:
        """Serializes the node (operator name, inputs, outputs, attributes) as a JSON object."""
        var out: String = '{"operator": "' + str(self.operator.name) + '", "inputs": ['
        for idx in range(len(self.inputs)):
            if idx > 0:
                out += ", "
            out += self.inputs[idx].json()
        out += '], "outputs": ['
        for idx in range(len(self.outputs)):
            if idx > 0:
                out += ", "
            out += self.outputs[idx].json()
        out += '], "attributes": ['
        for idx in range(len(self.attributes)):
            if idx > 0:
                out += ", "
            out += self.attributes[idx].json()
        out += "]}"
        return out
| basalt/basalt/autograd/node.mojo | false |
<filename>basalt/basalt/autograd/params.mojo
from collections.optional import Optional
from basalt import dtype
from basalt import Tensor, TensorShape
from .symbol import Symbol
from .attributes import Attribute
@value
struct Param(CollectionElement, Stringable):
    """A model parameter's initial value: explicit data, an initializer spec, or nothing."""

    var data: Optional[List[Scalar[dtype]]]  # explicit values, when provided
    var initializer: Optional[Attribute]  # initializer spec, when provided

    fn __init__(inout self):
        # Empty param: neither data nor initializer.
        self.data = None
        self.initializer = None

    fn __init__(inout self, data: List[Scalar[dtype]]):
        self.data = data
        self.initializer = None

    fn __init__(inout self, data: Scalar[dtype]):
        # A single scalar is stored as a one-element list.
        self.data = List[Scalar[dtype]](data)
        self.initializer = None

    fn __init__(inout self, initializer: String, *args: Scalar[dtype]):
        # Supported initializers:
        # "random_uniform", lower_bound, upper_bound
        # "random_normal", mean, std
        # #TODO: "kaiming_uniform", mode, nonlinearity
        # #TODO: "kaiming_normal", mode, nonlinearity
        self.initializer = Attribute("initializer", initializer)
        # Variadic args are the initializer's numeric hyper-parameters.
        var data = List[Scalar[dtype]]()
        for arg in args:
            data.append(arg)
        self.data = data

    fn __getitem__(self, i: Int) -> Optional[Scalar[dtype]]:
        # Returns None when no data is stored; i is not bounds-checked.
        if self.data:
            return self.data.value()[][i]
        else:
            return None

    fn __str__(self) -> String:
        # "[v0, v1, ...]" when data is present, "" otherwise.
        var s: String = ""
        if self.data:
            var data = self.data.value()
            s += "["
            for i in range(len(data[])):
                s += str(data[][i])
                if i < len(data[]) - 1:
                    s += ", "
            s += "]"
        return s
@value
struct ParamDict(Sized):
    """Ordered mapping from parameter symbols to Param values, stored as two parallel lists (insertion order preserved)."""

    var symbols: List[Symbol]
    var values: List[Param]

    fn __init__(inout self):
        self.symbols = List[Symbol]()
        self.values = List[Param]()

    fn put(inout self, param_id: Symbol, value: Param = Param()):
        # No de-duplication: putting the same symbol twice appends twice.
        self.symbols.append(param_id)
        self.values.append(value)

    fn get_tensor(self, idx: Int) -> Tensor[dtype]:
        # May only be called at runtime
        # Materializes the idx-th param into a Tensor; ownership of the freshly
        # allocated buffer is transferred to the returned Tensor.
        # NOTE(review): aborts via Optional.value() if the Param holds fewer
        # elements than the symbol's shape implies — confirm callers guarantee this.
        var num = self.symbols[idx].shape.num_elements()
        var t = DTypePointer[dtype].alloc(num)
        for i in range(num):
            t[i] = self.values[idx][i].value()[]
        return Tensor[dtype](t, self.symbols[idx].shape)

    fn __len__(self) -> Int:
        return len(self.symbols)
| basalt/basalt/autograd/params.mojo | false |
from basalt import Tensor, TensorShape
@value
@register_passable("trivial")
struct Symbol(CollectionElement, Stringable, EqualityComparable):
    """Lightweight handle for a tensor in the graph: a unique id, dtype, static shape and trainability flag."""

    var name: UInt32
    var dtype: DType
    var shape: TensorShape
    var trainable: Bool

    fn __eq__(self, other: Self) -> Bool:
        # Identity is determined solely by the unique id.
        return self.name == other.name

    fn __ne__(self, other: Self) -> Bool:
        return not self.__eq__(other)

    fn __str__(self) -> String:
        return self.json()

    fn json(self) -> String:
        """Serializes the symbol's fields as a JSON object (all values quoted as strings)."""
        var out: String = '{"name": "'
        out += str(self.name)
        out += '", "dtype": "'
        out += str(self.dtype)
        out += '", "shape": "'
        out += str(self.shape)
        out += '", "trainable": "'
        out += str(self.trainable)
        out += '"}'
        return out
| basalt/basalt/autograd/symbol.mojo | false |
<filename>basalt/basalt/autograd/__init__.mojo
from .symbol import Symbol
from .graph import Graph
from .ops import OP
| basalt/basalt/autograd/__init__.mojo | false |
<filename>basalt/basalt/autograd/ops/basics.mojo
from math import add, sub, mul, div, log, exp
from algorithm import vectorize
from memory import memcpy
from basalt import Tensor, TensorShape
from basalt.nn.tensor import MAX_RANK
from basalt.utils.tensorutils import *
from basalt.autograd.attributes import Attribute, AttributeVector
from basalt.autograd.ops.matmul import dot, dot_transpose_t1, dot_transpose_t2
"""
Implement forward and backward operations for basic tensor manipulations.
"""
@value
struct ADD:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, t2_shape: TensorShape) -> TensorShape:
        # Element-wise add supports broadcasting, so the result is the broadcast shape.
        return broadcast_shapes(t1_shape, t2_shape)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
        """
        Forward pass of the add operation.
        """
        elwise_op[t1_shape, t2_shape, add](res, t1, t2)

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of element wise addition."""
        # d(x + y) / dx = d(x + y) / dy = 1
        # The upper gradient passes through unchanged for either operand.
        return ug
@value
struct SUB:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, t2_shape: TensorShape) -> TensorShape:
        # Broadcasting is supported, so the result takes the broadcast shape.
        return broadcast_shapes(t1_shape, t2_shape)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
        """
        Forward pass of the subtraction operation.
        """
        elwise_op[t1_shape, t2_shape, sub](res, t1, t2)

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of element wise subtraction."""
        # d(x - y) / dx = 1
        # d(x - y) / dy = -1
        @parameter
        if tensor_id == 0:
            # Gradient w.r.t. the first operand: pass-through.
            return ug
        else:
            # Gradient w.r.t. the second operand: negated upper gradient.
            var res_grad = Tensor[dtype](ug_shape)
            elwise_op[mul](res_grad, ug, -1.0)
            return res_grad ^
@value
struct MUL:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, t2_shape: TensorShape) -> TensorShape:
        # Broadcasting is supported, so the result takes the broadcast shape.
        return broadcast_shapes(t1_shape, t2_shape)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
        """
        Forward pass of the multiplication operation.
        """
        elwise_op[t1_shape, t2_shape, mul](res, t1, t2)

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of element wise multiplication."""
        # d(x * y) / dx = y
        # d(x * y) / dy = x
        @parameter
        if tensor_id == 0:
            # dL/dx = ug * y (broadcasting handled by elwise_op).
            var res_grad = Tensor[dtype](ug_shape)
            elwise_op[ug_shape, t2_shape, mul](res_grad, ug, t2)
            return res_grad ^
        else:
            # dL/dy = ug * x.
            var res_grad = Tensor[dtype](ug_shape)
            elwise_op[ug_shape, t1_shape, mul](res_grad, ug, t1)
            return res_grad ^
@value
struct DIV:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, t2_shape: TensorShape) -> TensorShape:
        # Broadcasting is supported, so the result takes the broadcast shape.
        return broadcast_shapes(t1_shape, t2_shape)

    @staticmethod
    fn forward[
        t1_shape: TensorShape, t2_shape: TensorShape
    ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
        """
        Forward operation of element wise division.
        """
        elwise_op[t1_shape, t2_shape, div](res, t1, t2)

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of element wise division."""
        # d(x/y) / dx = 1/y
        # d(x/y) / dy = -x/y^2
        @parameter
        if tensor_id == 0:
            # dL/dx = ug / y (broadcasting handled by elwise_op).
            var res_grad = Tensor[dtype](ug_shape)
            elwise_op[ug_shape, t2_shape, div](res_grad, ug, t2)
            return res_grad ^
        else:
            alias broadcast = (t1_shape != t2_shape)
            alias is_scalar = (t2_shape == TensorShape(1))
            var res_grad = Tensor[dtype](ug_shape)

            @parameter
            if is_scalar:
                # y is a graph scalar: hoist -1/y^2 out of the loop as a constant.
                var factor: Scalar[dtype] = -1.0 / (t2[0] ** 2)

                @parameter
                fn vec_div_bw_scalar[nelts: Int](i: Int):
                    res_grad.store[nelts](
                        i, factor * t1.load[nelts](i) * ug.load[nelts](i)
                    )

                vectorize[vec_div_bw_scalar, nelts](ug_shape.num_elements())
            elif broadcast and not is_scalar:
                # Broadcast case: map each flat output index back to the
                # (strided) source indices of t1 and t2.
                alias size = ug_shape.rank()
                alias strides1 = broadcast_calculate_strides[size, t1_shape, ug_shape]()
                alias strides2 = broadcast_calculate_strides[size, t2_shape, ug_shape]()

                @parameter
                fn vec_div_bw_broadcast[_nelts: Int](i: Int):
                    var index1 = get_real_index[size, strides1, ug_shape](i)
                    var index2 = get_real_index[size, strides2, ug_shape](i)
                    # BUGFIX: use the vectorize width parameter (_nelts) instead of
                    # the global nelts. This loop is vectorized with width 1 because
                    # index1/index2 are not contiguous under broadcasting; loading and
                    # storing nelts lanes here produced overlapping stores and an
                    # out-of-bounds access at the tail of the buffer.
                    res_grad.store[_nelts](
                        i,
                        -t1.load[_nelts](index1)
                        / (t2.load[_nelts](index2) ** 2)
                        * ug.load[_nelts](i),
                    )

                vectorize[vec_div_bw_broadcast, 1](ug_shape.num_elements())
            else:
                # Same-shape case: plain vectorized element-wise -x/y^2 * ug.
                @parameter
                fn vec_div_bw[nelts: Int](i: Int):
                    res_grad.store[nelts](
                        i,
                        -t1.load[nelts](i)
                        / (t2.load[nelts](i) ** 2)
                        * ug.load[nelts](i),
                    )

                vectorize[vec_div_bw, nelts](ug_shape.num_elements())

            return res_grad ^
@value
struct DOT:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, t2_shape: TensorShape) -> TensorShape:
        # 2-D matrix multiply: (M, K) x (K, N) -> (M, N).
        return TensorShape(t1_shape[0], t2_shape[1])

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
        """
        Forward pass of the dot operation.
        """
        dot[t1_shape, t2_shape](res, t1, t2)

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of dot product."""
        @parameter
        if tensor_id == 0:
            # dot(ug, t2.T)
            var res_grad = Tensor[dtype](t1_shape)
            dot_transpose_t2[ug_shape, t2_shape](res_grad, ug, t2)
            return res_grad ^
        else:
            # dot(t1.T, ug)
            var res_grad = Tensor[dtype](t2_shape)
            dot_transpose_t1[t1_shape, ug_shape](res_grad, t1, ug)
            return res_grad ^
@value
struct EXP:
    @staticmethod
    fn result_shape(t1_shape: TensorShape) -> TensorShape:
        # Element-wise op: shape is preserved.
        return t1_shape

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        """Forward operation of exp."""
        elwise_transform[exp](res, t1)

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t1_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of exp."""
        # d(exp(x)) / dx = exp(x)
        var res_grad = Tensor[dtype](ug_shape)

        @parameter
        fn vec_exp_bw[nelts: Int](i: Int):
            res_grad.store[nelts](i, exp(t1.load[nelts](i)) * ug.load[nelts](i))

        vectorize[vec_exp_bw, nelts](ug_shape.num_elements())
        return res_grad ^
@value
struct LOG:
    @staticmethod
    fn result_shape(t1_shape: TensorShape) -> TensorShape:
        # Element-wise op: shape is preserved.
        return t1_shape

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        """Forward operation of log."""
        elwise_transform[log](res, t1)

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t1_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of log."""
        # d(log(x)) / dx = 1 / x
        var res_grad = Tensor[dtype](ug_shape)
        elwise_op[ug_shape, t1_shape, div](res_grad, ug, t1)
        return res_grad ^
struct POW:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, t2_shape: TensorShape) -> TensorShape:
        # t2_shape == TensorShape(1)
        # The exponent is a graph scalar, so the result keeps the base's shape.
        return t1_shape

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
        """Forward operation of element wise pow."""
        # t2_shape is a graph scalar
        # The exponent is truncated to an integer power.
        elwise_pow(res, t1, int(t2[0]))

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of element wise pow."""
        # d(x^y) / dx = y * x^(y-1)
        # d(x^y) / dy = sum( x^y * log(x) )
        var res_grad: Tensor[dtype]
        var a = int(t2[0])

        @parameter
        if tensor_id == 0:
            res_grad = Tensor[dtype](t1_shape)

            @parameter
            fn vec_pow_bw_x[nelts: Int](i: Int):
                # NOTE(review): epsilon presumably comes from the tensorutils
                # wildcard import, stabilizing x^(a-1) near zero — confirm.
                res_grad.store[nelts](i, a * ((t1.load[nelts](i) + epsilon) ** (a - 1)) * ug.load[nelts](i))

            vectorize[vec_pow_bw_x, nelts](t1_shape.num_elements())
        else:
            res_grad = Tensor[dtype](t2_shape)  # t2_shape == TensorShape(1)

            @parameter
            fn vec_pow_bw_y[nelts: Int](i: Int):
                # Scalar exponent: reduce each SIMD chunk into the single output.
                res_grad[0] += (
                    (t1.load[nelts](i) ** a)
                    * log(t1.load[nelts](i))
                    * ug.load[nelts](i)
                ).reduce_add()

            vectorize[vec_pow_bw_y, nelts](ug_shape.num_elements())

        return res_grad ^
struct SUM:
    @staticmethod
    fn result_shape(t_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        # With an "axis" attribute the reduced dim collapses to 1; otherwise a
        # full reduction to a single-element tensor.
        var axis = attributes["axis"]
        if axis:
            return get_reduce_shape(t_shape, axis.value().to_int())
        else:
            return TensorShape(1)

    @staticmethod
    fn forward[
        t_shape: TensorShape, attributes: AttributeVector
    ](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the sum operation.
        """
        alias axis = attributes["axis"]

        @parameter
        if axis:
            tsum(res, t, axis.value().to_int())
        else:
            res[0] = tsum(t)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape, attributes: AttributeVector
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of sum."""
        # The axis attribute is not needed here: the gradient of sum is ones
        # broadcast-multiplied by ug, which covers both axis and full reductions.
        return Self.backward[ug_shape, t_shape](ug, t)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of sum."""
        # d(sum(t)) / dt = 1, so the result is ug broadcast to t's shape.
        var res_grad = Tensor[dtype](t_shape)
        fill(res_grad, 1.0)
        elwise_op[t_shape, ug_shape, mul](res_grad, res_grad, ug)
        return res_grad ^
@value
struct MEAN:
    @staticmethod
    fn result_shape(t_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        # With an "axis" attribute the reduced dim collapses to 1; otherwise a
        # full reduction to a single-element tensor.
        var axis = attributes["axis"]
        if axis:
            return get_reduce_shape(t_shape, axis.value().to_int())
        else:
            return TensorShape(1)

    @staticmethod
    fn forward[
        t_shape: TensorShape, attributes: AttributeVector
    ](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the mean operation.
        """
        alias axis = attributes["axis"]

        @parameter
        if axis:
            tmean(res, t, axis.value().to_int())
        else:
            res[0] = tmean(t)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape, attributes: AttributeVector
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of mean."""
        # Dispatches at compile time to the axis or full-reduction variant.
        alias axis = attributes["axis"]

        @parameter
        if axis:
            return Self.backward[ug_shape, t_shape](ug, t, axis.value().to_int())
        else:
            return Self.backward[ug_shape, t_shape](ug, t)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of mean."""
        # d(mean(t)) / dt = 1 / t.num_elements()
        var res_grad = Tensor[dtype](t_shape)
        var grad: Scalar[dtype] = 1.0 / t_shape.num_elements()
        grad = (
            grad * ug[0]
        )  # because ug is a tensor of size 1 when mean is used without an axis

        @parameter
        fn v_mean_d[nelts: Int](i: Int):
            res_grad.store[nelts](i, grad)

        vectorize[v_mean_d, nelts](t_shape.num_elements())
        return res_grad ^

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype], axis: Int) -> Tensor[dtype]:
        """Backward operation of mean."""
        # d(mean(t)) / dt = 1 / t.dim(axis)
        var res_grad = Tensor[dtype](t_shape)
        var grad: Scalar[dtype] = 1.0 / t_shape[axis]
        fill(res_grad, grad)
        # Broadcast-multiply by the upper gradient along the reduced axis.
        elwise_op[t_shape, ug_shape, mul](res_grad, res_grad, ug)
        return res_grad ^
struct MAX:
    @staticmethod
    fn result_shape(t_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        # With an "axis" attribute the reduced dim collapses to 1; otherwise a
        # full reduction to a single-element tensor.
        var axis = attributes["axis"]
        if axis:
            return get_reduce_shape(t_shape, axis.value().to_int())
        else:
            return TensorShape(1)

    @staticmethod
    fn forward[
        t_shape: TensorShape, attributes: AttributeVector
    ](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the max operation.
        """
        alias axis = attributes["axis"]

        @parameter
        if axis:
            tmax(res, t, axis.value().to_int())
        else:
            res[0] = tmax(t)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape, attributes: AttributeVector
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of max."""
        # Dispatches at compile time to the axis or full-reduction variant.
        alias axis = attributes["axis"]

        @parameter
        if axis:
            return Self.backward[ug_shape, t_shape](ug, t, axis.value().to_int())
        else:
            return Self.backward[ug_shape, t_shape](ug, t)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of max."""
        # This could be changed to something like in tinygrad:
        # max_1s = CMPEQ(original_tensor, expanded(max_tensor), axis=axis)
        # sum_max_1s = SUM(max_1s)
        # div_sum_max_1s = DIV(max_1, sum_max_1s)

        # The selected element gradient is 1.0, the others are 0.0. And if there are
        # multiple max values, the gradient is divided by the number of max
        # values (1/n) for each max value.
        var res_grad = Tensor[dtype](t_shape)

        # ug_shape size is 1
        var max_res = tmax(t)
        # First pass: count how many elements share the maximum.
        var sum_eq: Scalar[dtype] = 0
        for i in range(t.num_elements()):
            if t[i] == max_res:
                sum_eq += 1

        # Second pass: distribute the upper gradient evenly over the maxima.
        var factor = 1 / sum_eq
        for i in range(res_grad.num_elements()):
            if t[i] == max_res:
                res_grad[i] = factor * ug[0]

        return res_grad ^

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype], axis: Int) -> Tensor[dtype]:
        """Backward operation of max."""
        # The selected element gradient is 1.0, the others are 0.0. And if there are
        # multiple max values, the gradient is divided by the number of max
        # values (1/n) for each max value.
        var res_grad = Tensor[dtype](t_shape)
        var max_res = Tensor[dtype](ug_shape)
        alias strides = t_shape.strides()

        tmax(
            max_res, t, axis
        )  # To not calculate this again we could receive the result of the forward pass as a parameter

        for i in range(max_res.num_elements()):
            # index_base maps the i-th reduced element to the first element of
            # its slice along `axis` in the flattened input layout.
            var index_base = (i % strides[axis]) + (i // strides[axis]) * (
                strides[axis] * t.dim(axis)
            )

            var count_1s: Scalar[dtype] = 0
            # Count the number of values equal to max_res
            for j in range(t.dim(axis)):
                var index = index_base + j * strides[axis]
                if t[index] == max_res[i]:
                    count_1s += 1
            # Divide 1.0 by the number of max values (n) and multiply by upper gradient value
            var factor = 1 / count_1s
            for j in range(t.dim(axis)):
                var index = index_base + j * strides[axis]
                if t[index] == max_res[i]:
                    res_grad[index] = factor * ug[i]

        return res_grad ^
struct TRANSPOSE:
    @staticmethod
    fn result_shape(t_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        var axes = attributes["axes"]  # axes to be permuted
        var rank = t_shape.rank()
        var shape = StaticIntTuple[MAX_RANK]()
        if axes:
            # NOTE: axis has to be the size of rank of the tensor
            var axes_shape = axes.value().to_shape()
            for i in range(rank):
                shape[i] = t_shape[axes_shape[i]]
        else:
            # Default: full reversal of the dimensions.
            for i in range(rank):
                shape[i] = t_shape[rank - i - 1]
        return TensorShape(rank=rank, shape=shape)

    @staticmethod
    fn forward[
        t_shape: TensorShape, attributes: AttributeVector
    ](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the transpose operation.
        """
        alias axes = attributes["axes"]

        @parameter
        if axes:
            var axes_shape = axes.value().to_shape()
            transpose(res, t, axes_shape)
        else:
            # No axes given: build the reversing permutation at compile time.
            fn create_transpose_axes() -> TensorShape:
                var rank = t_shape.rank()
                var axes = StaticIntTuple[MAX_RANK]()
                for i in range(rank):
                    axes[i] = rank - i - 1
                return TensorShape(rank=rank, shape=axes)

            alias axes_shape = create_transpose_axes()

            transpose(res, t, axes_shape)

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape, attributes: AttributeVector
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of transpose."""
        # No local gradient. Transpose is its own inverse.
        alias axes = attributes["axes"]
        var res_grad = Tensor[dtype](t_shape)

        @parameter
        if axes:
            # Invert the permutation: position p of the inverse gets the index
            # whose forward axis mapped to p.
            fn create_inverse_axes() -> TensorShape:
                var axes_shape = axes.value().to_shape()
                var rank = axes_shape.rank()
                var axes_shape_inv = StaticIntTuple[MAX_RANK]()
                for i in range(rank):
                    axes_shape_inv[axes_shape[i]] = i
                return TensorShape(rank=rank, shape=axes_shape_inv)

            alias axes_shape_inv = create_inverse_axes()

            transpose(res_grad, ug, axes_shape_inv)
        else:
            # The reversing permutation is its own inverse.
            fn create_transpose_axes() -> TensorShape:
                var rank = t_shape.rank()
                var axes = StaticIntTuple[MAX_RANK]()
                for i in range(rank):
                    axes[i] = rank - i - 1
                # BUGFIX: construct with an explicit rank, matching forward().
                # TensorShape(axes) treated the whole MAX_RANK tuple as the
                # shape, producing a permutation of the wrong rank.
                return TensorShape(rank=rank, shape=axes)

            alias axes_shape_inv = create_transpose_axes()

            transpose(res_grad, ug, axes_shape_inv)
        return res_grad ^
struct FLATTEN:
    @staticmethod
    fn result_shape(t_shape: TensorShape) -> TensorShape:
        # Collapses any shape into a rank-1 shape with the same element count.
        return TensorShape(t_shape.num_elements())

    @staticmethod
    fn forward[t_shape: TensorShape](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the flatten operation.
        """
        # The flat data layout is unchanged, so a raw copy suffices.
        memcpy(res.data(), t.data(), t_shape.num_elements())

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of flatten."""
        # Copy the upper gradient back into the original shape.
        var res_grad = Tensor[dtype](t_shape)
        memcpy(res_grad.data(), ug.data(), ug_shape.num_elements())
        return res_grad ^
struct RESHAPE:
    @staticmethod
    fn result_shape(t_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        # NOTE(review): aborts via Optional.value() if the "shape" attribute is
        # missing — presumably guaranteed by the graph builder; confirm.
        var new_shape = attributes["shape"]
        return new_shape.value().to_shape()

    @staticmethod
    fn forward[t_shape: TensorShape](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the reshape operation.
        """
        # The flat data layout is unchanged, so a raw copy suffices.
        memcpy(res.data(), t.data(), t_shape.num_elements())

    @staticmethod
    fn backward[
        ug_shape: TensorShape, t_shape: TensorShape
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of reshape."""
        # Copy the upper gradient back into the original shape.
        var res_grad = Tensor[dtype](t_shape)
        memcpy(res_grad.data(), ug.data(), ug_shape.num_elements())
        return res_grad ^
struct FMA:
    @staticmethod
    fn result_shape(
        t1_shape: TensorShape, t2_shape: TensorShape, t3_shape: TensorShape
    ) -> TensorShape:
        # FMA assumes: t1_shape == t2_shape == t3_shape
        # TODO: Error handling, constraints in API
        return t1_shape

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        t2_shape: TensorShape,
        t3_shape: TensorShape,
    ](
        inout res: Tensor[dtype],
        t1: Tensor[dtype],
        t2: Tensor[dtype],
        t3: Tensor[dtype],
    ):
        """
        Forward pass of the fma operation.
        """
        # res = t1 * t2 + t3, using a fused multiply-add per SIMD chunk.
        @parameter
        fn vec_fma[nelts: Int](i: Int):
            res.store[nelts](
                i, t1.load[nelts](i).fma(t2.load[nelts](i), t3.load[nelts](i))
            )

        vectorize[vec_fma, nelts, size = t1_shape.num_elements()]()

    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        t2_shape: TensorShape,
        t3_shape: TensorShape,
    ](
        ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype], t3: Tensor[dtype]
    ) -> Tensor[dtype]:
        """Backward operation of fma."""
        # d(x * y + z) / dx = y
        # d(x * y + z) / dy = x
        # d(x * y + z) / dz = 1
        @parameter
        if tensor_id == 0:
            var res_grad = Tensor[dtype](ug_shape)
            elwise_op[ug_shape, t2_shape, mul](res_grad, ug, t2)
            return res_grad ^
        elif tensor_id == 1:
            var res_grad = Tensor[dtype](ug_shape)
            elwise_op[ug_shape, t1_shape, mul](res_grad, ug, t1)
            return res_grad ^
        else:
            # Gradient w.r.t. the additive term is the upper gradient itself.
            return ug
| basalt/basalt/autograd/ops/basics.mojo | false |
from basalt import Tensor, TensorShape
from basalt.autograd.attributes import AttributeVector
from algorithm import parallelize, vectorize, tile
from math import divmod
from utils.loop import unroll
@always_inline
fn get_result_shape(
    input_shape: TensorShape,
    kernel_shape: TensorShape,
    padding: StaticIntTuple[2],
    stride: StaticIntTuple[2],
    dilation: StaticIntTuple[2],
) -> StaticIntTuple[2]:
    """
    Calculates the X and Y dimensions of the resulting convolution.
    Dimensions X, Y are on the end of the shape (..., X, Y)
    dimension X on index -2.
    dimension Y on index -1.
    """
    # Effective kernel extent once dilation is applied.
    var span_x = dilation[0] * (kernel_shape[-2] - 1) + 1
    var span_y = dilation[1] * (kernel_shape[-1] - 1) + 1
    # Standard conv output size: (in + 2*pad - effective_kernel) // stride + 1.
    var out_x = (input_shape[-2] + 2 * padding[0] - span_x) // stride[0] + 1
    var out_y = (input_shape[-1] + 2 * padding[1] - span_y) // stride[1] + 1
    return StaticIntTuple[2](out_x, out_y)
struct CONV2D:
    @staticmethod
    fn result_shape(
        input_shape: TensorShape,
        kernel_shape: TensorShape,
        bias_shape: TensorShape,
        attributes: AttributeVector,
    ) -> TensorShape:
        # Output shape = [batch, out_channels, oX, oY]
        # padding/stride/dilation come in as 2-element (x, y) attributes.
        var padding = attributes["padding"].value().to_static[2]()
        var stride = attributes["stride"].value().to_static[2]()
        var dilation = attributes["dilation"].value().to_static[2]()
        var res = get_result_shape(input_shape, kernel_shape, padding, stride, dilation)
        return TensorShape(input_shape[0], kernel_shape[0], res[0], res[1])
    @staticmethod
    fn forward[
        input_shape: TensorShape,
        kernel_shape: TensorShape,
        bias_shape: TensorShape,
        attributes: AttributeVector,
    ](
        inout outputs: Tensor[dtype],
        inputs: Tensor[dtype],
        kernel: Tensor[dtype],
        bias: Tensor[dtype],
    ):
        """
        Performs a 2D convolution on the input tensor using the kernel and bias.
        inputs.shape [batch, in_channels, iX, iY]
        kernel.shape [out_channels, in_channels, kX, kY] (or weights)
        bias.shape [out_channels].
        output.shape [batch, out_channels, oX, oY].

        Implemented as im2col (per-batch, parallelized) followed by a
        vectorized dot product of each kernel row with the unfolded columns.
        """
        # Compile-time hyper-parameters from the node attributes.
        alias padding = attributes["padding"].value().to_static[2]()
        alias stride = attributes["stride"].value().to_static[2]()
        alias dilation = attributes["dilation"].value().to_static[2]()
        alias padding_x = padding[0]
        alias padding_y = padding[1]
        alias stride_x = stride[0]
        alias stride_y = stride[1]
        alias dilation_x = dilation[0]
        alias dilation_y = dilation[1]
        alias batch_size = input_shape[0]
        alias in_channels = input_shape[1]
        alias in_x = input_shape[2]
        alias in_y = input_shape[3]
        alias out_channels = kernel_shape[0]
        alias k_x = kernel_shape[2]
        alias k_y = kernel_shape[3]
        # NOTE(review): output_shape is referenced here but declared below;
        # aliases are compile-time, so this presumably resolves — confirm.
        alias out_x = output_shape[2]
        alias out_y = output_shape[3]
        alias col_x = out_x
        alias col_y = out_y
        alias col_shape = TensorShape(
            batch_size, col_x * col_y, in_channels * k_x * k_y
        )  # [batch, colX * colY, in_channels * kX * kY]
        alias output_shape = Self.result_shape(
            input_shape, kernel_shape, bias_shape, attributes
        )
        alias col_shape_stripped = TensorShape(in_channels * k_x * k_y, col_x, col_y)
        alias inputs_strides = input_shape.strides()
        alias kernel_strides = kernel_shape.strides()
        alias outputs_strides = output_shape.strides()
        alias col_strides = col_shape.strides()
        # Scratch buffer for the unfolded (im2col) representation; zero-filled
        # so padded positions contribute nothing to the dot products.
        var col_ptr = DTypePointer[dtype].alloc(col_shape.num_elements())
        memset_zero(col_ptr, col_shape.num_elements())

        @parameter
        fn im2col(batch: Int):
            # Unfold every receptive field of this batch element into a row of col.
            for ux in range(out_x):
                for uy in range(out_y):
                    for in_ch in range(in_channels):
                        for kx in range(k_x):
                            for ky in range(k_y):
                                var ix = ux * stride_x - padding_x + kx * dilation_x
                                var iy = uy * stride_y - padding_y + ky * dilation_y
                                # Skip positions that fall into the padding.
                                if ix < 0 or iy < 0 or ix >= in_x or iy >= in_y:
                                    continue
                                var col_index = (
                                    batch * col_strides[0]
                                    + (ux * col_y + uy) * col_strides[1]
                                    + (in_ch * k_x * k_y + kx * k_y + ky)
                                )
                                var input_index = (
                                    batch * inputs_strides[0]
                                    + in_ch * inputs_strides[1]
                                    + ix * inputs_strides[2]
                                    + iy
                                )
                                col_ptr[col_index] = inputs[input_index]

        parallelize[im2col](batch_size)

        @parameter
        fn conv(batch: Int):
            # For each output position, dot the kernel row with the unfolded column.
            for out_ch in range(out_channels):
                for ux in range(out_x):
                    for uy in range(out_y):
                        var result: SIMD[dtype, nelts] = 0

                        @parameter
                        fn v_im2col[_nelts: Int](in_ch_kx_ky: Int):
                            var col_index = (
                                batch * col_strides[0]
                                + (ux * col_y + uy) * col_strides[1]
                                + in_ch_kx_ky
                            )
                            var kernel_index = (
                                out_ch * kernel_strides[0] + in_ch_kx_ky
                            )

                            @parameter
                            if _nelts == nelts:
                                # Full-width chunks accumulate lane-wise.
                                result += col_ptr.load[width=nelts](
                                    col_index
                                ) * kernel.load[nelts](kernel_index)
                            else:
                                # Tail chunk is reduced into lane 0.
                                result[0] += (
                                    col_ptr.load[width=_nelts](col_index)
                                    * kernel.load[_nelts](kernel_index)
                                ).reduce_add()

                        vectorize[v_im2col, nelts](in_channels * k_x * k_y)
                        var output_index = (
                            batch * outputs_strides[0]
                            + out_ch * outputs_strides[1]
                            + ux * outputs_strides[2]
                            + uy
                        )
                        # Horizontal reduction of the SIMD accumulator plus bias.
                        outputs[output_index] = result.reduce_add() + bias[out_ch]

        parallelize[conv](batch_size)

        col_ptr.free()
    @staticmethod
    fn backward[
        tensor_id: Int,
        ug_shape: TensorShape,
        input_shape: TensorShape,
        kernel_shape: TensorShape,
        bias_shape: TensorShape,
        attributes: AttributeVector,
    ](
        ug: Tensor[dtype],
        inputs: Tensor[dtype],
        kernel: Tensor[dtype],
        bias: Tensor[dtype],
    ) -> Tensor[dtype]:
        """
        Backward operation of 2D convolution.
        Upper gradient of shape: [batch, out_channels, uX, uY].

        Compile-time dispatch on `tensor_id`:
            0 -> gradient w.r.t. the inputs
            1 -> gradient w.r.t. the kernel
            otherwise -> gradient w.r.t. the bias
        """
        # Convolution hyper-parameters, resolved at compile time from attributes.
        alias padding = attributes["padding"].value().to_static[2]()
        alias stride = attributes["stride"].value().to_static[2]()
        alias dilation = attributes["dilation"].value().to_static[2]()
        alias padding_0 = padding[0]
        alias padding_1 = padding[1]
        alias stride_0 = stride[0]
        alias stride_1 = stride[1]
        alias dilation_0 = dilation[0]
        alias dilation_1 = dilation[1]

        # Row-major strides and shape components hoisted into scalar aliases so
        # the nested loops below index with plain compile-time constants.
        alias inputs_strides = input_shape.strides()
        alias kernel_strides = kernel_shape.strides()
        alias ug_strides = ug_shape.strides()
        alias inputs_strides_0 = inputs_strides[0]
        alias inputs_strides_1 = inputs_strides[1]
        alias inputs_strides_2 = inputs_strides[2]
        alias kernel_strides_0 = kernel_strides[0]
        alias kernel_strides_1 = kernel_strides[1]
        alias kernel_strides_2 = kernel_strides[2]
        alias ug_strides_0 = ug_strides[0]
        alias ug_strides_1 = ug_strides[1]
        alias ug_strides_2 = ug_strides[2]

        alias input_shape_0 = input_shape[0]
        alias input_shape_1 = input_shape[1]
        alias input_shape_2 = input_shape[2]
        alias input_shape_3 = input_shape[3]
        alias kernel_shape_2 = kernel_shape[2]
        alias kernel_shape_3 = kernel_shape[3]
        alias ug_shape_0 = ug_shape[0]
        alias ug_shape_1 = ug_shape[1]
        alias ug_shape_2 = ug_shape[2]
        alias ug_shape_3 = ug_shape[3]

        var res: Tensor[dtype]

        @parameter
        if tensor_id == 0:
            # Inputs
            # Sum of upper gradient over batch, X, Y dimensions
            res = Tensor[dtype](input_shape)

            # Parallelized over the batch dimension; each task owns disjoint
            # slices of `res`, so the += accumulation below is race-free.
            @parameter
            fn input_grad(batch: Int):
                for out_ch in range(ug_shape_1):
                    for ux in range(ug_shape_2):
                        for uy in range(ug_shape_3):  # For all the element of ug
                            # Top-left input coordinate touched by this output element.
                            var ix_base = ux * stride_0 - padding_0
                            var iy_base = uy * stride_1 - padding_1
                            var ug_val = ug[
                                batch * ug_strides_0
                                + out_ch * ug_strides_1
                                + ux * ug_strides_2
                                + uy
                            ]
                            for in_ch in range(input_shape_1):
                                for kx in range(kernel_shape_2):
                                    for ky in range(kernel_shape_3):
                                        var ix = ix_base + kx * dilation_0
                                        var iy = iy_base + ky * dilation_1
                                        # Skip kernel taps that fall in the padded region.
                                        if (
                                            ix < 0
                                            or iy < 0
                                            or ix >= input_shape_2
                                            or iy >= input_shape_3
                                        ):
                                            continue
                                        var kernel_index = (
                                            out_ch * kernel_strides_0
                                            + in_ch * kernel_strides_1
                                            + kx * kernel_strides_2
                                            + ky
                                        )
                                        var input_index = (
                                            batch * inputs_strides_0
                                            + in_ch * inputs_strides_1
                                            + ix * inputs_strides_2
                                            + iy
                                        )
                                        res[input_index] += (
                                            kernel[kernel_index] * ug_val
                                        )

            parallelize[input_grad](input_shape_0)

        elif tensor_id == 1:
            # Kernel
            # Sum of upper gradient over batch and X, Y dimensions
            res = Tensor[dtype](kernel_shape)

            # Parallelized over output channels; each task writes one kernel slice.
            @parameter
            fn kernel_grad(out_ch: Int):
                var channel_offset = out_ch * kernel_strides_0
                # k is a flattened (in_ch, kx, ky) index; decompose it below.
                for k in range(input_shape_1 * kernel_shape_2 * kernel_shape_3):
                    var in_ch_kx_ky = divmod(k, kernel_shape_3)
                    var in_ch = k // (kernel_shape_2 * kernel_shape_3)
                    var kx = in_ch_kx_ky[0] % kernel_shape_2
                    var ky = in_ch_kx_ky[1]

                    # TODO: Cant vectorize since you are going different directions across input and upper grad
                    # But theoretically could transpose or split somehow

                    var result: Scalar[dtype] = 0
                    for batch in range(input_shape_0):
                        for ux in range(ug_shape_2):
                            for uy in range(ug_shape_3):
                                var ix = ux * stride_0 - padding_0 + kx * dilation_0
                                var iy = uy * stride_1 - padding_1 + ky * dilation_1

                                # Padding region contributes nothing to the gradient.
                                if (
                                    ix < 0
                                    or iy < 0
                                    or ix >= input_shape_2
                                    or iy >= input_shape_3
                                ):
                                    continue

                                var input_index = batch * inputs_strides_0 + in_ch * inputs_strides_1 + ix * inputs_strides_2 + iy
                                var ug_index = batch * ug_strides_0 + out_ch * ug_strides_1 + ux * ug_strides_2 + uy

                                result += inputs[input_index] * ug[ug_index]

                    var kernel_index = channel_offset + k
                    res[kernel_index] = result

            parallelize[kernel_grad](ug_shape_1)

        else:
            # Bias
            # Sum of upper gradient over batch and X, Y dimensions
            # out_channels == ug_shape[1] == bias_shape[0]
            res = Tensor[dtype](bias_shape)

            # Psuedocode
            # For every channel in the bias tensor,
            # Iterate over the upper gradient across the batch
            # For each batch, sum the upper gradient across X, Y dimensions
            # Add the sum to the bias tensor

            @parameter
            fn bias_grad(out_ch: Int):
                var channel_offset = out_ch * ug_strides_1
                var sum: Scalar[dtype] = 0
                for batch in range(ug_shape_0):
                    var batch_offset = batch * ug_strides_0 + channel_offset

                    # The uX x uY plane is contiguous, so it can be reduced
                    # with a flat vectorized sum.
                    @parameter
                    fn vec_sum[Nelts: Int](ux_uy: Int):
                        sum += ug.load[Nelts](batch_offset + ux_uy).reduce_add()

                    vectorize[vec_sum, nelts, size = ug_shape_2 * ug_shape_3]()

                res[out_ch] = sum

            parallelize[bias_grad](ug_shape_1)

        return res
| basalt/basalt/autograd/ops/conv.mojo | false |
<filename>basalt/basalt/autograd/ops/dynamics.mojo
from basalt import Symbol
from basalt.nn.model import Parameters
from ..attributes import AttributeVector
struct CONCAT:
    @staticmethod
    fn result_shape(
        input_shapes: List[TensorShape], attributes: AttributeVector
    ) -> List[TensorShape]:
        # Assumptions: all tensors have the same shape, except for the concatenating dimension
        var dim = attributes["dim"].value().to_int() if attributes["dim"] else 0

        # Output size along `dim` is the sum of the inputs' sizes along `dim`.
        var concat_size: Int = 0
        for i in range(len(input_shapes)):
            concat_size += input_shapes[i][dim]

        var res_shape = input_shapes[0]
        res_shape[dim] = concat_size

        return List[TensorShape](res_shape)

    @staticmethod
    fn calc_chunks(shape: TensorShape, dim: Int) -> Int:
        # Number of chunks up to the concatenating dimension
        # Assuming tensor of equal shape, except for the concatenating dimension
        var chunks = 1
        for i in range(dim):
            chunks *= shape[i]
        return chunks

    @staticmethod
    fn forward[attributes: AttributeVector](
        inputs: List[Symbol],
        outputs: List[Symbol],
        parameters: Parameters,
    ):
        """Copies each input's contiguous chunks, interleaved, into outputs[0]."""
        alias dim = attributes["dim"].value().to_int() if attributes["dim"] else 0
        var n_chunks = Self.calc_chunks(inputs[0].shape, dim)

        # chunks[j]: contiguous element count input j contributes per chunk.
        # chunk_offsets: prefix sums; last entry is the output's chunk width.
        var chunks = List[Int]()
        var chunk_offsets = List[Int](0)
        for i in range(len(inputs)):
            chunks.append(inputs[i].shape.num_elements() // n_chunks)
            chunk_offsets.append(chunk_offsets[i] + chunks[i])

        for i in range(n_chunks):
            for j in range(len(inputs)):
                memcpy(
                    parameters.tensors[outputs[0]].data()
                    + i * chunk_offsets[len(inputs)]
                    + chunk_offsets[j],
                    parameters.tensors[inputs[j]].data() + i * chunks[j],
                    chunks[j],
                )

    @staticmethod
    fn backward[input_id: Int, attributes: AttributeVector](
        inputs: List[Symbol],
        outputs: List[Symbol],
        parameters: Parameters,
    ) -> Tensor[dtype]:
        """Extracts input `input_id`'s slice of the upstream gradient."""
        alias dim = attributes["dim"].value().to_int() if attributes["dim"] else 0
        var n_chunks = Self.calc_chunks(inputs[0].shape, dim)

        var chunks = List[Int]()
        var chunk_offsets = List[Int](0)
        for i in range(len(inputs)):
            chunks.append(inputs[i].shape.num_elements() // n_chunks)
            chunk_offsets.append(chunk_offsets[i] + chunks[i])

        var res_grad = Tensor[dtype](inputs[input_id].shape)

        # Inverse of forward: gather this input's strided chunks from the
        # concatenated gradient back into a contiguous tensor.
        for i in range(n_chunks):
            memcpy(
                res_grad.data() + i * chunks[input_id],
                parameters.grads[outputs[0]].data()
                + i * chunk_offsets[len(inputs)]
                + chunk_offsets[input_id],
                chunks[input_id],
            )

        return res_grad ^
struct SPLIT:
    @staticmethod
    fn result_shape(
        input_shapes: List[TensorShape], attributes: AttributeVector
    ) -> List[TensorShape]:
        # Assuming the sum of the sections is equal to the total size in the dim dimension.
        # E.g. sections = [5, 5, 2] -> shape (., 12, ., .) for dim = 1
        var dim = attributes["dim"].value().to_int() if attributes["dim"] else 0
        var sections = attributes["sections"].value().to_shape()

        # One output shape per section: input shape with `dim` replaced.
        var res_shapes = List[TensorShape]()
        for i in range(sections.rank()):
            var new_shape = input_shapes[0]
            new_shape[dim] = sections[i]
            res_shapes.append(new_shape)

        return res_shapes

    @staticmethod
    fn calc_chunks(shape: TensorShape, dim: Int) -> Int:
        # Number of chunks up to the concatenating dimension
        # Assuming tensor of equal shape, except for the concatenating dimension
        var chunks = 1
        for i in range(dim):
            chunks *= shape[i]
        return chunks

    @staticmethod
    fn forward[attributes: AttributeVector](
        inputs: List[Symbol],
        outputs: List[Symbol],
        parameters: Parameters,
    ):
        """Scatters inputs[0]'s interleaved chunks into the output tensors."""
        alias dim = attributes["dim"].value().to_int() if attributes["dim"] else 0
        alias sections = attributes["sections"].value().to_shape()
        var n_chunks = Self.calc_chunks(inputs[0].shape, dim)

        # chunks[j]: contiguous element count output j receives per chunk;
        # chunk_offsets: prefix sums (last entry = input's chunk width).
        var chunks = List[Int]()
        var chunk_offsets = List[Int](0)
        for i in range(len(outputs)):
            chunks.append(outputs[i].shape.num_elements() // n_chunks)
            chunk_offsets.append(chunk_offsets[i] + chunks[i])

        for i in range(n_chunks):
            for j in range(len(outputs)):
                memcpy(
                    parameters.tensors[outputs[j]].data() + i * chunks[j],
                    parameters.tensors[inputs[0]].data()
                    + i * chunk_offsets[len(outputs)]
                    + chunk_offsets[j],
                    chunks[j],
                )

    @staticmethod
    fn backward[input_id: Int, attributes: AttributeVector](
        inputs: List[Symbol],
        outputs: List[Symbol],
        parameters: Parameters,
    ) -> Tensor[dtype]:
        """Reassembles the split outputs' gradients into the input's gradient."""
        alias dim = attributes["dim"].value().to_int() if attributes["dim"] else 0
        alias sections = attributes["sections"].value().to_shape()
        var n_chunks = Self.calc_chunks(inputs[0].shape, dim)

        var chunks = List[Int]()
        var chunk_offsets = List[Int](0)
        for i in range(len(outputs)):
            chunks.append(outputs[i].shape.num_elements() // n_chunks)
            chunk_offsets.append(chunk_offsets[i] + chunks[i])

        # Inverse of forward: concatenate each output's gradient chunks back
        # into the single upstream tensor layout.
        var res_grad = Tensor[dtype](inputs[input_id].shape)

        for i in range(n_chunks):
            for j in range(len(outputs)):
                memcpy(
                    res_grad.data()
                    + i * chunk_offsets[len(outputs)]
                    + chunk_offsets[j],
                    parameters.grads[outputs[j]].data() + i * chunks[j],
                    chunks[j],
                )

        return res_grad ^
| basalt/basalt/autograd/ops/dynamics.mojo | false |
<filename>basalt/basalt/autograd/ops/matmul.mojo
from basalt.utils.tensorutils import transpose_2D
from algorithm import vectorize, parallelize
@always_inline
fn calculate_block[
    M: Int, N: Int, K: Int, BLOCK_M: Int, BLOCK_N: Int, nelts: Int
](
    res: DTypePointer[dtype],
    t1: DTypePointer[dtype],
    t2: DTypePointer[dtype],
    bm: Int,
    bn: Int,
):
    """Computes one BLOCK_M x BLOCK_N tile of res = t1 @ t2.

    The tile's top-left corner is at (bm, bn). Accumulation happens in a
    stack-allocated scratch buffer, written back to `res` at the end.
    """
    # Compute tile
    var acc = stack_allocation[BLOCK_M * BLOCK_N, dtype]()
    memset_zero[dtype](acc, BLOCK_M * BLOCK_N)

    for k in range(K):

        @unroll
        for m in range(BLOCK_M):

            @parameter
            fn inner_n[nelts: Int](n: Int):
                # acc[m, n] += t1[bm+m, k] * t2[k, bn+n], as a fused multiply-add
                # over `nelts` lanes of the n dimension.
                acc.store[width=nelts](
                    m * BLOCK_N + n,
                    SIMD[dtype, nelts]
                    .splat(t1[(bm + m) * K + k])
                    .fma(
                        t2.load[width=nelts](k * N + (bn + n)),
                        acc.load[width=nelts](m * BLOCK_N + n),
                    ),
                )

            vectorize[inner_n, nelts](BLOCK_N)

    # Store tile
    for m in range(BLOCK_M):

        @parameter
        fn vec_store[nelts: Int](n: Int):
            res.store[width=nelts](
                (bm + m) * N + (bn + n), acc.load[width=nelts](m * BLOCK_N + n)
            )

        vectorize[vec_store, nelts](BLOCK_N)
@parameter
@always_inline
fn dot[
    t1_shape: TensorShape, t2_shape: TensorShape
](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
    # Tensor convenience overload: unwraps to raw pointers and delegates to
    # the DTypePointer implementation below.
    dot[t1_shape, t2_shape](res.data(), t1.data(), t2.data())
@parameter
@always_inline
fn dot[
    t1_shape: TensorShape, t2_shape: TensorShape
](res: DTypePointer[dtype], t1: DTypePointer[dtype], t2: DTypePointer[dtype]):
    """Blocked matrix multiplication res = t1 @ t2.

    Tiles the M dimension by BLOCK_M (parallelized) and the N dimension by
    BLOCK_N (vectorized inside calculate_block); remainder tiles along both
    dimensions are handled explicitly below.
    """
    alias M = t1_shape[0]  # t1[0]
    alias K = t1_shape[1]  # t1[1], t2[0]
    alias N = t2_shape[1]  # t2[1]

    # simdwidthof[dtype]() = 8 for float32
    alias nelts = simdwidthof[dtype]()
    alias BLOCK_N = 8 * 2
    alias BLOCK_M = 6
    alias THREADS = 6  # num_logical_cores()

    alias BLOCK_N_REMAINDER = N % BLOCK_N
    alias BLOCK_M_REMAINDER = M % BLOCK_M

    @parameter
    fn bm_par(m_outer: Int):
        var bm = m_outer * BLOCK_M

        for n_outer in range(0, N // BLOCK_N):
            var bn = n_outer * BLOCK_N
            calculate_block[M, N, K, BLOCK_M, BLOCK_N, nelts](res, t1, t2, bm, bn)

        # Handle the remainder of N
        @parameter
        if BLOCK_N_REMAINDER > 0:
            var bn = N - BLOCK_N_REMAINDER
            calculate_block[M, N, K, BLOCK_M, BLOCK_N_REMAINDER, nelts](
                res, t1, t2, bm, bn
            )

    parallelize[bm_par](M // BLOCK_M, M // BLOCK_M)

    # Handle the remainder of M
    @parameter
    if BLOCK_M_REMAINDER > 0:
        var bm = M - BLOCK_M_REMAINDER

        for n_outer in range(0, N // BLOCK_N):
            var bn = n_outer * BLOCK_N
            calculate_block[M, N, K, BLOCK_M_REMAINDER, BLOCK_N, nelts](
                res, t1, t2, bm, bn
            )

        # Handle corner remainder
        @parameter
        if BLOCK_N_REMAINDER > 0:
            var bn = N - BLOCK_N_REMAINDER
            calculate_block[M, N, K, BLOCK_M_REMAINDER, BLOCK_N_REMAINDER, nelts](
                res, t1, t2, bm, bn
            )
fn dot_transpose_t2[
    A_shape: TensorShape, B_shape: TensorShape
](inout C: DTypePointer[dtype], A: DTypePointer[dtype], B: DTypePointer[dtype]):
    # C = A @ B^T, implemented by materializing B^T and reusing the tiled dot.
    # NOTE(review): unlike the Tensor overload, this variant does not zero C
    # first — presumably fine because dot overwrites every element of C; confirm.
    dot[A_shape, TensorShape(B_shape[1], B_shape[0])](C, A, transpose_2D[B_shape](B))
fn dot_transpose_t2[
    A_shape: TensorShape, B_shape: TensorShape
](inout C: Tensor[dtype], A: Tensor[dtype], B: Tensor[dtype]):
    # C = A @ B^T. C is zeroed before delegating to the tiled dot kernel.
    memset_zero[dtype](C.data(), C.num_elements())

    dot[A_shape, TensorShape(B_shape[1], B_shape[0])](C, A, transpose_2D[B_shape](B))

    # Earlier hand-rolled accumulate-by-row implementation, kept for reference:
    # @parameter
    # fn calc_row(i: Int):
    #     for j in range(B_shape[0]):

    #         @parameter
    #         fn calc_row_A_B[nelts: Int](k: Int):
    #             var A_pos = i * A.dim(1) + k
    #             var B_pos = j * A.dim(1) + k
    #             var t_new_pos = i * C.dim(1) + j

    #             C[t_new_pos] += (
    #                 A.load[nelts](A_pos) * B.load[nelts](B_pos)
    #             ).reduce_add()

    #         vectorize[calc_row_A_B, nelts, size=A_shape[1]]()

    # parallelize[calc_row](A_shape[0], 1)
fn dot_transpose_t1[
    A_shape: TensorShape, B_shape: TensorShape
](inout C: Tensor[dtype], A: Tensor[dtype], B: Tensor[dtype]):
    # C = A^T @ B. C is zeroed before delegating to the tiled dot kernel.
    memset_zero[dtype](C.data(), C.num_elements())

    dot[TensorShape(A_shape[1], A_shape[0]), B_shape](C, transpose_2D[A_shape](A), B)

    # Earlier hand-rolled implementation, kept for reference:
    # @parameter
    # fn calc_row(i: Int):
    #     for j in range(A_shape[0]):

    #         @parameter
    #         fn calc_row_t_new_B[nelts: Int](k: Int):
    #             var A_pos = j * A.dim(1) + i
    #             var B_pos = j * B.dim(1) + k
    #             var t_new_pos = i * C.dim(1) + k

    #             C.store[nelts](
    #                 t_new_pos,
    #                 C.load[nelts](t_new_pos)
    #                 + A[A_pos] * B.load[nelts](B_pos),
    #             )

    #         vectorize[calc_row_t_new_B, nelts, size=B_shape[1]]()

    # parallelize[calc_row](A_shape[1], 1)
| basalt/basalt/autograd/ops/matmul.mojo | false |
from algorithm import vectorize, parallelize
from math import exp, pow, max, min, abs
from math.limit import min_finite, max_finite
from basalt import Tensor, TensorShape
from basalt.utils.tensorutils import elwise_transform
from basalt.autograd.attributes import Attribute, AttributeVector
@value
struct SIGMOID:
    @staticmethod
    fn result_shape(t1_shape: TensorShape) -> TensorShape:
        # Elementwise op: output shape equals input shape.
        return t1_shape

    @staticmethod
    @always_inline
    fn sigmoid[
        type: DType, simd_width: Int
    ](x: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
        """Elementwise logistic function: 1 / (1 + e^-x)."""
        return 1 / (1 + exp(-x))

    @staticmethod
    @always_inline
    fn sidmoid_bw[
        type: DType, simd_width: Int
    ](x: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
        """Derivative of sigmoid: s * (1 - s) where s = sigmoid(x).

        NOTE: the name keeps its historical typo ("sidmoid") so external
        callers remain compatible; consider a deprecation rename later.
        """
        # Compute sigmoid once instead of twice (original evaluated it per factor).
        var s = Self.sigmoid(x)
        return s * (1 - s)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        """Forward operation of sigmoid."""
        elwise_transform[Self.sigmoid](res, t1)

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t1_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of sigmoid."""
        # d(sigmod(x))/dx = sigmoid(x) * (1 - sigmoid(x))
        var res_grad = Tensor[dtype](ug_shape)

        @parameter
        fn vec_sigmoid_bw[nelts: Int](idx: Int):
            # Chain rule: local derivative times upstream gradient.
            res_grad.store[nelts](
                idx,
                Self.sidmoid_bw(t1.load[nelts](idx)) * ug.load[nelts](idx),
            )

        vectorize[vec_sigmoid_bw, nelts](ug_shape.num_elements())

        return res_grad ^
struct RELU:
    @staticmethod
    fn result_shape(t1_shape: TensorShape) -> TensorShape:
        # Elementwise op: output shape equals input shape.
        return t1_shape

    @staticmethod
    @always_inline
    fn relu[
        type: DType, simd_width: Int
    ](x: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
        # x if x > 0 else 0
        return (x > 0).select(x, 0)

    @staticmethod
    @always_inline
    fn relu_bw[
        type: DType, simd_width: Int
    ](x: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
        # 1 if x > 0 else 0
        return (x > 0).select[type](1, 0)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        """Forward operation of relu."""
        elwise_transform[Self.relu](res, t1)

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t1_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of relu."""
        # d(relu(x))/dx = 1 if x > 0 else 0. We also give 0 to x = 0 instead of undefined.
        var res_grad = Tensor[dtype](ug_shape)

        @parameter
        fn vec_relu_bw[nelts: Int](idx: Int):
            # Chain rule: mask the upstream gradient where the input was <= 0.
            res_grad.store[nelts](
                idx, Self.relu_bw(t1.load[nelts](idx)) * ug.load[nelts](idx)
            )

        vectorize[vec_relu_bw, nelts](ug_shape.num_elements())

        return res_grad ^
struct TANH:
    @staticmethod
    fn result_shape(t1_shape: TensorShape) -> TensorShape:
        # Elementwise op: output shape equals input shape.
        return t1_shape

    @staticmethod
    @always_inline
    fn tanh[
        type: DType, simd_width: Int
    ](x: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
        """Hyperbolic tangent: (e^x - e^-x) / (e^x + e^-x)."""
        # Hoist the two exponentials into locals: the original expression
        # evaluated exp(x) and exp(-x) twice each (4 transcendental calls).
        var e_pos = exp(x)
        var e_neg = exp(-x)
        return (e_pos - e_neg) / (e_pos + e_neg)

    @staticmethod
    @always_inline
    fn tanh_bw[
        type: DType, simd_width: Int
    ](x: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
        """Derivative of tanh: 1 - tanh(x)^2."""
        var t = Self.tanh(x)
        return 1 - pow(t, 2)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        """Forward operation of tanh."""
        elwise_transform[Self.tanh](res, t1)

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t1_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of tanh."""
        # d(tanh(x))/dx = 1 - tanh(x) ** 2
        var res_grad = Tensor[dtype](ug_shape)

        @parameter
        fn vec_tanh_bw[nelts: Int](idx: Int):
            # Chain rule: local derivative times upstream gradient.
            res_grad.store[nelts](
                idx, Self.tanh_bw(t1.load[nelts](idx)) * ug.load[nelts](idx)
            )

        vectorize[vec_tanh_bw, nelts](ug_shape.num_elements())

        return res_grad ^
struct CLIP:
    @staticmethod
    fn result_shape(t_shape: TensorShape) -> TensorShape:
        # Elementwise op: output shape equals input shape.
        return t_shape

    @staticmethod
    fn forward[
        t_shape: TensorShape, attributes: AttributeVector
    ](inout res: Tensor[dtype], t: Tensor[dtype]):
        """
        Forward pass of the clip operation.

        Clamps every element into [min, max]; a missing bound defaults to the
        dtype's representable extreme (i.e. no clipping on that side).
        """
        alias min_attr = attributes["min"]
        alias max_attr = attributes["max"]

        var min_val = min_attr.value().to_scalar[dtype]() if min_attr else min_finite[
            dtype
        ]()
        var max_val = max_attr.value().to_scalar[dtype]() if max_attr else max_finite[
            dtype
        ]()

        @parameter
        fn vec_clip[nelts: Int](i: Int):
            # clamp(x) = max(min(x, max_val), min_val)
            res.store[nelts](i, t.load[nelts](i).min(max_val).max(min_val))

        vectorize[vec_clip, nelts, size = t_shape.num_elements()]()

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t_shape: TensorShape,
        attributes: AttributeVector = AttributeVector(),
    ](ug: Tensor[dtype], t: Tensor[dtype]) -> Tensor[dtype]:
        """Backward operation of clip."""
        alias min_attr = attributes["min"]
        alias max_attr = attributes["max"]

        var min_val = min_attr.value().to_scalar[dtype]() if min_attr else min_finite[
            dtype
        ]()
        var max_val = max_attr.value().to_scalar[dtype]() if max_attr else max_finite[
            dtype
        ]()

        var res_grad = Tensor[dtype](t_shape)

        @parameter
        fn vec_clip_bw[nelts: Int](i: Int):
            var val = t.load[nelts](i)
            # Gradient passes through only where the input was inside [min, max];
            # clipped positions get zero gradient.
            res_grad.store[nelts](
                i,
                ((val >= min_val) * (val <= max_val)).select(ug.load[nelts](i), 0),
            )

        vectorize[vec_clip_bw, nelts, size = t_shape.num_elements()]()

        return res_grad ^
struct SQUEEZE:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        var dim = attributes["dims"]
        var dims_to_squeeze = dim.value().to_shape() if dim else TensorShape()

        var new_shape = List[Int]()
        for i in range(t1_shape.rank()):
            # Drop a dimension only when it has size 1 and either no "dims"
            # attribute was given (squeeze all size-1 dims) or it was listed.
            if (not dim and t1_shape[i] == 1) or (
                i in dims_to_squeeze and t1_shape[i] == 1
            ):
                continue
            new_shape.append(t1_shape[i])

        return TensorShape(new_shape)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        attributes: AttributeVector,
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        # Squeezing never reorders data in a row-major layout; a flat copy suffices.
        memcpy(res.data(), t1.data(), t1.num_elements())

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t1_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]:
        # Gradient is the upstream gradient reinterpreted in the original shape.
        var res_grad = Tensor[dtype](t1_shape)
        memcpy(res_grad.data(), ug.data(), ug.num_elements())
        return res_grad ^
struct UNSQUEEZE:
    @staticmethod
    fn result_shape(t1_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        var dim = attributes["dims"]
        # NOTE(review): despite the name, these are the dims to INSERT, not squeeze.
        var dims_to_squeeze = dim.value().to_shape() if dim else TensorShape()

        # Position in the expanded dims where the new dim (or dims) is placed.
        var new_rank = t1_shape.rank() + dims_to_squeeze.rank()
        var new_shape = List[Int]()

        var j = 0
        for i in range(new_rank):
            # `i - new_rank` covers dims given as negative indices.
            if i in dims_to_squeeze or i - new_rank in dims_to_squeeze:
                new_shape.append(1)
            else:
                new_shape.append(t1_shape[j])
                j += 1

        return TensorShape(new_shape)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        attributes: AttributeVector,
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        # Inserting size-1 dims never reorders data; a flat copy suffices.
        memcpy(res.data(), t1.data(), t1.num_elements())

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t1_shape: TensorShape,
    ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]:
        # Gradient is the upstream gradient reinterpreted in the original shape.
        var res_grad = Tensor[dtype](t1_shape)
        memcpy(res_grad.data(), ug.data(), ug.num_elements())
        return res_grad ^
struct SLICE:
    @staticmethod
    fn adjust_boundary(slice: Int, dim_size: Int) -> Int:
        # Adjust negative indices & ensure they are within bounds.
        var s = slice if slice >= 0 else dim_size + slice
        return max(min(s, dim_size), 0)

    @staticmethod
    fn default_starts(shape: TensorShape) -> List[Int]:
        # One zero per dimension: slice from the beginning.
        var starts = List[Int]()
        for i in range(shape.rank()):
            starts.append(0)
        return starts^

    @staticmethod
    fn default_ends(shape: TensorShape) -> List[Int]:
        # One full extent per dimension: slice to the end.
        var ends = List[Int]()
        for i in range(shape.rank()):
            ends.append(shape[i])
        return ends^

    @staticmethod
    fn default_steps(shape: TensorShape) -> List[Int]:
        # Step of 1 per dimension.
        var steps = List[Int]()
        for i in range(shape.rank()):
            steps.append(1)
        return steps^

    @staticmethod
    fn default_axes(shape: TensorShape) -> List[Int]:
        # NOTE: axes can't be negative
        var axes = List[Int]()
        for i in range(shape.rank()):
            axes.append(i)
        return axes^

    @staticmethod
    fn result_shape(t1_shape: TensorShape, attributes: AttributeVector) -> TensorShape:
        # NOTE: Starts and ends have to be of the same size
        # NOTE: If axes not provided, starts and ends have to be of the same size as t1_shape
        var starts = attributes["starts"].value().to_shape()
        var ends = attributes["ends"].value().to_shape()
        var steps = attributes["steps"].value().to_shape() if attributes["steps"] else Self.default_steps(starts)
        var axes = attributes["axes"].value().to_shape() if attributes["axes"] else Self.default_axes(t1_shape)

        var new_shape = t1_shape
        for i in range(starts.rank()):
            var axis = axes[i]
            # len(range(...)) gives the sliced extent including step effects.
            new_shape[axis] = len(range(
                start = Self.adjust_boundary(starts[i], t1_shape[axis]),
                end = Self.adjust_boundary(ends[i], t1_shape[axis]),
                step = steps[i]
            ))

        return new_shape

    @staticmethod
    fn reorder_positions[id: Int](original: TensorShape, axes: TensorShape, t1_shape: TensorShape) -> List[Int]:
        # Reorder the starts (id=0), ends (id=1) or steps (id=2) to match the order of the axes
        var updated: List[Int]

        # Dimensions not mentioned in `axes` keep their defaults.
        @parameter
        if id == 0: updated = Self.default_starts(t1_shape)
        elif id == 1: updated = Self.default_ends(t1_shape)
        else: updated = Self.default_steps(t1_shape)

        for i in range(axes.rank()):
            var axis = axes[i]
            # Steps pass through unchanged; starts/ends get boundary-adjusted.
            updated[axis] = original[i] if id == 2 else Self.adjust_boundary(original[i], t1_shape[axis])

        return updated^

    # NOTE: For now you can't have recursive function as parameter functions.
    # NOTE: From testing it seems a recursive function is almost the same speed as doing multiple nested for loops.
    @staticmethod
    fn recursive_iters_slice[
        shape: TensorShape,
        original_shape: TensorShape,
        steps: List[Int],
        starts: List[Int],
        ends: List[Int],
        backward_op: Bool = False
    ](
        inout res: Tensor[dtype],
        t1: Tensor[dtype],
        last_dims: Int,
        position: Int,
        last_position: Int,
        idx: Int,
        idx_original: Int,
    ):
        """Recursively walks the sliced dimensions, copying the innermost run.

        Forward (backward_op=False): gathers from t1 into res.
        Backward (backward_op=True): scatters t1 (the upstream gradient) into res.
        `idx` indexes the sliced tensor; `idx_original` the original tensor.
        """
        alias strides = shape.strides()
        alias t1_strides = original_shape.strides()

        var idx_temp = idx
        var idx_original_temp = starts[position] * t1_strides[position] + idx_original

        if position == last_position + 1:
            # Work on the last dimensions
            alias position = shape.rank() - 1
            alias stride = t1_strides[position] * steps[position]

            @parameter
            fn v_slice[nelts: Int](k : Int):
                @parameter
                if not backward_op:
                    # Contiguous fast path when the innermost step is 1,
                    # strided SIMD load otherwise.
                    @parameter
                    if steps[position] == 1:
                        res.store[nelts](idx_temp + k, t1.load[nelts](idx_original_temp))
                    else:
                        res.store[nelts](
                            idx_temp + k,
                            t1.data().offset(idx_original_temp).simd_strided_load[nelts](stride)
                        )
                else:
                    # Backward mirrors forward with source/destination swapped.
                    @parameter
                    if steps[position] == 1:
                        res.store[nelts](idx_original_temp, t1.load[nelts](idx_temp + k))
                    else:
                        res.data().offset(idx_original_temp).simd_strided_store[nelts](
                            t1.load[nelts](idx_temp + k),
                            stride
                        )

                idx_original_temp += stride * nelts

            vectorize[v_slice, nelts](last_dims)

            return

        # Recurse one dimension deeper for every index in this dimension.
        for _ in range(shape[position]):
            Self.recursive_iters_slice[shape, original_shape, steps, starts, ends, backward_op](
                res, t1, last_dims, position + 1, last_position, idx_temp, idx_original_temp
            )
            idx_temp += strides[position]
            idx_original_temp += steps[position] * t1_strides[position]

    @staticmethod
    fn slice_kernel[
        res_shape: TensorShape,
        original_shape: TensorShape,
        steps: List[Int],
        starts: List[Int],
        ends: List[Int],
        backward_op: Bool = False
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        """Plans the slice traversal, then runs it in parallel.

        Trailing dims that are copied wholesale are fused into one vectorized
        run (`last_dims`); leading dims that are untouched are fused into the
        parallel loop (`first_dims`); the rest are handled recursively.
        """
        alias strides = original_shape.strides()

        # Get the dimensions for vectorization
        var last_dims = 1
        var positions_to_skip = 0
        for i in range(res_shape.rank() - 1, -1, -1):
            if steps[i] != 1 and i != res_shape.rank() - 1:
                break
            last_dims *= res_shape[i]
            positions_to_skip += 1
            if starts[i] != 0 or ends[i] != original_shape[i] or steps[i] != 1:
                break

        # Get the dimensions for the first loop
        var first_dims = 1
        var start_position = 0
        for i in range(res_shape.rank() - positions_to_skip):
            if steps[i] != 1 or starts[i] != 0 or ends[i] != original_shape[i]:
                break
            first_dims *= res_shape[i]
            start_position += 1

        var middle_dims = res_shape.num_elements() // last_dims // first_dims

        @parameter
        fn p_slice(i: Int):
            Self.recursive_iters_slice[
                res_shape, original_shape, steps, starts, ends, backward_op
            ](
                res, t1, last_dims, start_position, res_shape.rank() - 1 - positions_to_skip,
                i * middle_dims * last_dims, i * strides[start_position - 1]
            )

        parallelize[p_slice](first_dims)

    @staticmethod
    fn forward[
        t1_shape: TensorShape,
        attributes: AttributeVector,
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        # Normalize starts/ends/steps to full-rank, axis-ordered lists at compile time.
        alias axes = attributes["axes"].value().to_shape() if attributes["axes"] else Self.default_axes(t1_shape)
        alias starts = Self.reorder_positions[0](attributes["starts"].value().to_shape(), axes, t1_shape)
        alias ends = Self.reorder_positions[1](attributes["ends"].value().to_shape(), axes, t1_shape)
        alias steps = Self.reorder_positions[2](attributes["steps"].value().to_shape(), axes, t1_shape) if attributes["steps"] else Self.default_steps(t1_shape)

        alias res_shape = Self.result_shape(t1_shape, attributes)

        Self.slice_kernel[res_shape, t1_shape, steps, starts, ends, False](res, t1)

    @staticmethod
    fn backward[
        ug_shape: TensorShape,
        t1_shape: TensorShape,
        attributes: AttributeVector = AttributeVector(),
    ](ug: Tensor[dtype], t1: Tensor[dtype]) -> Tensor[dtype]:
        # Scatter the upstream gradient back into a zero tensor of the input
        # shape; positions outside the slice keep zero gradient.
        alias axes = attributes["axes"].value().to_shape() if attributes["axes"] else Self.default_axes(t1_shape)
        alias starts = Self.reorder_positions[0](attributes["starts"].value().to_shape(), axes, t1_shape)
        alias ends = Self.reorder_positions[1](attributes["ends"].value().to_shape(), axes, t1_shape)
        alias steps = Self.reorder_positions[2](attributes["steps"].value().to_shape(), axes, t1_shape) if attributes["steps"] else Self.default_steps(t1_shape)

        var res_grad = Tensor[dtype](t1_shape)

        Self.slice_kernel[ug_shape, t1_shape, steps, starts, ends, True](res_grad, ug)

        return res_grad ^
from .basics import (
ADD,
SUB,
MUL,
DIV,
EXP,
LOG,
POW,
DOT,
SUM,
MEAN,
MAX,
FLATTEN,
RESHAPE,
TRANSPOSE,
FMA,
)
from .mlops import SIGMOID, RELU, TANH, CLIP, SQUEEZE, UNSQUEEZE, SLICE
from .dynamics import CONCAT, SPLIT
from .conv import CONV2D
from .pool import MAXPOOL2D
from basalt import Tensor, TensorShape
from basalt.nn.model import Parameters
from basalt.utils.bytes import Bytes
from basalt.utils.tensorutils import broadcast_shapes, accumulate_grad
from ..attributes import AttributeVector
# Define operators as named parameter expression
@value
@register_passable("trivial")
struct OP(Stringable):
    """
    Compile time Operators list.

    Each alias is a unique (id, name) pair. `dynamic` marks operators with a
    variable number of inputs/outputs (CONCAT, SPLIT); equality compares ids only.
    """

    alias ADD = OP(0, "ADD")
    alias SUB = OP(1, "SUB")
    alias MUL = OP(2, "MUL")
    alias DIV = OP(3, "DIV")
    alias EXP = OP(4, "EXP")
    alias LOG = OP(5, "LOG")
    alias POW = OP(6, "POW")
    alias DOT = OP(7, "DOT")
    alias SUM = OP(8, "SUM")
    alias MEAN = OP(9, "MEAN")
    alias MAX = OP(10, "MAX")
    alias FLATTEN = OP(11, "FLATTEN")
    alias RESHAPE = OP(12, "RESHAPE")
    alias SIGMOID = OP(13, "SIGMOID")
    alias RELU = OP(14, "RELU")
    alias TANH = OP(15, "TANH")
    alias CONV2D = OP(16, "CONV2D")
    alias TRANSPOSE = OP(17, "TRANSPOSE")
    alias MAXPOOL2D = OP(18, "MAXPOOL2D")
    alias FMA = OP(19, "FMA")
    alias CLIP = OP(20, "CLIP")
    alias SQUEEZE = OP(21, "SQUEEZE")
    alias UNSQUEEZE = OP(22, "UNSQUEEZE")
    alias CONCAT = OP(23, "CONCAT", dynamic=True)
    alias SPLIT = OP(24, "SPLIT", dynamic=True)
    alias SLICE = OP(25, "SLICE")

    var id: UInt8
    var name: Bytes[16]
    var dynamic: Bool

    fn __init__(inout self, id: UInt8, name: String, dynamic: Bool = False):
        self.id = id
        self.name = Bytes[16](name)
        self.dynamic = dynamic

    fn __eq__(self, other: OP) -> Bool:
        # Identity is determined by id alone; `name` is for display only.
        return self.id == other.id

    fn __str__(self) -> String:
        return str(self.name)
fn static_result_shape(
    op: OP, operands: VariadicList[Symbol], attributes: AttributeVector
) -> TensorShape:
    """
    Static result shape for operators.

    Dispatches to the unary/binary/ternary overload based on operand count;
    1-3 operands are supported.
    """
    if len(operands) == 1:
        return static_result_shape(op, operands[0].shape, attributes)
    elif len(operands) == 2:
        return static_result_shape(op, operands[0].shape, operands[1].shape, attributes)
    elif len(operands) == 3:
        return static_result_shape(
            op, operands[0].shape, operands[1].shape, operands[2].shape, attributes
        )
    else:
        # Unsupported arity: report and return an empty shape as a sentinel.
        print("Error: Invalid number of operands")
        return TensorShape()
fn static_result_shape(
    op: OP, t1_shape: TensorShape, attributes: AttributeVector
) -> TensorShape:
    """
    Static result shape for unary operators.

    Returns TensorShape(-1) as a sentinel for unknown operators.
    """
    if op == OP.EXP:
        return EXP.result_shape(t1_shape)
    elif op == OP.LOG:
        return LOG.result_shape(t1_shape)
    elif op == OP.SUM:
        return SUM.result_shape(t1_shape, attributes)
    elif op == OP.MEAN:
        return MEAN.result_shape(t1_shape, attributes)
    elif op == OP.MAX:
        return MAX.result_shape(t1_shape, attributes)
    elif op == OP.FLATTEN:
        return FLATTEN.result_shape(t1_shape)
    elif op == OP.RESHAPE:
        return RESHAPE.result_shape(t1_shape, attributes)
    elif op == OP.SIGMOID:
        return SIGMOID.result_shape(t1_shape)
    elif op == OP.RELU:
        return RELU.result_shape(t1_shape)
    elif op == OP.TANH:
        return TANH.result_shape(t1_shape)
    elif op == OP.TRANSPOSE:
        return TRANSPOSE.result_shape(t1_shape, attributes)
    elif op == OP.MAXPOOL2D:
        return MAXPOOL2D.result_shape(t1_shape, attributes)
    elif op == OP.CLIP:
        return CLIP.result_shape(t1_shape)
    elif op == OP.SQUEEZE:
        return SQUEEZE.result_shape(t1_shape, attributes)
    elif op == OP.UNSQUEEZE:
        return UNSQUEEZE.result_shape(t1_shape, attributes)
    elif op == OP.SLICE:
        return SLICE.result_shape(t1_shape, attributes)
    else:
        print("[ERROR] Operator not found.")
        return TensorShape(-1)
fn static_result_shape(
    op: OP,
    t1_shape: TensorShape,
    t2_shape: TensorShape,
    attributes: AttributeVector,
) -> TensorShape:
    """
    Static result shape for binary operators.

    `attributes` is currently unused by every binary branch but kept for a
    uniform signature. Returns TensorShape(-1, -1) for unknown operators.
    """
    if op == OP.ADD:
        return ADD.result_shape(t1_shape, t2_shape)
    elif op == OP.SUB:
        return SUB.result_shape(t1_shape, t2_shape)
    elif op == OP.MUL:
        return MUL.result_shape(t1_shape, t2_shape)
    elif op == OP.DIV:
        return DIV.result_shape(t1_shape, t2_shape)
    elif op == OP.POW:
        return POW.result_shape(t1_shape, t2_shape)
    elif op == OP.DOT:
        return DOT.result_shape(t1_shape, t2_shape)
    else:
        # We can't print at compile time (at least for now it crashes at comp time with an error)
        print("[ERROR] Operator not found.")
        return TensorShape(-1, -1)
fn static_result_shape(
    op: OP,
    t1_shape: TensorShape,
    t2_shape: TensorShape,
    t3_shape: TensorShape,
    attributes: AttributeVector,
) -> TensorShape:
    """
    Static result shape for ternary operators.

    Only CONV2D and FMA take three operands; returns TensorShape(-1, -1) as
    a sentinel otherwise.
    """
    if op == OP.CONV2D:
        return CONV2D.result_shape(t1_shape, t2_shape, t3_shape, attributes)
    elif op == OP.FMA:
        return FMA.result_shape(t1_shape, t2_shape, t3_shape)
    else:
        print("[ERROR] Operator not found.")
        return TensorShape(-1, -1)
fn dynamic_result_shape(
    op: OP,
    operands: VariadicList[Symbol],
    attributes: AttributeVector,
) -> List[TensorShape]:
    """
    Static result shape for dynamic operators.

    Dynamic operators (CONCAT, SPLIT) take a variable number of inputs and
    may produce multiple output shapes.
    """
    # Unknown number of inputs and outputs.
    var input_shapes = List[TensorShape]()
    for operand in operands:
        input_shapes.append(operand.shape)

    if op == OP.CONCAT:
        return CONCAT.result_shape(input_shapes, attributes)
    elif op == OP.SPLIT:
        return SPLIT.result_shape(input_shapes, attributes)
    else:
        print("[ERROR] Operator not found.")
        return List[TensorShape](TensorShape(-1))
fn forward_op[
    op: OP, t1_shape: TensorShape, attributes: AttributeVector
](inout res: Tensor[dtype], t1: Tensor[dtype]):
    """
    Forward pass for unary operators.

    `op` is a compile-time parameter, so the `@parameter if` chain below is
    resolved at compile time: only the selected branch is emitted.
    """

    @parameter
    if op == OP.EXP:
        EXP.forward[t1_shape](res, t1)
    elif op == OP.LOG:
        LOG.forward[t1_shape](res, t1)
    elif op == OP.SUM:
        SUM.forward[t1_shape, attributes](res, t1)
    elif op == OP.MEAN:
        MEAN.forward[t1_shape, attributes](res, t1)
    elif op == OP.MAX:
        MAX.forward[t1_shape, attributes](res, t1)
    elif op == OP.FLATTEN:
        FLATTEN.forward[t1_shape](res, t1)
    elif op == OP.RESHAPE:
        RESHAPE.forward[t1_shape](res, t1)
    elif op == OP.SIGMOID:
        SIGMOID.forward[t1_shape](res, t1)
    elif op == OP.RELU:
        RELU.forward[t1_shape](res, t1)
    elif op == OP.TANH:
        TANH.forward[t1_shape](res, t1)
    elif op == OP.TRANSPOSE:
        TRANSPOSE.forward[t1_shape, attributes](res, t1)
    elif op == OP.MAXPOOL2D:
        MAXPOOL2D.forward[t1_shape, attributes](res, t1)
    elif op == OP.CLIP:
        CLIP.forward[t1_shape, attributes](res, t1)
    elif op == OP.SQUEEZE:
        SQUEEZE.forward[t1_shape, attributes](res, t1)
    elif op == OP.UNSQUEEZE:
        UNSQUEEZE.forward[t1_shape, attributes](res, t1)
    elif op == OP.SLICE:
        SLICE.forward[t1_shape, attributes](res, t1)
    else:
        print("[ERROR] Operator not found.")
fn forward_op[
    op: OP, t1_shape: TensorShape, t2_shape: TensorShape, attributes: AttributeVector
](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
    """
    Forward pass for binary operators.

    `op` is a compile-time parameter; only the selected branch is emitted.
    """

    @parameter
    if op == OP.ADD:
        ADD.forward[t1_shape, t2_shape](res, t1, t2)
    elif op == OP.SUB:
        SUB.forward[t1_shape, t2_shape](res, t1, t2)
    elif op == OP.MUL:
        MUL.forward[t1_shape, t2_shape](res, t1, t2)
    elif op == OP.DIV:
        DIV.forward[t1_shape, t2_shape](res, t1, t2)
    elif op == OP.POW:
        POW.forward[t1_shape, t2_shape](res, t1, t2)
    elif op == OP.DOT:
        DOT.forward[t1_shape, t2_shape](res, t1, t2)
    else:
        print("[ERROR] Operator not found.")
fn forward_op[
    op: OP,
    t1_shape: TensorShape,
    t2_shape: TensorShape,
    t3_shape: TensorShape,
    attributes: AttributeVector,
](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype], t3: Tensor[dtype]):
    """
    Forward pass for ternary operators.

    Compile-time dispatch on `op` over the three operands (e.g. CONV2D:
    input, weights, bias). The result is written into `res`.
    """

    @parameter
    if op == OP.CONV2D:
        CONV2D.forward[t1_shape, t2_shape, t3_shape, attributes](res, t1, t2, t3)
    elif op == OP.FMA:
        FMA.forward[t1_shape, t2_shape, t3_shape](res, t1, t2, t3)
    else:
        # Unreached for well-formed graphs; unknown ops leave `res` untouched.
        print("[ERROR] Operator not found.")
fn forward_op[
    op: OP,
    attributes: AttributeVector,
](
    inputs: List[Symbol],
    outputs: List[Symbol],
    parameters: Parameters,
):
    """
    Forward pass for dynamic operators.

    Dynamic operators (CONCAT, SPLIT) have a variable number of inputs or
    outputs, so they receive the symbol lists and resolve their tensors
    through `parameters` instead of taking tensors directly.
    """
    if op == OP.CONCAT:
        CONCAT.forward[attributes](inputs, outputs, parameters)
    elif op == OP.SPLIT:
        SPLIT.forward[attributes](inputs, outputs, parameters)
    else:
        print("[ERROR] Operator not found.")
fn backward_op[
    tensor_id: Int,
    op: OP,
    ug_shape: TensorShape,
    t1_shape: TensorShape,
    attributes: AttributeVector,
](ug: Tensor[dtype], t1: Tensor[dtype], inout grad: Tensor[dtype]):
    """
    Backward pass for unary operators.

    Computes the local gradient of `op` w.r.t. its single input from the
    upper gradient `ug` and accumulates it into `grad` (+=, so gradients
    from multiple consumers of the same tensor add up).
    """
    var res_grad: Tensor[dtype]

    @parameter
    if op == OP.EXP:
        res_grad = EXP.backward[ug_shape, t1_shape](ug, t1)
    elif op == OP.LOG:
        res_grad = LOG.backward[ug_shape, t1_shape](ug, t1)
    elif op == OP.SUM:
        res_grad = SUM.backward[ug_shape, t1_shape, attributes](ug, t1)
    elif op == OP.MEAN:
        res_grad = MEAN.backward[ug_shape, t1_shape, attributes](ug, t1)
    elif op == OP.MAX:
        res_grad = MAX.backward[ug_shape, t1_shape, attributes](ug, t1)
    elif op == OP.FLATTEN:
        res_grad = FLATTEN.backward[ug_shape, t1_shape](ug, t1)
    elif op == OP.RESHAPE:
        res_grad = RESHAPE.backward[ug_shape, t1_shape](ug, t1)
    elif op == OP.SIGMOID:
        res_grad = SIGMOID.backward[ug_shape, t1_shape](ug, t1)
    elif op == OP.RELU:
        res_grad = RELU.backward[ug_shape, t1_shape](ug, t1)
    elif op == OP.TANH:
        res_grad = TANH.backward[ug_shape, t1_shape](ug, t1)
    elif op == OP.TRANSPOSE:
        res_grad = TRANSPOSE.backward[ug_shape, t1_shape, attributes](ug, t1)
    elif op == OP.MAXPOOL2D:
        res_grad = MAXPOOL2D.backward[ug_shape, t1_shape, attributes](ug, t1)
    elif op == OP.CLIP:
        res_grad = CLIP.backward[ug_shape, t1_shape, attributes](ug, t1)
    elif op == OP.SQUEEZE:
        res_grad = SQUEEZE.backward[ug_shape, t1_shape](ug, t1)
    elif op == OP.UNSQUEEZE:
        res_grad = UNSQUEEZE.backward[ug_shape, t1_shape](ug, t1)
    elif op == OP.SLICE:
        res_grad = SLICE.backward[ug_shape, t1_shape, attributes](ug, t1)
    else:
        print("[ERROR] Operator not found.")
        # Sentinel tensor so res_grad is definitely initialized on the error path.
        res_grad = Tensor[dtype](-1)

    accumulate_grad(grad, res_grad)
fn backward_op[
    tensor_id: Int,
    op: OP,
    ug_shape: TensorShape,
    t1_shape: TensorShape,
    t2_shape: TensorShape,
    attributes: AttributeVector,
](ug: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype], inout grad: Tensor[dtype]):
    """
    Backward pass for binary operators.

    `tensor_id` selects which operand's gradient is computed (0 -> t1,
    1 -> t2). For broadcastable element-wise ops the gradient is reduced
    back from the broadcast shape to the operand's original shape during
    accumulation.
    """
    var res_grad: Tensor[dtype]

    @parameter
    if op == OP.ADD:
        res_grad = ADD.backward[tensor_id, ug_shape, t1_shape, t2_shape](ug, t1, t2)
    elif op == OP.SUB:
        res_grad = SUB.backward[tensor_id, ug_shape, t1_shape, t2_shape](ug, t1, t2)
    elif op == OP.MUL:
        res_grad = MUL.backward[tensor_id, ug_shape, t1_shape, t2_shape](ug, t1, t2)
    elif op == OP.DIV:
        res_grad = DIV.backward[tensor_id, ug_shape, t1_shape, t2_shape](ug, t1, t2)
    elif op == OP.POW:
        res_grad = POW.backward[tensor_id, ug_shape, t1_shape, t2_shape](ug, t1, t2)
    elif op == OP.DOT:
        res_grad = DOT.backward[tensor_id, ug_shape, t1_shape, t2_shape](ug, t1, t2)
    else:
        print("[ERROR] Operator not found.")
        # Sentinel tensor so res_grad is definitely initialized on the error path.
        res_grad = Tensor[dtype](-1, -1)

    # Compile-time predicate: these element-wise ops support broadcasting,
    # so their gradient may need to be un-broadcast before accumulation.
    fn broadcastable(op: OP) -> Bool:
        return op == OP.ADD or op == OP.SUB or op == OP.MUL or op == OP.DIV

    @parameter
    if broadcastable(op):
        accumulate_grad[
            grad_shape = t1_shape if tensor_id == 0 else t2_shape,
            res_grad_shape = broadcast_shapes(t1_shape, t2_shape),
        ](grad, res_grad)
    else:
        accumulate_grad(grad, res_grad)
fn backward_op[
    tensor_id: Int,
    op: OP,
    ug_shape: TensorShape,
    t1_shape: TensorShape,
    t2_shape: TensorShape,
    t3_shape: TensorShape,
    attributes: AttributeVector,
](
    ug: Tensor[dtype],
    t1: Tensor[dtype],
    t2: Tensor[dtype],
    t3: Tensor[dtype],
    inout grad: Tensor[dtype],
):
    """
    Backward pass for ternary operators.

    `tensor_id` selects which of the three operands' gradient is computed
    (0 -> t1, 1 -> t2, 2 -> t3); the result is accumulated into `grad`.
    """
    var res_grad: Tensor[dtype]

    @parameter
    if op == OP.CONV2D:
        res_grad = CONV2D.backward[
            tensor_id, ug_shape, t1_shape, t2_shape, t3_shape, attributes
        ](ug, t1, t2, t3)
    elif op == OP.FMA:
        res_grad = FMA.backward[tensor_id, ug_shape, t1_shape, t2_shape, t3_shape](
            ug, t1, t2, t3
        )
    else:
        print("[ERROR] Operator not found.")
        # Sentinel tensor so res_grad is definitely initialized on the error path.
        res_grad = Tensor[dtype](-1, -1)

    accumulate_grad(grad, res_grad)
fn backward_op[
    input_id: Int,
    op: OP,
    attributes: AttributeVector,
](
    inputs: List[Symbol],
    outputs: List[Symbol],
    inout grad: Tensor[dtype],
    parameters: Parameters,
):
    """
    Backward pass for dynamic operators.

    `input_id` selects which input symbol's gradient is computed. The
    operator resolves its tensors through `parameters`; the resulting
    gradient is accumulated into `grad`.
    """
    var res_grad: Tensor[dtype]

    if op == OP.CONCAT:
        res_grad = CONCAT.backward[input_id, attributes](inputs, outputs, parameters)
    elif op == OP.SPLIT:
        res_grad = SPLIT.backward[input_id, attributes](inputs, outputs, parameters)
    else:
        print("[ERROR] Operator not found.")
        # Sentinel tensor so res_grad is definitely initialized on the error path.
        res_grad = Tensor[dtype](-1, -1)

    accumulate_grad(grad, res_grad)
| basalt/basalt/autograd/ops/ops.mojo | false |
<filename>basalt/basalt/autograd/ops/pool.mojo
from math.limit import neginf
from basalt import Tensor, TensorShape
from basalt.autograd.attributes import AttributeVector
from basalt.autograd.ops.conv import get_result_shape
struct MAXPOOL2D:
    """2D max pooling over the two trailing (spatial) dims of an NCHW tensor."""

    @staticmethod
    fn result_shape(
        input_shape: TensorShape, attributes: AttributeVector
    ) -> TensorShape:
        """
        Compute the pooled output shape [batch, channels, oX, oY] from the
        kernel_size / padding / stride / dilation attributes.
        """
        var kernel_size = attributes["kernel_size"].value().to_static[2]()
        var padding = attributes["padding"].value().to_static[2]()
        var stride = attributes["stride"].value().to_static[2]()
        var dilation = attributes["dilation"].value().to_static[2]()

        # Spatial output extents are computed by the shared conv helper.
        var res = get_result_shape(
            input_shape,
            TensorShape(kernel_size[0], kernel_size[1]),
            padding,
            stride,
            dilation,
        )

        return TensorShape(input_shape[0], input_shape[1], res[0], res[1])

    @staticmethod
    fn forward[
        input_shape: TensorShape, attributes: AttributeVector
    ](inout outputs: Tensor[dtype], inputs: Tensor[dtype]):
        """
        Returns the max value of each kernel in the input tensor.
            inputs.shape     [batch_size, channels, iX, iY]
            with kernel_size = (kX, kY)
            outputs.shape    [batch_size, channels, oX, oY].

        Kernel positions falling into the (zero-)padding region are skipped,
        so padded elements never participate in the max.
        """
        alias kernel_size = attributes["kernel_size"].value().to_static[2]()
        alias padding = attributes["padding"].value().to_static[2]()
        alias stride = attributes["stride"].value().to_static[2]()
        alias dilation = attributes["dilation"].value().to_static[2]()

        alias inputs_strides = input_shape.strides()
        alias output_shape = Self.result_shape(input_shape, attributes)
        alias outputs_strides = output_shape.strides()

        for batch in range(input_shape[0]):
            for in_ch in range(input_shape[1]):
                for x in range(output_shape[2]):
                    for y in range(output_shape[3]):
                        var max_val: Scalar[dtype] = neginf[dtype]()
                        # Top-left corner of this window in input coordinates
                        # (may be negative because of padding).
                        var ix_base = x * stride[0] - padding[0]
                        var iy_base = y * stride[1] - padding[1]
                        for kx in range(kernel_size[0]):
                            for ky in range(kernel_size[1]):
                                var ix = ix_base + kx * dilation[0]
                                var iy = iy_base + ky * dilation[1]

                                # Skip out-of-bounds (padding) positions.
                                if (
                                    ix < 0
                                    or iy < 0
                                    or ix >= input_shape[2]
                                    or iy >= input_shape[3]
                                ):
                                    continue

                                var idx = (
                                    batch * inputs_strides[0]
                                    + in_ch * inputs_strides[1]
                                    + ix * inputs_strides[2]
                                    + iy
                                )

                                var val = inputs[idx]
                                if val > max_val:
                                    max_val = val

                        var out_idx = (
                            batch * outputs_strides[0]
                            + in_ch * outputs_strides[1]
                            + x * outputs_strides[2]
                            + y
                        )

                        outputs[out_idx] = max_val

    @staticmethod
    fn backward[
        ug_shape: TensorShape, input_shape: TensorShape, attributes: AttributeVector
    ](ug: Tensor[dtype], inputs: Tensor[dtype]) -> Tensor[dtype]:
        """
        Backward operation of MAXPOOL2D.

        Upper gradient of shape: [batch_size, channels, uX, uY].
        Routes each upper-gradient element to the input position that won
        the max in the forward pass (gradient of max is 1 at the argmax,
        0 elsewhere) and returns the accumulated input gradient.
        """
        alias kernel_size = attributes["kernel_size"].value().to_static[2]()
        alias padding = attributes["padding"].value().to_static[2]()
        alias stride = attributes["stride"].value().to_static[2]()
        alias dilation = attributes["dilation"].value().to_static[2]()

        alias ug_strides = ug_shape.strides()
        alias inputs_strides = input_shape.strides()

        var res = Tensor[dtype](input_shape)

        for batch in range(input_shape[0]):
            for in_ch in range(input_shape[1]):
                for x in range(ug_shape[2]):
                    for y in range(ug_shape[3]):
                        var max_val: Scalar[dtype] = neginf[dtype]()
                        var max_idx: Int = -1
                        var ix_base = x * stride[0] - padding[0]
                        var iy_base = y * stride[1] - padding[1]
                        for kx in range(kernel_size[0]):
                            for ky in range(kernel_size[1]):
                                var ix = ix_base + kx * dilation[0]
                                var iy = iy_base + ky * dilation[1]

                                if (
                                    ix < 0
                                    or iy < 0
                                    or ix >= input_shape[2]
                                    or iy >= input_shape[3]
                                ):
                                    continue

                                var idx = (
                                    batch * inputs_strides[0]
                                    + in_ch * inputs_strides[1]
                                    + ix * inputs_strides[2]
                                    + iy
                                )

                                var val = inputs[idx]
                                if val > max_val:
                                    max_val = val
                                    max_idx = idx

                        var ug_idx = (
                            batch * ug_strides[0]
                            + in_ch * ug_strides[1]
                            + x * ug_strides[2]
                            + y
                        )

                        # BUGFIX: if the whole window fell into the padding
                        # region (possible when padding >= kernel extent),
                        # max_idx is still -1 and must not be used as an index.
                        if max_idx != -1:
                            res[max_idx] += ug[ug_idx]

        return res
| basalt/basalt/autograd/ops/pool.mojo | false |
<filename>basalt/basalt/autograd/ops/__init__.mojo
from .ops import (
OP,
static_result_shape,
dynamic_result_shape,
forward_op,
backward_op,
)
| basalt/basalt/autograd/ops/__init__.mojo | false |
<filename>basalt/basalt/nn/activations.mojo
from basalt import Tensor, TensorShape
from basalt import Graph, Symbol, OP
from basalt.autograd.attributes import Attribute, AttributeVector
# '''Activation functions.'''
fn ReLU(inout g: Graph, input: Symbol) -> Symbol:
    """Append a RELU op node to the graph and return its output symbol."""
    return g.op(OP.RELU, input)
fn Sigmoid(inout g: Graph, input: Symbol) -> Symbol:
    """Append a SIGMOID op node to the graph and return its output symbol."""
    return g.op(OP.SIGMOID, input)
fn Tanh(inout g: Graph, input: Symbol) -> Symbol:
    """Append a TANH op node to the graph and return its output symbol."""
    return g.op(OP.TANH, input)
fn Softmax(inout g: Graph, input: Symbol, axis: Int) -> Symbol:
    """
    Build a numerically stable softmax along `axis`:
        softmax(x_i) = exp(x_i - max(x_j)) / sum(exp(x_j - max(x_j)))
    Subtracting the axis-max before exponentiating avoids overflow without
    changing the result.
    """
    var axis_attrs = AttributeVector(Attribute("axis", axis))
    var shifted = g.op(
        OP.SUB, input, g.op(OP.MAX, input, attributes=axis_attrs)
    )
    var numerator = g.op(OP.EXP, shifted)
    var denominator = g.op(OP.SUM, numerator, attributes=axis_attrs)
    return g.op(OP.DIV, numerator, denominator)
fn LogSoftmax(inout g: Graph, input: Symbol, axis: Int) -> Symbol:
    """
    Build a numerically stable log-softmax along `axis`:
        logsoftmax(x_i) = (x_i - max(x_j)) - log(sum(exp(x_j - max(x_j))))
    """
    var axis_attrs = AttributeVector(Attribute("axis", axis))
    var shifted = g.op(
        OP.SUB, input, g.op(OP.MAX, input, attributes=axis_attrs)
    )
    var exp_sum = g.op(
        OP.SUM, g.op(OP.EXP, shifted), attributes=axis_attrs
    )
    return g.op(OP.SUB, shifted, g.op(OP.LOG, exp_sum))
| basalt/basalt/nn/activations.mojo | false |
from math import sqrt
from basalt import dtype
from basalt import Tensor, TensorShape
from basalt.utils.rand_utils import rand_normal, rand_uniform
fn initialize_tensor(
    shape: TensorShape, type: String, data: List[Scalar[dtype]]
) -> Tensor[dtype]:
    """
    Allocate a tensor of `shape` and fill it according to `type`.

    Supported types:
        "random_uniform": data = [low, high]
        "random_normal":  data = [mean, std]
    Any other type prints an error and yields a default-constructed tensor.
    TODO: kaiming_uniform / kaiming_normal are not implemented yet.
    """
    var result = Tensor[dtype](shape)
    if type == "random_uniform":
        rand_uniform(result, low=data[0], high=data[1])
        return result
    elif type == "random_normal":
        rand_normal(
            result,
            mean=data[0].cast[DType.float64](),
            std=data[1].cast[DType.float64](),
        )
        return result
    else:
        print("[ERROR] Unsupported initialization type: " + type)
        return Tensor[dtype]()
fn calculate_fan(shape: TensorShape, mode: String) -> Scalar[dtype]:
    """
    Calculate the fan-in or fan-out of a tensor shape.

    fan_in  = shape[1] * product(shape[2:])   (input feature maps x receptive field)
    fan_out = shape[0] * product(shape[2:])   (output feature maps x receptive field)

    Args:
        shape: Must have rank >= 2; fan is undefined otherwise.
        mode: "fan_in" selects fan-in; any other value selects fan-out.

    Returns:
        The selected fan value, or 0 for invalid (rank < 2) shapes.
    """
    if shape.rank() < 2:
        print(
            "[ERROR] Fan in and fan out can not be calculated for tensor with less than"
            " 2 dimensions"
        )
        # BUGFIX: previously fell through and read shape[0]/shape[1] of an
        # invalid shape after printing the error; bail out instead.
        return 0

    var num_input_fmaps = shape[1]
    var num_output_fmaps = shape[0]
    var receptive_field_size = 1
    if shape.rank() > 2:
        for i in range(2, shape.rank()):
            receptive_field_size *= shape[i]
    var fan_in = num_input_fmaps * receptive_field_size
    var fan_out = num_output_fmaps * receptive_field_size

    if mode == "fan_in":
        return fan_in
    else:
        return fan_out
# # TODO: https://pytorch.org/docs/stable/_modules/torch/nn/init.html
# fn kaiming_uniform(shape: TensorShape, mode: String = "fan_in", nonlinearity: String = "leaky_relu") -> Tensor[dtype]:
# var fan = calculate_fan(shape, mode)
# # TODO: add support for other gains: https://github.com/pytorch/pytorch/blob/main/torch/nn/init.py#L68
# # Gain for linear and conv layers is 1
# var gain = 1
# var std = gain / sqrt(fan)
# # var bound = sqrt(3) * std.cast[dtype]()
# var bound = std.cast[dtype]()
# # print("Shape", shape, "Fan", fan, "Bound", bound)
# var t = Tensor[dtype](shape)
# rand_uniform(t, low = -bound, high = bound)
# return t^
# # TODO: https://pytorch.org/docs/stable/_modules/torch/nn/init.html
# fn kaiming_normal(shape: TensorShape, mode: String = "fan_in", nonlinearity: String = "leaky_relu") -> Tensor[dtype]:
# var fan = calculate_fan(shape, mode)
# # TODO: add support for other gains: https://github.com/pytorch/pytorch/blob/main/torch/nn/init.py#L68
# # Gain for linear and conv layers is 1
# var gain = 1
# var std = gain / sqrt(fan)
# var t = Tensor[dtype](shape)
# rand_normal(t, mean = 0, std = std.cast[DType.float64]())
# return t^
| basalt/basalt/nn/initializers.mojo | false |
<filename>basalt/basalt/nn/loss.mojo
import basalt.nn as nn
from basalt import Tensor, TensorShape
from basalt import Graph, Symbol, OP
fn MSELoss(
    inout g: Graph,
    y_pred: Symbol,
    y_true: Symbol,
) -> Symbol:
    """Mean squared error: mean((y_true - y_pred)^2)."""
    var error = g.op(OP.SUB, y_true, y_pred)
    var squared_error = g.op(OP.POW, error, 2)
    return g.op(OP.MEAN, squared_error)
fn CrossEntropyLoss(
    inout g: Graph,
    y_pred: Symbol,
    y_true: Symbol,
) -> Symbol:
    """Cross-entropy with mean reduction: -1/N * sum(y_true * log_softmax(y_pred))."""
    var log_probs = nn.LogSoftmax(g, y_pred, axis=1)
    var weighted = g.op(OP.MUL, y_true, log_probs)
    var summed = g.op(OP.SUM, weighted)
    # Multiply by -1/N to apply the negation and mean reduction in one op.
    return g.op(OP.MUL, summed, -1.0 / y_pred.shape[0])
| basalt/basalt/nn/loss.mojo | false |
<filename>basalt/basalt/nn/model.mojo
from collections.optional import Optional, OptionalReg
from pathlib import Path
from sys import env_get_int
from basalt import Graph, Symbol, Tensor, TensorShape
from basalt.autograd.ops import forward_op, backward_op
from basalt.utils.collection import Collection
from basalt.utils.tensorutils import fill
from .initializers import initialize_tensor
from basalt.utils.perf_utils import PerfMetrics
from basalt.utils.onnx_utils import load_onnx_model, export_onnx_model
# When runing mojo -D DEBUG=1 -I . file, a crash happens at some point at runtime because of an error in linking it seems (because of using -I .)
# For now it seems one has to change this variable manually to be able to run model with performance metrics.
alias DEBUG = env_get_int["DEBUG", 0]()
# TODO: remove when ability to concatenate graphs (modules)
fn dv_contains(dv: List[Symbol], symbol: Symbol) -> Bool:
    """Linear-scan membership test: True iff `symbol` occurs in `dv`."""
    var idx = 0
    while idx < len(dv):
        if dv[idx] == symbol:
            return True
        idx += 1
    return False
# TODO: remove when ability to concatenate graphs (modules)
fn n_inference_nodes(g: Graph) -> OptionalReg[Int]:
    """
    Calculate the index of the node up to which the forward pass should be executed for a model inference.
    When looping in reverse: Equals the first index on which the node output is also a graph output.
    The number of inference nodes is that index + 1.
    Returns None when no node output is a graph output.
    """
    for i in range(len(g.nodes) - 1, -1, -1):
        for j in range(len(g.nodes[i].outputs)):
            if dv_contains(g.outputs, g.nodes[i].outputs[j]):
                return i + 1
    return None
@value
struct Parameters:
    """Owns all runtime tensor storage for a model: values and gradients."""

    var tensors: Collection  # tensor values, keyed by graph Symbol
    var grads: Collection  # gradients, keyed by graph Symbol

    fn __init__(inout self):
        self.tensors = Collection()
        self.grads = Collection()
struct Model[
    g: Graph,
    n_inference_nodes: OptionalReg[Int] = n_inference_nodes(g),
]():
    """
    Compile-time specialized execution engine for a static Graph `g`.

    All node shapes and operators are known at compile time, so the forward
    and backward passes are fully unrolled over the graph's nodes.
    """

    var parameters: Parameters  # all tensor values and gradients
    var perf_metrics: PerfMetrics  # per-node timing (active only when DEBUG == 1)

    fn __init__(inout self, inference_only: Bool = False):
        self.parameters = Parameters()

        @parameter
        if DEBUG == 1:
            self.perf_metrics = PerfMetrics(g)
        else:
            self.perf_metrics = PerfMetrics()

        self.allocate_tensor_memory()
        self.allocate_grad_memory()

        # TODO: remove this when ability to concatenate graphs (modules)
        # NOTE: inference_only only used for suppressing the warning.
        if not inference_only and not g.loss_out:
            print("\n\n[WARNING]: No loss defined, model.forward() unavailable!\n\n")
        if not n_inference_nodes:
            print(
                "\n\n[WARNING]: No graph out defined, model.inference()"
                " unavailable!\n\n"
            )

    # TODO: remove when ability to concatenate graphs (modules)
    # Removes the need for splitting in forward and inference mode
    fn forward(inout self, *t_inputs: Tensor[dtype]) -> Tensor[dtype]:
        """Run the full forward pass (inference + loss) and return the loss tensor."""
        # NOTE: Important detail here is that the order of the inputs must be the same as the order the inputs were defined in the graph.
        # Example: If you were te define the y_true before the x when creating the graph
        #
        #   var g = Graph()
        #   var y_true = g.input(TensorShape(batch_size, n_outputs))
        #   var x = g.input(TensorShape(batch_size, n_inputs))
        #
        # Then the order of the inputs in the forward call must be the same:
        #
        #   model.forward(batch.labels, batch.inputs)

        # 1. Execute a full forward pass (model inference + loss)
        self.execute[g.nodes.size](t_inputs ^)

        # 2. Return loss from allocated output memory
        # TODO: known copy (reference?)
        return self.parameters.tensors[g.loss_out.value()]

    fn inference(inout self, *t_inputs: Tensor[dtype]) -> List[Tensor[dtype]]:
        """Run the forward pass up to the model outputs (no loss) and return them."""
        # 1. Execute forward pass up to model out
        self.execute[n_inference_nodes.value()](t_inputs)

        # 2. Return outputs from allocated output memory
        # TODO: known copies (reference?)
        var outputs = List[Tensor[dtype]]()
        for i in range(len(g.outputs)):
            outputs.append(self.parameters.tensors[g.outputs[i]])
        return outputs ^

    fn execute[num_nodes: Int](inout self, t_input: VariadicListMem[Tensor[dtype]]):
        """Write inputs, then run the first `num_nodes` graph nodes (unrolled)."""
        # 1. Write inputs to allocated input memory
        for i in range(len(g.inputs)):
            self.parameters.tensors[g.inputs[i]] = t_input[i]

        # 2. Loop over all nodes and execute forward operations
        @parameter
        fn fw_unroll[i: Int]():
            alias op = g.nodes[i].operator
            alias attrs = g.nodes[i].attributes

            # Save start time for performance metrics
            @parameter
            if DEBUG == 1:
                self.perf_metrics.start_forward_pass()

            @parameter
            if op.dynamic:
                # Dynamic ops (variable in/out count) resolve tensors themselves.
                forward_op[op, attrs](
                    g.nodes[i].inputs,
                    g.nodes[i].outputs,
                    self.parameters,
                )
            else:
                # Statically known shapes and number of operands
                alias num_operands = len(g.nodes[i].inputs)
                alias t1 = g.nodes[i].inputs[0]
                alias out = g.nodes[i].outputs[0]

                @parameter
                if num_operands == 1:
                    # Unary operator
                    forward_op[op, t1.shape, attrs](
                        self.parameters.tensors[out], self.parameters.tensors[t1]
                    )
                elif num_operands == 2:
                    # Binary operator
                    alias t2 = g.nodes[i].inputs[1]

                    forward_op[op, t1.shape, t2.shape, attrs](
                        self.parameters.tensors[out],
                        self.parameters.tensors[t1],
                        self.parameters.tensors[t2],
                    )
                elif num_operands == 3:
                    # Ternary operator
                    alias t2 = g.nodes[i].inputs[1]
                    alias t3 = g.nodes[i].inputs[2]

                    forward_op[op, t1.shape, t2.shape, t3.shape, attrs](
                        self.parameters.tensors[out],
                        self.parameters.tensors[t1],
                        self.parameters.tensors[t2],
                        self.parameters.tensors[t3],
                    )

            # Save end time for performance metrics
            @parameter
            if DEBUG == 1:
                self.perf_metrics.end_forward_pass(i)

        unroll[fw_unroll, num_nodes]()

    fn backward(inout self, *upper_grads: Tensor[dtype]):
        """
        Main entrypoint of backward pass.

        With no arguments the loss gradient is seeded to 1.0; otherwise the
        given upper gradients seed the last node's outputs. Then every node
        is visited in reverse order, accumulating gradients into trainable
        symbols only.
        """
        # 1. Initialize output gradient at the beginning of the backward pass
        if len(upper_grads) == 0:
            # TODO remove loss_out tag
            fill(self.parameters.grads[g.loss_out.value()], 1.0)
        else:
            var node_outputs = g.nodes[g.nodes.size - 1].outputs
            if len(upper_grads) != node_outputs.size:
                print(
                    "[WARNING] Number of upper grads does not match number of node"
                    " outputs!"
                )
            for i in range(node_outputs.size):
                self.parameters.grads[node_outputs[i]] = upper_grads[i]

        # 2. Loop over all nodes in reverse order and execute backward operations
        @parameter
        fn bw_unroll[i: Int]():
            alias reverse_i = g.nodes.size - i - 1
            alias op = g.nodes[reverse_i].operator
            alias attrs = g.nodes[reverse_i].attributes
            alias num_operands = len(g.nodes[reverse_i].inputs)

            # Save start time for performance metrics
            @parameter
            if DEBUG == 1:
                self.perf_metrics.start_backward_pass()

            @parameter
            if op.dynamic:

                @parameter
                fn unroll_dynamic[j: Int]():
                    @parameter
                    if g.nodes[reverse_i].inputs[j].trainable:
                        backward_op[j, op, attrs](
                            g.nodes[reverse_i].inputs,
                            g.nodes[reverse_i].outputs,
                            self.parameters.grads[g.nodes[reverse_i].inputs[j]],
                            self.parameters,
                        )

                unroll[unroll_dynamic, num_operands]()

            else:
                # Statically known shapes and number of operands
                alias out = g.nodes[reverse_i].outputs[0]  # or upper_grad symbol
                alias t1 = g.nodes[reverse_i].inputs[0]

                @parameter
                if num_operands == 1:
                    # Unary operator
                    @parameter
                    if t1.trainable:
                        backward_op[0, op, out.shape, t1.shape, attrs](
                            self.parameters.grads[out],
                            self.parameters.tensors[t1],
                            self.parameters.grads[t1],  # grad to be updated: inputs[0]
                        )

                elif num_operands == 2:
                    # Binary operator
                    alias t2 = g.nodes[reverse_i].inputs[1]

                    @parameter
                    if t1.trainable:
                        backward_op[0, op, out.shape, t1.shape, t2.shape, attrs](
                            self.parameters.grads[out],
                            self.parameters.tensors[t1],
                            self.parameters.tensors[t2],
                            self.parameters.grads[t1],  # grad to be updated: inputs[0]
                        )

                    @parameter
                    if t2.trainable:
                        backward_op[1, op, out.shape, t1.shape, t2.shape, attrs](
                            self.parameters.grads[out],
                            self.parameters.tensors[t1],
                            self.parameters.tensors[t2],
                            self.parameters.grads[t2],  # grad to be updated: inputs[1]
                        )

                elif num_operands == 3:
                    # Ternary operator
                    alias t2 = g.nodes[reverse_i].inputs[1]
                    alias t3 = g.nodes[reverse_i].inputs[2]

                    @parameter
                    if t1.trainable:
                        backward_op[
                            0, op, out.shape, t1.shape, t2.shape, t3.shape, attrs
                        ](
                            self.parameters.grads[out],
                            self.parameters.tensors[t1],
                            self.parameters.tensors[t2],
                            self.parameters.tensors[t3],
                            self.parameters.grads[t1],  # grad to be updated: inputs[0]
                        )

                    @parameter
                    if t2.trainable:
                        backward_op[
                            1, op, out.shape, t1.shape, t2.shape, t3.shape, attrs
                        ](
                            self.parameters.grads[out],
                            self.parameters.tensors[t1],
                            self.parameters.tensors[t2],
                            self.parameters.tensors[t3],
                            self.parameters.grads[t2],  # grad to be updated: inputs[1]
                        )

                    @parameter
                    if t3.trainable:
                        backward_op[
                            2, op, out.shape, t1.shape, t2.shape, t3.shape, attrs
                        ](
                            self.parameters.grads[out],
                            self.parameters.tensors[t1],
                            self.parameters.tensors[t2],
                            self.parameters.tensors[t3],
                            self.parameters.grads[t3],  # grad to be updated: inputs[2]
                        )

            # Save end time for performance metrics
            @parameter
            if DEBUG == 1:
                self.perf_metrics.end_backward_pass(i)

        unroll[bw_unroll, g.nodes.size]()

    fn allocate_tensor_memory(inout self):
        """Allocate value tensors for all graph inputs, params, and node outputs."""
        for i in range(len(g.inputs)):
            self.parameters.tensors.append(
                Tensor[dtype](g.inputs[i].shape), g.inputs[i]
            )

        for i in range(len(g.params)):
            var p = g.params.symbols[i]
            var p_init = g.params.values[i]

            var par: Tensor[dtype]
            if p_init.initializer:
                # 1. Specific parameter initialization defined
                var initializer_attr = p_init.initializer.value()[]
                par = initialize_tensor(
                    shape=p.shape,
                    type=initializer_attr.to_string(),
                    data=p_init.data.value()[],
                )
            elif p_init.data:
                # 2. Parameter initialized with data only
                # Data is assumed to contain the tensor
                par = g.params.get_tensor(i)
            else:
                # Default parameter initialization to zero
                par = Tensor[dtype](p.shape)

            self.parameters.tensors.append(par ^, p)

        for i in range(len(g.nodes)):
            # Assumption: An input or a param cannot be an output of a node
            for j in range(len(g.nodes[i].outputs)):
                self.parameters.tensors.append(
                    Tensor[dtype](g.nodes[i].outputs[j].shape), g.nodes[i].outputs[j]
                )

    fn allocate_grad_memory(inout self):
        """Allocate gradient tensors for every trainable symbol in the graph."""
        # Gradient have same shape as the tensor
        for i in range(len(g.inputs)):
            if g.inputs[i].trainable:
                self.parameters.grads.append(
                    Tensor[dtype](g.inputs[i].shape), g.inputs[i]
                )

        for i in range(len(g.params)):
            var grad = g.params.symbols[i]
            if grad.trainable:
                self.parameters.grads.append(Tensor[dtype](grad.shape), grad)

        for i in range(len(g.nodes)):
            for j in range(len(g.nodes[i].outputs)):
                var out = g.nodes[i].outputs[j]
                if out.trainable:
                    self.parameters.grads.append(Tensor[dtype](out.shape), out)

    fn print_perf_metrics(self, time_format: String = "ns", print_shape: Bool = False):
        """Print forward/backward timing collected when compiled with DEBUG=1."""
        self.perf_metrics.print_forward_perf_metrics(time_format, print_shape)
        self.perf_metrics.print_backward_perf_metrics(time_format, print_shape)

    fn load_model_data(inout self, model_path: String):
        """Load parameter data from an .onnx file into `self.parameters`."""
        var path = Path(model_path)
        print("Loading model data from:", path)

        try:
            if path.suffix() == ".onnx":
                # NOTE(review): other methods access the graph parameter as `g`,
                # not `self.g` — confirm `self.g` resolves identically here.
                load_onnx_model(model_path, self.parameters, self.g)
            else:
                print("Model file format not supported:", path.suffix())
        except e:
            print("Error loading model data:", e)

    fn export_model(self, model_path: String):
        """Export the model parameters to an .onnx file."""
        var path = Path(model_path)
        print("Exporting model to:", path)

        try:
            if path.suffix() == ".onnx":
                export_onnx_model(model_path, self.parameters, self.g)
            else:
                print("Model file format not supported:", path.suffix())
        except e:
            print("Error exporting model:", e)
<filename>basalt/basalt/nn/optim.mojo
from math import add, mul, div, sqrt, sub
from algorithm import vectorize, parallelize
from .model import Parameters
from basalt import Graph, Tensor, TensorShape
from basalt.utils.collection import Collection
fn get_trainable_parameters(g: Graph) -> List[Symbol]:
    """
    Collect the symbols of all trainable parameters in the graph.
    """
    var result = List[Symbol]()
    for idx in range(len(g.params)):
        var sym = g.params.symbols[idx]
        if sym.trainable:
            result.append(sym)
    return result ^
struct Adam[
    g: Graph,
    mutability: __mlir_type.i1,
    lifetime: AnyLifetime[mutability].type,
    trainable_parameters: List[Symbol] = get_trainable_parameters(g),
]:
    """
    Adam optimizer over the graph's trainable parameters.

    Keeps first-moment (momentum) and second-moment (RMS) running averages
    per parameter and applies bias-corrected updates on each `step()`.
    """

    var parameters: Reference[Parameters, mutability, lifetime]  # model storage being optimized
    var lr: Scalar[dtype]  # learning rate
    var beta1: Scalar[dtype]  # first-moment decay
    var beta2: Scalar[dtype]  # second-moment decay
    var epsilon: Scalar[dtype]  # denominator stabilizer
    var iter: Int  # step counter (for bias correction)
    var rms_grads: Collection  # second-moment state, one tensor per parameter
    var momentum_grads: Collection  # first-moment state, one tensor per parameter

    fn __init__(
        inout self,
        parameters: Reference[Parameters, mutability, lifetime],
        lr: Scalar[dtype] = 0.001,
        beta1: Scalar[dtype] = 0.9,
        beta2: Scalar[dtype] = 0.999,
        epsilon: Scalar[dtype] = 1e-8,
    ):
        self.parameters = parameters
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.iter = 0

        # Capacity of the collections should be the n of trainable parameters
        self.rms_grads = Collection(capacity=len(trainable_parameters))
        self.momentum_grads = Collection(capacity=len(trainable_parameters))

        self.allocate_rms_and_momentum()

    fn zero_grad(inout self):
        """Set all gradients to zero."""
        self.parameters[].grads.set_zero()

    fn step(inout self):
        """Update model parameters (one Adam step, parallel over parameters)."""
        self.iter += 1

        # Loop over all trainable parameters
        @parameter
        fn p_step(i: Int):
            var param = trainable_parameters[i]

            @parameter
            fn v_step[nelts: Int](j: Int):
                var momentum_grads = self.momentum_grads[param].load[nelts](j)
                var rms_grads = self.rms_grads[param].load[nelts](j)
                var grads = self.parameters[].grads[param].load[nelts](j)
                var params = self.parameters[].tensors[param].load[nelts](j)

                # Momentum beta 1
                # f1 = beta1 * momentum + (1 - beta1) * grad
                momentum_grads = self.beta1 * momentum_grads + (1 - self.beta1) * grads
                self.momentum_grads[param].store[nelts](j, momentum_grads)

                # Bias correction
                # f2 = f1 / (1 - beta1 ** iter)
                momentum_grads = momentum_grads / (1 - self.beta1**self.iter)

                # RMS beta 2
                # f1 = beta2 * rms + (1 - beta2) * grad ** 2
                rms_grads = self.beta2 * rms_grads + (1 - self.beta2) * grads * grads
                self.rms_grads[param].store[nelts](j, rms_grads)

                # Bias correction
                # f2 = f1 / (1 - beta2 ** iter)
                rms_grads = rms_grads / (1 - self.beta2**self.iter)

                # tensor = tensor - lr * (f2 / (sqrt(rms) + epsilon))
                params = params - self.lr * (
                    momentum_grads / (sqrt(rms_grads) + self.epsilon)
                )
                self.parameters[].tensors[param].store[nelts](j, params)

            vectorize[v_step, 1](param.shape.num_elements())

        parallelize[p_step](len(trainable_parameters))

    fn allocate_rms_and_momentum(inout self):
        """Allocate zero-initialized first/second-moment state per parameter."""
        # They are initialized to zero
        # Loop over all trainable parameters
        for i in range(len(trainable_parameters)):
            var param = trainable_parameters[i]
            self.rms_grads.append(Tensor[dtype](param.shape), param)
            self.momentum_grads.append(Tensor[dtype](param.shape), param)
| basalt/basalt/nn/optim.mojo | false |
from math import min
from testing import assert_true
from algorithm import vectorize
from tensor import Tensor as _Tensor
from tensor import TensorShape as _TensorShape
alias MAX_RANK = 8
@register_passable("trivial")
struct TensorShape(Stringable):
    """
    Fixed-capacity tensor shape: up to MAX_RANK dimensions stored inline.

    NOTE(review): constructors copy only the first MAX_RANK dimensions;
    shapes with more dimensions are silently truncated — confirm callers
    never exceed MAX_RANK.
    """

    var _rank: Int  # number of valid entries in _shape
    var _shape: StaticIntTuple[MAX_RANK]  # dimension sizes (unused slots zeroed)

    @always_inline("nodebug")
    fn __init__(inout self, *shape: Int):
        self._rank = len(shape)
        self._shape = StaticIntTuple[MAX_RANK]()
        for i in range(min(self._rank, MAX_RANK)):
            self._shape[i] = shape[i]

    @always_inline("nodebug")
    fn __init__(inout self, shapes: VariadicList[Int]):
        self._rank = len(shapes)
        self._shape = StaticIntTuple[MAX_RANK]()
        for i in range(min(self._rank, MAX_RANK)):
            self._shape[i] = shapes[i]

    @always_inline("nodebug")
    fn __init__(inout self, shape: List[Int]):
        self._rank = len(shape)
        self._shape = StaticIntTuple[MAX_RANK]()
        for i in range(min(self._rank, MAX_RANK)):
            self._shape[i] = shape[i]

    @always_inline("nodebug")
    fn __init__[num: Int](inout self, shape: StaticIntTuple[num]):
        self._rank = num
        self._shape = StaticIntTuple[MAX_RANK]()
        for i in range(min(self._rank, MAX_RANK)):
            self._shape[i] = shape[i]

    @always_inline("nodebug")
    fn __init__(inout self, rank: Int, shape: StaticIntTuple[MAX_RANK]):
        self._rank = rank
        self._shape = shape

    @always_inline("nodebug")
    fn __init__(inout self, owned shape: _TensorShape):
        # Conversion from the standard-library TensorShape.
        self._rank = shape.rank()
        self._shape = StaticIntTuple[MAX_RANK]()
        for i in range(min(self._rank, MAX_RANK)):
            self._shape[i] = shape[i]

    @always_inline("nodebug")
    fn __getitem__(self, index: Int) -> Int:
        # Negative indices count from the end (Python-style).
        return self._shape[index if index >= 0 else self._rank + index]

    @always_inline("nodebug")
    fn __setitem__(inout self, index: Int, value: Int):
        # Negative indices count from the end (Python-style).
        self._shape[index if index >= 0 else self._rank + index] = value

    @always_inline("nodebug")
    fn rank(self) -> Int:
        return self._rank

    @always_inline("nodebug")
    fn num_elements(self) -> Int:
        # Product of all dimensions; 1 for a rank-0 shape.
        var result = 1
        for i in range(self._rank):
            result *= self._shape[i]
        return result

    @always_inline("nodebug")
    fn strides(self) -> StaticIntTuple[MAX_RANK]:
        # Row-major (C-contiguous) strides: last dim has stride 1.
        var result = StaticIntTuple[MAX_RANK](0)
        result[self._rank - 1] = 1
        for i in range(self._rank - 2, -1, -1):
            result[i] = result[i + 1] * self._shape[i + 1]
        return result

    @always_inline("nodebug")
    fn _std_shape(self) -> _TensorShape:
        # Convert back to the standard-library TensorShape (used for __str__).
        var s = List[Int](capacity=self.rank())
        for i in range(self.rank()):
            s.append(self[i])
        return _TensorShape(s)

    @always_inline("nodebug")
    fn __str__(self) -> String:
        return str(self._std_shape())

    @always_inline("nodebug")
    fn __eq__(self, other: TensorShape) -> Bool:
        # Shapes are equal iff ranks match and every dimension matches.
        if self.rank() != other.rank():
            return False
        for i in range(self.rank()):
            if self[i] != other[i]:
                return False
        return True

    @always_inline("nodebug")
    fn __ne__(self, other: TensorShape) -> Bool:
        return not self.__eq__(other)

    @always_inline("nodebug")
    fn __contains__(self, value: Int) -> Bool:
        # True iff any dimension equals `value`.
        for i in range(self.rank()):
            if self[i] == value:
                return True
        return False
struct Tensor[dtype: DType](Stringable, Movable, CollectionElement):
    """A dense, heap-owned tensor of `dtype` scalars described by a `TensorShape`.

    The buffer is allocated on construction, deep-copied on `__copyinit__`,
    stolen on `__moveinit__`, and freed in `__del__`.
    """

    var _data: DTypePointer[dtype]  # owned buffer, freed in __del__
    var _shape: TensorShape

    @always_inline("nodebug")
    fn __init__(inout self, *dims: Int):
        """Allocate a zero-initialized tensor with the given dimensions."""
        self._shape = TensorShape(dims)
        self._data = DTypePointer[dtype].alloc(self._shape.num_elements())
        memset_zero(self._data, self._shape.num_elements())

    @always_inline("nodebug")
    fn __init__(inout self, owned shape: TensorShape):
        """Allocate a zero-initialized tensor with the given shape."""
        self._data = DTypePointer[dtype].alloc(shape.num_elements())
        memset_zero(self._data, shape.num_elements())
        self._shape = shape

    @always_inline("nodebug")
    fn __init__(
        inout self, owned data: DTypePointer[dtype], owned shape: TensorShape
    ):
        """Copy-construct from a raw buffer; `data` itself is NOT freed here."""
        # NOTE: Remember to use _ = your_tensor that you passed, so there is no weird behavior in this function
        self._data = DTypePointer[dtype].alloc(shape.num_elements())
        self._shape = shape
        memcpy(self._data, data, self._shape.num_elements())
        _ = data

    @always_inline("nodebug")
    fn __init__(inout self, owned tensor: _Tensor[dtype]):
        """Copy-construct from a standard-library tensor."""
        self._data = DTypePointer[dtype].alloc(tensor.num_elements())
        self._shape = tensor.shape()
        memcpy(self._data, tensor.data(), self._shape.num_elements())
        _ = tensor

    @always_inline("nodebug")
    fn __moveinit__(inout self, owned other: Tensor[dtype]):
        # Steal the buffer: no allocation, no copy.
        self._data = other._data
        self._shape = other._shape

    @always_inline("nodebug")
    fn __copyinit__(inout self, other: Tensor[dtype]):
        # Deep copy of the whole buffer.
        # print("[WARNING] Copying tensor")
        self._data = DTypePointer[dtype].alloc(other._shape.num_elements())
        memcpy(self._data, other._data, other.num_elements())
        self._shape = other._shape

    @always_inline("nodebug")
    fn __getitem__(self, index: Int) -> Scalar[dtype]:
        """Flat (row-major) element read; no bounds checking."""
        return self._data[index]

    @always_inline("nodebug")
    fn __setitem__(self, index: Int, value: Scalar[dtype]):
        """Flat (row-major) element write; no bounds checking."""
        self._data[index] = value

    @always_inline("nodebug")
    fn data(self) -> DTypePointer[dtype]:
        """Raw pointer to the underlying buffer (ownership stays with the tensor)."""
        return self._data

    @always_inline("nodebug")
    fn shape(self) -> TensorShape:
        return self._shape

    @always_inline("nodebug")
    fn load[simd_width: Int](self, index: Int) -> SIMD[dtype, simd_width]:
        """SIMD read of `simd_width` contiguous elements starting at flat `index`."""
        return self._data.load[width=simd_width](index)

    @always_inline("nodebug")
    fn store[simd_width: Int](self, index: Int, value: SIMD[dtype, simd_width]):
        """SIMD write of `simd_width` contiguous elements starting at flat `index`."""
        self._data.store[width=simd_width](index, value)

    @always_inline("nodebug")
    fn strides(self) -> StaticIntTuple[MAX_RANK]:
        """Row-major strides, in elements."""
        return self._shape.strides()

    @always_inline("nodebug")
    fn rank(self) -> Int:
        return self._shape.rank()

    @always_inline("nodebug")
    fn num_elements(self) -> Int:
        return self._shape.num_elements()

    @always_inline("nodebug")
    fn dim(self, index: Int) -> Int:
        """Size of dimension `index`."""
        return self._shape[index]

    @always_inline("nodebug")
    fn zero(self):
        """Set every element to zero in place."""
        memset_zero(self._data, self.num_elements())

    @always_inline("nodebug")
    fn ireshape(inout self, new_shape: TensorShape) raises:
        """In-place reshape; asserts the element counts are equal."""
        # NOTE Consider not raising on error
        assert_true(self.num_elements() == new_shape.num_elements())
        self._shape = new_shape

    @always_inline("nodebug")
    fn __str__(self) -> String:
        # Copy into a stdlib tensor purely for its formatting; the stdlib
        # tensor takes ownership of the freshly copied buffer.
        var new_data = DTypePointer[dtype].alloc(self.num_elements())
        var std_shape = self._shape._std_shape()
        memcpy(new_data, self._data, self.num_elements())
        return str(_Tensor[dtype](ptr=new_data, shape=std_shape))

    @always_inline("nodebug")
    fn __del__(owned self):
        self._data.free()
| basalt/basalt/nn/tensor.mojo | false |
<filename>basalt/basalt/nn/__init__.mojo
from .tensor import Tensor, TensorShape
from .model import Model
from .layers.linear import Linear
from .layers.conv import Conv2d
from .layers.pool import MaxPool2d
from .loss import MSELoss, CrossEntropyLoss
from .activations import Softmax, LogSoftmax, ReLU, Sigmoid, Tanh
| basalt/basalt/nn/__init__.mojo | false |
from basalt import Graph, Symbol, OP
from basalt import Tensor, TensorShape
from basalt.utils import q_sqrt
from basalt.autograd.params import Param
from basalt.autograd.attributes import AttributeVector, Attribute
fn Conv2d(
    inout g: Graph,
    inputs: Symbol,
    out_channels: Int,
    kernel_size: StaticIntTuple[2],
    padding: StaticIntTuple[2] = 0,
    stride: StaticIntTuple[2] = 1,
    dilation: StaticIntTuple[2] = 1,
) -> Symbol:
    """
    A 2D Convolution Layer.

    Parameters
        inputs.shape     [batch, in_channels, iX, iY]
        kernel.shape     [out_channels, in_channels, kX, kY] (or weights)
        bias.shape       [out_channels].
        output.shape     [batch, out_channels, oX, oY].
    """
    var in_channels: Int = inputs.shape[1]
    # Uniform init bound 1/sqrt(fan_in), fan_in = in_channels * kX * kY.
    var fan_in: Scalar[dtype] = in_channels * kernel_size[0] * kernel_size[1]
    var bound = q_sqrt(fan_in)
    # NOTE: the registration order of g.param calls (weights, then bias) is
    # load-bearing — the onnx loader assigns initializers to params by position.
    var weights = g.param(
        TensorShape(out_channels, in_channels, kernel_size[0], kernel_size[1]),
        init=Param("random_uniform", -bound, bound)
        # init=Param("kaiming_uniform", 0)
    )
    var bias = g.param(
        TensorShape(out_channels), init=Param("random_uniform", -bound, bound)
    )

    return g.op(
        OP.CONV2D,
        inputs,
        weights,
        bias,
        attributes=AttributeVector(
            Attribute("padding", padding),
            Attribute("stride", stride),
            Attribute("dilation", dilation),
        ),
    )
| basalt/basalt/nn/layers/conv.mojo | false |
<filename>basalt/basalt/nn/layers/linear.mojo
from basalt import Tensor, TensorShape
from basalt import Graph, Symbol, OP
from basalt.utils import q_sqrt
from basalt.autograd.params import Param
fn Linear(
    inout g: Graph,
    inputs: Symbol,
    n_outputs: Int,
) -> Symbol:
    """
    A fully connected layer: output = inputs @ weights + bias.

    inputs.shape  [batch, n_inputs]
    output.shape  [batch, n_outputs]
    """
    # Uniform init bound 1/sqrt(fan_in), fan_in = n_inputs (inputs.shape[1]).
    var fan_in: Scalar[dtype] = inputs.shape[1]
    var bound = q_sqrt(fan_in)
    # NOTE: the registration order of g.param calls (weights, then bias) is
    # load-bearing — the onnx loader assigns initializers to params by position.
    var weights = g.param(
        TensorShape(inputs.shape[1], n_outputs),
        init=Param("random_uniform", -bound, bound)
        # init=Param("random_uniform", 1) # NOTE: mode: fan_out required as weight are defined transposed
    )
    var b = g.param(TensorShape(n_outputs), init=Param("random_uniform", -bound, bound))

    var res = g.op(OP.DOT, inputs, weights)

    return g.op(OP.ADD, res, b)
| basalt/basalt/nn/layers/linear.mojo | false |
from basalt import Tensor, TensorShape
from collections.optional import Optional
from basalt import Graph, Symbol, OP
from basalt.autograd.attributes import AttributeVector, Attribute
fn set_static_stride(
    kernel_size: StaticIntTuple[2], stride: Optional[Int] = None
) -> StaticIntTuple[2]:
    """Resolve an optional scalar stride: broadcast it to both axes when given,
    otherwise default to the kernel size (non-overlapping windows)."""
    if not stride:
        return kernel_size
    var s = stride.value()[]
    return StaticIntTuple[2](s, s)
fn MaxPool2d(
    inout g: Graph,
    inputs: Symbol,
    kernel_size: StaticIntTuple[2],
    stride: Optional[Int] = None,
    padding: StaticIntTuple[2] = 0,
    dilation: StaticIntTuple[2] = 1,
) -> Symbol:
    """
    A 2D Max Pooling Layer.

    Kernel is unaware of the in_channels and out_channels of the input tensor.
    kernel.size     (kX, kY)

    When `stride` is None it defaults to the kernel size (non-overlapping
    windows); otherwise the scalar is used for both axes.
    """
    # TODO: assert padding <= kernel_size / 2 (at compile time)
    var stride_temp = set_static_stride(kernel_size, stride)

    # Delegate to the overload that takes an explicit 2D stride.
    return MaxPool2d(g, inputs, kernel_size, stride_temp, padding, dilation)
fn MaxPool2d(
    inout g: Graph,
    inputs: Symbol,
    kernel_size: StaticIntTuple[2],
    stride: StaticIntTuple[2],  # stride should be 1 or more
    padding: StaticIntTuple[2] = 0,
    dilation: StaticIntTuple[2] = 1,
) -> Symbol:
    """
    A 2D Max Pooling Layer (explicit per-axis stride).

    Kernel is unaware of the in_channels and out_channels of the input tensor.
    kernel.size     (kX, kY)
    """
    # TODO: assert padding <= kernel_size / 2 (at compile time)
    # Pooling has no learnable parameters: a single graph op carries all config.
    return g.op(
        OP.MAXPOOL2D,
        inputs,
        attributes=AttributeVector(
            Attribute("kernel_size", kernel_size),
            Attribute("padding", padding),
            Attribute("stride", stride),
            Attribute("dilation", dilation),
        ),
    )
# # TODO
| basalt/basalt/nn/layers/pool.mojo | false |
from math import nan
from math.limit import inf
alias ScalarBytes = DType.uint64.sizeof()
@register_passable("trivial")
struct Bytes[capacity: Int](Stringable, CollectionElement, EqualityComparable):
    """
    Static sequence of bytes.

    Fixed-capacity, zero-padded byte buffer. Strings longer than `capacity`
    are silently truncated on construction.
    """

    var data: StaticTuple[UInt8, capacity]

    @always_inline("nodebug")
    fn __init__(inout self):
        """Zero-initialize all bytes."""
        var data = StaticTuple[UInt8, capacity]()

        @unroll
        for i in range(capacity):
            data[i] = 0

        self.data = data

    @always_inline("nodebug")
    fn __init__(inout self, s: String):
        """Copy the ASCII bytes of `s` (truncated to `capacity`), zero-padded."""
        var data = StaticTuple[UInt8, capacity]()
        var length = len(s)

        @unroll
        for i in range(capacity):
            data[i] = ord(s[i]) if i < length else 0

        self.data = data

    @always_inline("nodebug")
    fn __len__(self) -> Int:
        """Always the static `capacity`, not the logical string length."""
        return capacity

    @always_inline("nodebug")
    fn __setitem__(inout self, index: Int, value: UInt8):
        self.data[index] = value

    @always_inline("nodebug")
    fn __getitem__(self, index: Int) -> UInt8:
        return self.data[index]

    @always_inline("nodebug")
    fn __eq__(self, other: Self) -> Bool:
        """Byte-wise equality over the full capacity (padding included)."""
        @unroll
        for i in range(capacity):
            if self[i] != other[i]:
                return False
        return True

    @always_inline("nodebug")
    fn __ne__(self, other: Self) -> Bool:
        # DRY: the original duplicated the negated comparison loop here.
        return not self.__eq__(other)

    @always_inline("nodebug")
    fn __str__(self) -> String:
        """Decode to a String, skipping zero bytes."""
        var result: String = ""

        @unroll
        for i in range(capacity):
            var val = self[i]
            if val != 0:
                result += chr(int(val))

        return result
fn scalar_to_bytes[
    dtype: DType, Size: Int = ScalarBytes
](value: Scalar[dtype]) -> Bytes[Size]:
    """Serialize a scalar into the first `ScalarBytes` bytes of a `Bytes[Size]`,
    little-endian, after widening to the 64-bit type of the same family."""
    constrained[Size >= ScalarBytes, "Size must be at least ${ScalarBytes}"]()

    # Widen (e.g. float32 -> float64) so all dtypes round-trip through uint64.
    var bits = bitcast[DType.uint64](value.cast[expand_type[dtype]()]())
    var data = Bytes[Size]()

    @unroll
    for i in range(ScalarBytes):
        # Byte i holds bits [8i, 8i+8) — little-endian layout.
        data[i] = (bits >> (i << 3)).cast[DType.uint8]()

    return data
fn bytes_to_scalar[dtype: DType](data: Bytes) -> Scalar[dtype]:
    """Inverse of `scalar_to_bytes`: reassemble a little-endian uint64 from the
    first `ScalarBytes` bytes, then bit-cast and narrow back to `dtype`."""
    constrained[data.capacity >= ScalarBytes, "Size must be at least ${ScalarBytes}"]()

    var bits: UInt64 = 0

    @unroll
    for i in range(ScalarBytes):
        # Byte i contributes bits [8i, 8i+8).
        bits |= data[i].cast[DType.uint64]() << (i << 3)

    return bitcast[expand_type[dtype]()](bits).cast[dtype]()
fn expand_type[dtype: DType]() -> DType:
    """Map a numeric dtype to its 64-bit counterpart:
    floating point -> float64, signed int -> int64, unsigned int -> uint64.
    Compile-time error for non-numeric dtypes."""
    @parameter
    if dtype.is_floating_point():
        return DType.float64
    elif dtype.is_signed():
        return DType.int64
    elif dtype.is_integral():
        # Reached only for unsigned integrals (signed handled above).
        return DType.uint64
    constrained[False, "Type must be numeric"]()
    return DType.invalid
| basalt/basalt/utils/bytes.mojo | false |
<filename>basalt/basalt/utils/collection.mojo
from math import max, divmod
from memory.unsafe_pointer import UnsafePointer, initialize_pointee_move, destroy_pointee
from basalt import Tensor, Symbol
struct Collection(CollectionElement, Sized):
    """
    A growable collection of tensors, each keyed by a symbol name (UInt32).

    Lookup is a linear scan accelerated with SIMD compares; see `get_index`.
    """

    var size: Int
    var capacity: Int
    var data: UnsafePointer[Tensor[dtype]]
    var symbols: DTypePointer[DType.uint32]

    @always_inline("nodebug")
    fn __init__(inout self, *, capacity: Int = 0):
        """
        Initializes a new Collection with the given capacity.
        """
        self.size = 0
        self.capacity = capacity
        self.data = UnsafePointer[Tensor[dtype]].alloc(capacity)
        self.symbols = DTypePointer[DType.uint32].alloc(capacity)

    @always_inline("nodebug")
    fn __moveinit__(inout self, owned existing: Self):
        """
        Move initializes a Collection from an existing one (steals the buffers).
        """
        self.size = existing.size
        self.capacity = existing.capacity
        self.data = existing.data
        self.symbols = existing.symbols

    @always_inline("nodebug")
    fn __copyinit__(inout self, existing: Self):
        """
        Copy initializes a Collection from an existing one (deep copy).
        """
        self.capacity = existing.capacity
        self.size = existing.size
        self.data = UnsafePointer[Tensor[dtype]].alloc(existing.capacity)
        self.symbols = DTypePointer[DType.uint32].alloc(existing.capacity)
        memcpy(self.symbols, existing.symbols, existing.capacity)
        for i in range(existing.size):
            initialize_pointee_move((self.data + i), (existing.data + i)[])

    @always_inline("nodebug")
    fn __del__(owned self):
        """
        Destroys each stored tensor, then frees both buffers.
        """
        for i in range(self.size):
            destroy_pointee((self.data + i))
        if self.data:
            self.data.free()
        if self.symbols:
            self.symbols.free()

    @always_inline("nodebug")
    fn __len__(self) -> Int:
        """
        Returns the number of elements in the Collection.
        """
        return self.size

    @always_inline("nodebug")
    fn _realloc(inout self, new_capacity: Int):
        """
        Reallocates the Collection to the new capacity, moving every element.
        """
        var new_data = UnsafePointer[Tensor[dtype]].alloc(new_capacity)
        var new_symbols = DTypePointer[DType.uint32].alloc(new_capacity)

        for i in range(self.size):
            initialize_pointee_move((new_data + i), (self.data + i)[])
            new_symbols[i] = self.symbols[i]

        self.data.free()
        self.symbols.free()

        self.data = new_data
        self.symbols = new_symbols
        self.capacity = new_capacity

    @always_inline("nodebug")
    fn append(inout self, owned value: Tensor[dtype], symbol: Symbol):
        """
        Appends a tensor and its associated symbol to the Collection.
        """
        self.append(value ^, symbol.name)

    @always_inline("nodebug")
    fn append(inout self, owned value: Tensor[dtype], symbol_name: UInt32):
        """
        Appends a tensor and its associated symbol name, growing (x2) if full.
        """
        if self.size >= self.capacity:
            self._realloc(max(1, self.capacity * 2))
        initialize_pointee_move((self.data + self.size), value ^)
        self.symbols[self.size] = symbol_name
        self.size += 1

    @always_inline("nodebug")
    fn get_index(self, symbol_name: UInt32) -> Int:
        """
        Returns the index of the tensor with the given symbol name, or -1.
        """
        alias factor = 8
        # 2 -> 5.32s MNIST
        # 4 -> 4.95s MNIST
        # 8 -> 4.85s MNIST
        # 16 -> 5.19s MNIST
        # NOTE: This ideally should just be a hashmap

        # SIMD-compare `factor` symbols at a time over the full chunks only,
        # then scan the tail scalar-wise.
        # BUGFIX: the previous version let the SIMD loop read past `size` on
        # the partial chunk, and its remainder loop started at `size // factor`
        # instead of `(size // factor) * factor`, scanning the wrong elements.
        var full_chunks_end = (self.size // factor) * factor

        for i in range(0, full_chunks_end, factor):
            var elems = self.symbols.load[width=factor](i) == symbol_name
            for j in range(factor):
                if elems[j]:
                    return i + j

        for i in range(full_chunks_end, self.size):
            if self.symbols[i] == symbol_name:
                return i

        return -1

    @always_inline("nodebug")
    fn __refitem__[
        mutability: __mlir_type.i1,
        lifetime: AnyLifetime[mutability].type,
    ](
        self: Reference[Self, mutability, lifetime]._mlir_type,
        symbol: Symbol,
    ) -> Reference[Tensor[dtype], mutability, lifetime]:
        """
        Returns a reference to the tensor with the given symbol.
        NOTE(review): a missing symbol yields index -1 and an out-of-bounds
        reference — callers must only look up symbols they know are present.
        """
        var index = Reference(self)[].get_index(symbol.name)
        return (Reference(self)[].data + index)[]

    @always_inline("nodebug")
    fn clear(inout self):
        """
        Clears the Collection, removing all tensors and symbols.
        Capacity and allocations are kept for reuse.
        """
        for i in range(self.size):
            destroy_pointee((self.data + i))
        memset_zero(self.symbols, self.capacity)
        self.size = 0

    @always_inline("nodebug")
    fn set_zero(self):
        """
        Zeroes out all the tensors in the collection (in place).
        """
        for i in range(self.size):
            self.data[i].zero()
| basalt/basalt/utils/collection.mojo | false |
from testing import assert_equal
from math import min
from memory import memcpy
from basalt import dtype, nelts
from basalt import Tensor, TensorShape
@value
struct Batch[dtype: DType](CollectionElement):
    """One minibatch: an input tensor and its matching labels tensor."""

    var data: Tensor[dtype]
    var labels: Tensor[dtype]

    fn __init__(inout self, batch_data: Tensor[dtype], batch_labels: Tensor[dtype]):
        """Wrap already-built batch tensors."""
        self.data = batch_data
        self.labels = batch_labels

    fn __init__(
        inout self,
        df_data: Tensor[dtype],
        df_labels: Tensor[dtype],
        start: Int,
        batch_data_shape: TensorShape,
        batch_labels_shape: TensorShape,
    ):
        """Copy one batch out of the full dataset, starting at sample `start`."""
        # TODO: find a better way to do this
        # Links to the copies of the input tensors in model.forward()
        self.data = Tensor[dtype](batch_data_shape)
        self.labels = Tensor[dtype](batch_labels_shape)
        # strides()[0] is the element count of one sample, so
        # start * strides()[0] is the flat offset of the batch's first sample.
        memcpy(
            self.data.data(),
            df_data.data().offset(start * batch_data_shape.strides()[0]),
            batch_data_shape.num_elements(),
        )
        memcpy(
            self.labels.data(),
            df_labels.data().offset(start * batch_labels_shape.strides()[0]),
            batch_labels_shape.num_elements(),
        )

    fn __getitem__(self, index: Int) -> Tensor[dtype]:
        """0 -> data, 1 -> labels; any other index logs an error and returns
        an empty tensor instead of raising."""
        if index == 0:
            return self.data
        elif index == 1:
            return self.labels
        else:
            print("[ERROR] Batch.__getitem__(): Index out of bounds")
            return Tensor[dtype]()
@value
struct DataLoader:
    """Iterates a dataset in fixed-size batches (remainder samples are dropped).

    NOTE: __len__ returns the number of batches REMAINING, since __next__
    decrements _num_batches; the loader is single-pass per construction.
    """

    var data: Tensor[dtype]
    var labels: Tensor[dtype]
    var batch_size: Int
    var _current_index: Int   # flat sample index of the next batch's start
    var _num_batches: Int     # batches remaining (decremented by __next__)
    var _data_batch_shape: TensorShape
    var _label_batch_shape: TensorShape

    fn __init__(
        inout self,
        data: Tensor[dtype],
        labels: Tensor[dtype],
        batch_size: Int,
    ):
        self.data = data
        self.labels = labels
        self.batch_size = batch_size

        # Number of batches to iter, NOTE: ignore the remainder for now
        # var remainder = 1 if self.data.dim(0) % self.batch_size != 0 else 0
        self._current_index = 0
        self._num_batches = self.data.dim(0) // self.batch_size  # + remainder

        # Batch shapes: same as the dataset, with the leading (sample) axis
        # replaced by batch_size.
        self._data_batch_shape = self.data.shape()
        self._label_batch_shape = self.labels.shape()
        self._data_batch_shape[0] = self.batch_size
        self._label_batch_shape[0] = self.batch_size

    @always_inline
    fn __len__(self) -> Int:
        """
        Returns the number of the batches left in the dataset.
        """
        return self._num_batches

    fn __iter__(self) -> Self:
        # TODO: Starting the iterator requires to return (COPY!) the whole dataloader which containts the whole dataset
        # Does this mean that the whole dataset is copied every epoch ?!
        return self

    fn __next__(inout self) -> Batch[dtype]:
        # NOTE: ignore the remainder for now
        # var end = min(self._current_index + self.batch_size, self.data.dim(0))
        # self._data_shape[0] = end - self._current_index
        # self._label_shape[0] = end - self._current_index

        var temp_current_index = self._current_index
        self._current_index += self.batch_size
        self._num_batches -= 1

        # Batch copies its slice out of the full dataset tensors.
        return Batch[dtype](
            self.data,
            self.labels,
            temp_current_index,
            self._data_batch_shape,
            self._label_batch_shape,
        )
| basalt/basalt/utils/dataloader.mojo | false |
from algorithm import vectorize
from math import div
from basalt import dtype
from basalt import Tensor, TensorShape
from basalt.utils.tensorutils import elwise_op, tmean, tstd
struct BostonHousing:
    """Boston Housing dataset loaded from CSV: 13 feature columns + MEDV label.

    Features are standardized column-wise to zero mean / unit variance.
    """

    alias n_inputs = 13

    var data: Tensor[dtype]    # shape (N, 13), standardized
    var labels: Tensor[dtype]  # shape (N, 1), MEDV column

    fn __init__(inout self, file_path: String) raises:
        var s = read_file(file_path)
        # Skip the first and last lines
        # This does assume your last line in the file has a newline at the end
        var list_of_lines = s.split("\n")[1:-1]

        # Length is number of lines
        var N = len(list_of_lines)

        self.data = Tensor[dtype](N, self.n_inputs)  # All columns except the last one
        self.labels = Tensor[dtype](N, 1)  # Only the last column (MEDV)

        var line: List[String] = List[String]()

        # Load data in Tensor
        for item in range(N):
            line = list_of_lines[item].split(",")
            self.labels[item] = cast_string[dtype](line[-1])
            for n in range(self.n_inputs):
                self.data[item * self.n_inputs + n] = cast_string[dtype](line[n])

        # Normalize data
        # TODO: redo when tensorutils tmean2 and tstd2 are implemented
        alias nelts = simdwidthof[dtype]()
        var col = Tensor[dtype](N)
        for j in range(self.n_inputs):
            for k in range(N):
                col[k] = self.data[k * self.n_inputs + j]
            # Hoisted out of the per-row loop: the previous version recomputed
            # tmean/tstd for every row, making normalization O(N^2) per column.
            var col_mean = tmean(col)
            var col_std = tstd(col)
            for i in range(N):
                self.data[i * self.n_inputs + j] = (
                    self.data[i * self.n_inputs + j] - col_mean
                ) / col_std
struct MNIST:
    """MNIST dataset loaded from CSV: each row is `label, p0, p1, ..., p783`."""

    var data: Tensor[dtype]    # shape (N, 1, 28, 28), scaled into [0, 1]
    var labels: Tensor[dtype]  # shape (N,), digit class 0-9

    fn __init__(inout self, file_path: String) raises:
        var s = read_file(file_path)
        # Skip the first and last lines
        # This does assume your last line in the file has a newline at the end
        var list_of_lines = s.split("\n")[1:-1]

        # Length is number of lines
        var N = len(list_of_lines)

        self.data = Tensor[dtype](N, 1, 28, 28)
        self.labels = Tensor[dtype](N)

        var line: List[String] = List[String]()

        # Load data in Tensor
        for item in range(N):
            line = list_of_lines[item].split(",")
            self.labels[item] = atol(line[0])
            # Column 0 is the label, so pixel (i, j) lives at CSV index i*28+j+1.
            for i in range(self.data.shape()[2]):
                for j in range(self.data.shape()[3]):
                    self.data[item * 28 * 28 + i * 28 + j] = atol(line[i * 28 + j + 1])

        # Normalize data: pixels are 0..255, scale into [0, 1] SIMD-wise.
        alias nelts = simdwidthof[dtype]()

        @parameter
        fn vecdiv[nelts: Int](idx: Int):
            self.data.store[nelts](idx, div(self.data.load[nelts](idx), 255.0))

        vectorize[vecdiv, nelts](self.data.num_elements())
fn read_file(file_path: String) raises -> String:
    """Read and return the entire contents of the file at `file_path`."""
    with open(file_path, "r") as f:
        return f.read()
fn find_first(s: String, delimiter: String) -> Int:
    """Index of the first character of `s` equal to `delimiter`, or -1.

    Only single-character delimiters are matched (character-wise comparison).
    """
    var position = 0
    while position < len(s):
        if s[position] == delimiter:
            return position
        position += 1
    return -1
fn cast_string[dtype: DType](s: String) raises -> Scalar[dtype]:
    """
    Cast a decimal string to a scalar of `dtype`.

    Handles an optional fractional part after a single '.', and negative
    values. Raises if the digits cannot be parsed by `atol`.
    """
    var idx = find_first(s, delimiter=".")
    if idx == -1:
        # No decimal point: plain integer.
        return atol(s)

    var int_part: Scalar[dtype] = atol(s[:idx])
    var frac_str = s[idx + 1 :]
    var frac: Scalar[dtype] = atol(frac_str) / (10 ** len(frac_str))

    # BUGFIX: for negative inputs the fraction must be SUBTRACTED:
    # "-3.5" is -3 - 0.5, not -3 + 0.5. Checking the sign character also
    # fixes "-0.5", where atol("-0") == 0 loses the sign entirely.
    if len(s) > 0 and s[0] == "-":
        return int_part - frac
    return int_part + frac
| basalt/basalt/utils/datasets.mojo | false |
from python import Python
from pathlib import Path
from collections import Set
from basalt.nn.model import Parameters
from basalt.nn.tensor import Tensor, TensorShape
from basalt.autograd.attributes import Attribute, AttributeType
from basalt.autograd.ops import OP
from basalt.autograd.graph import Node
from .tensor_creation_utils import to_numpy, copy_np_data
# NOTE: Maybe we could create our own model representation and from there convert to onnx or others (well we already have it in reallity)
# NOTE: Torch doesn't import onnx, need onnx2torch and it doesn't support operators like reshape?
fn make_onnx_attribute(op: OP, attr: Attribute) raises -> PythonObject:
    """Convert a basalt Attribute into an onnx AttributeProto.

    Renames conv/pool attribute names to their onnx spellings and expands the
    2-element `padding` into onnx's 4-element `pads` (assumes symmetric
    padding on each axis). Raises for unsupported attribute types/names.
    """
    var onnx = Python.import_module("onnx")
    var onnx_helper = Python.import_module("onnx.helper")

    var attr_name = str(attr.name)
    var attr_value: PythonObject

    # Unpack the basalt attribute payload into a Python value.
    if attr.type == AttributeType.FLOAT:
        attr_value = attr.to_scalar[DType.float64]()
    elif attr.type == AttributeType.INT:
        attr_value = attr.to_int()
    elif attr.type == AttributeType.STRING:
        attr_value = attr.to_string()
    elif attr.type == AttributeType.INTS:
        var temp = attr.to_shape()
        var shape = PythonObject([])
        for i in range(temp.rank()):
            shape.append(temp[i])
        attr_value = shape
    else:
        raise Error("Unsupported attribute type")

    # basalt -> onnx attribute-name mapping for conv/pool operators.
    if op == OP.CONV2D or op == OP.MAXPOOL2D:
        if attr_name == "dilation":
            attr_name = "dilations"
        elif attr_name == "kernel_size":
            attr_name = "kernel_shape"
        elif attr_name == "stride":
            attr_name = "strides"
        elif attr_name == "padding":
            attr_name = "pads"
        else:
            raise Error("Unsupported attribute name for operator " + str(op))

    if (op == OP.CONV2D or op == OP.MAXPOOL2D) and attr_name == "pads":
        # Special case for pads in conv and maxpool, onnx wants pads to be [x1_begin, x2_begin…x1_end, x2_end,…],
        # so duplicate the symmetric (begin) values as the end values.
        attr_value.append(attr_value[0])
        attr_value.append(attr_value[1])

    return onnx_helper.make_attribute(attr_name, attr_value)
fn make_onnx_operator_type(op_type: OP) raises -> String:
    """Map a basalt OP to the corresponding ONNX operator name.

    Raises for ops that have no direct onnx equivalent (SUM/MEAN/MAX need
    their axis as a dynamic input in onnx; FMA does not exist in onnx).
    """
    if op_type == OP.ADD:
        return "Add"
    elif op_type == OP.SUB:
        return "Sub"
    elif op_type == OP.MUL:
        return "Mul"
    elif op_type == OP.DOT:
        return "MatMul"
    elif op_type == OP.DIV:
        return "Div"
    elif op_type == OP.EXP:
        return "Exp"
    elif op_type == OP.LOG:
        return "Log"
    elif op_type == OP.SUM:
        # Special case, axis isn't an attribute, instead it is an input, because it can be dynamic
        raise Error(str(op_type) + " is not supported right now for conversion to onnx")
        # return "ReduceSum"
    elif op_type == OP.MEAN:
        raise Error(str(op_type) + " is not supported right now for conversion to onnx")
        # return "ReduceMean"
    elif op_type == OP.MAX:
        raise Error(str(op_type) + " is not supported right now for conversion to onnx")
        # return "ReduceMax"
    elif op_type == OP.CONV2D:
        return "Conv"
    elif op_type == OP.MAXPOOL2D:
        return "MaxPool"
    elif op_type == OP.RELU:
        return "Relu"
    elif op_type == OP.TANH:
        return "Tanh"
    elif op_type == OP.SIGMOID:
        return "Sigmoid"
    elif op_type == OP.RESHAPE:
        return "Reshape"
    elif op_type == OP.TRANSPOSE:
        return "Transpose"
    elif op_type == OP.FLATTEN:
        return "Flatten"
    elif op_type == OP.SQUEEZE:
        return "Squeeze"
    elif op_type == OP.UNSQUEEZE:
        return "Unsqueeze"
    elif op_type == OP.CONCAT:
        return "Concat"
    elif op_type == OP.SPLIT:
        return "Split"
    elif op_type == OP.CLIP:
        return "Clip"
    elif op_type == OP.FMA:
        raise Error(str(op_type) + " operator is not supported in onnx")
    else:
        raise Error("Unsupported operator type " + str(op_type))
# --- Loader and exporter ---
fn load_onnx_model(
    model_path: Path, inout model_parameters: Parameters, g: Graph
) raises:
    """Load onnx initializers into the model's parameter tensors BY POSITION.

    Initializer i is copied into g.params.symbols[i]; names are ignored, so
    the graph must declare its params in the same order the onnx file stores
    them. Transposed weights (e.g. torch Gemm) are detected and transposed
    back. Raises on shape mismatch or unsupported tensor data types.
    """
    # Simple onnx data loader where we load the data in order (so we need to have the correct order of the weights and biases in the model. We don't use the names for the loading)
    var onnx = Python.import_module("onnx")
    var onnx_model = onnx.load(str(model_path))

    for i in range(len(onnx_model.graph.initializer)):
        var tensor = onnx_model.graph.initializer[i]

        if (
            tensor.data_type == onnx.TensorProto.FLOAT
            or tensor.data_type == onnx.TensorProto.INT32
            or tensor.data_type == onnx.TensorProto.INT64
        ):
            var data_np = onnx.numpy_helper.to_array(tensor)

            # Get the shape of data onnx
            var temp = List[Int]()
            for j in range(len(data_np.shape)):
                temp.append(int(data_np.shape[j].to_float64()))
            var data_shape = TensorShape(temp)

            # Compare the shape of the data with the shape of the model tensor
            var model_tensor_shape = g.params.symbols[i].shape

            if data_shape != model_tensor_shape:
                # check if the shape is transposed (reversed), we do this comparison because torch can save sove weights transposed (like gemm operator)

                var raise_error_flag = True
                if data_shape.rank() == model_tensor_shape.rank():
                    # Count how many dims match when one shape is reversed;
                    # a full match means the weights are simply transposed.
                    var count = 0
                    for j in range(model_tensor_shape.rank()):
                        if (
                            data_shape[data_shape.rank() - j - 1]
                            != model_tensor_shape[j]
                        ):
                            break
                        count += 1

                    if count == model_tensor_shape.rank():
                        raise_error_flag = False
                        data_np = data_np.transpose()

                if raise_error_flag:
                    raise Error(
                        "Shape mismatch for tensor "
                        + str(i)
                        + ". Expected shape: "
                        + model_tensor_shape
                        + ", got shape: "
                        + data_shape
                    )

            copy_np_data(model_parameters.tensors[g.params.symbols[i]], data_np)
        else:
            raise Error("Unsupported data type")
fn create_attributes_and_constant_inputs(node: Node, node_number: Int) raises -> (List[PythonObject], List[PythonObject]):
    """Split a node's basalt attributes into onnx attributes and Constant inputs.

    Some onnx operators (Reshape, Clip, Squeeze, Unsqueeze) take as dynamic
    inputs what basalt stores as attributes; for those, a Constant node is
    created and returned in the second list, wired in by name
    "<op>_<attr>_<node_number>". Everything else becomes a regular attribute.
    """
    var onnx = Python.import_module("onnx")
    var np = Python.import_module("numpy")

    var attributes = List[PythonObject]()
    var inputs = List[PythonObject]()

    for i in range(len(node.attributes)):
        var attr = node.attributes[i]

        @parameter
        fn to_np_array(attr: Attribute) raises -> PythonObject:
            # Convert an attribute payload to a numpy array wrapped as an onnx
            # TensorProto, for use as a Constant node's value.
            # BUGFIX: the previous version raised up-front for any non-INTS
            # attribute, which made the FLOAT/INT branches below unreachable
            # and broke e.g. CLIP min/max export. It also forced np.int64 on
            # float values, truncating them.
            var values_np: PythonObject
            if attr.type == AttributeType.INTS:
                var shape = attr.to_shape()
                values_np = PythonObject([])
                for j in range(shape.rank()):
                    values_np.append(shape[j])
            elif attr.type == AttributeType.FLOAT:
                values_np = attr.to_scalar[DType.float64]()
            elif attr.type == AttributeType.INT:
                values_np = attr.to_int()
            else:
                raise Error("Unsupported attribute type")

            if attr.type == AttributeType.FLOAT:
                # Keep float attributes as float64 — int64 would truncate.
                var np_array_float = np.array(values_np, dtype=np.float64)
                return onnx.numpy_helper.from_array(np_array_float)
            var np_array = np.array(values_np, dtype=np.int64)
            return onnx.numpy_helper.from_array(np_array)

        # Special cases where attributes are considered as inputs, so we create Constant inputs
        if node.operator == OP.RESHAPE:
            if str(attr.name) == "shape":
                var outputs = PythonObject([])
                outputs.append(str(node.operator) + "_" + str(attr.name) + "_" + str(node_number))
                var temp_node = onnx.helper.make_node(
                    op_type="Constant",
                    inputs=[],
                    outputs=outputs,
                    value=to_np_array(attr),
                )
                inputs.append(temp_node)
        elif node.operator == OP.CLIP:
            if str(attr.name) == "min" or str(attr.name) == "max":
                var outputs = PythonObject([])
                outputs.append(str(node.operator) + "_" + str(attr.name) + "_" + str(node_number))
                var temp_node = onnx.helper.make_node(
                    op_type="Constant",
                    inputs=[],
                    outputs=outputs,
                    value=to_np_array(attr),
                )
                inputs.append(temp_node)
        elif node.operator == OP.SQUEEZE or node.operator == OP.UNSQUEEZE:
            if str(attr.name) == "dims":
                var outputs = PythonObject([])
                outputs.append(str(node.operator) + "_" + str(attr.name) + "_" + str(node_number))
                var temp_node = onnx.helper.make_node(
                    op_type="Constant",
                    inputs=[],
                    outputs=outputs,
                    value=to_np_array(attr),
                )
                inputs.append(temp_node)
        else:
            # Plain attribute: translate name/value to onnx conventions.
            var attr_value = make_onnx_attribute(node.operator, attr)
            attributes.append(attr_value)

    return (attributes, inputs)
fn export_onnx_model(model_path: Path, model_parameters: Parameters, g: Graph) raises:
    """Export the basalt graph + parameter tensors as an onnx model file.

    Builds initializers from g.params, one onnx node per graph node (plus any
    Constant inputs for attribute-as-input operators), value_info entries for
    intermediates, and graph inputs/outputs; validates with onnx.checker
    before saving. All tensors are exported as FLOAT (see TODOs).
    """
    # Create onnx model with data and nodes
    var onnx = Python.import_module("onnx")
    var onnx_helper = Python.import_module("onnx.helper")

    var graph = onnx_helper.make_graph(
        nodes=[],
        name="basalt_model",
        inputs=[],
        outputs=[],
        initializer=[],
        value_info=[],
    )

    # Tracks intermediate tensor names already added to value_info.
    var visited = Set[String]()

    # Create onnx initializers
    for i in range(len(g.params.symbols)):
        var tensor = g.params.symbols[i]
        var tensor_data = model_parameters.tensors[tensor]
        var tensor_np = to_numpy(tensor_data)

        # Create onnx tensor
        var onnx_tensor_data = onnx_helper.make_tensor(
            name=str(tensor.name),
            data_type=onnx.TensorProto.FLOAT,
            dims=tensor_np.shape,
            vals=tensor_np,
        )

        graph.initializer.append(onnx_tensor_data)

    # Create onnx nodes
    for i in range(len(g.nodes)):
        var node = g.nodes[i]

        var op_type = make_onnx_operator_type(node.operator)
        var inputs = PythonObject([])
        var outputs = PythonObject([])
        # NOTE(review): `i` is an Int concatenated onto a String here — confirm
        # this produces the intended "_node<i>" suffix (str(i) would be explicit).
        var name = str(node.operator) + "_node" + i

        for j in range(len(node.inputs)):
            inputs.append(str(node.inputs[j].name))
        for j in range(len(node.outputs)):
            outputs.append(str(node.outputs[j].name))

            # Process intermediate: register each output tensor's shape once.
            if str(node.outputs[j].name) not in visited:
                visited.add(str(node.outputs[j].name))
                var intermediate_tensor = node.outputs[j]
                var intermediate_shape = intermediate_tensor.shape
                var name = str(intermediate_tensor.name)
                var dtype = onnx.TensorProto.FLOAT  # TODO
                # NOTE(review): inner loop variable `j` shadows the outer `j` —
                # harmless if loop vars are loop-scoped, but worth renaming.
                var shape = PythonObject([])
                for j in range(intermediate_shape.rank()):
                    shape.append(intermediate_shape[j])

                # Create onnx tensor information
                var onnx_output = onnx_helper.make_tensor_value_info(name, dtype, shape)
                graph.value_info.append(onnx_output)

        # Process attributes: some become Constant input nodes instead.
        var attributes_and_inputs = create_attributes_and_constant_inputs(node, i)
        var attributes = attributes_and_inputs[0]
        var inputs_constant = attributes_and_inputs[1]
        for j in range(len(inputs_constant)):
            inputs.append(inputs_constant[j].output[0])
            graph.node.append(inputs_constant[j])

        # Create onnx node
        var onnx_node = onnx_helper.make_node(
            op_type,
            inputs,
            outputs,
            name,
        )
        for attribute in attributes:
            onnx_node.attribute.append(attribute[])

        graph.node.append(onnx_node)

    # Create onnx inputs
    for i in range(len(g.inputs)):
        var input_tensor = g.inputs[i]
        var input_shape = input_tensor.shape

        var name = str(input_tensor.name)
        var dtype = onnx.TensorProto.FLOAT  # TODO
        var shape = PythonObject([])
        for j in range(input_shape.rank()):
            shape.append(input_shape[j])

        # Create onnx tensor information
        var onnx_input = onnx_helper.make_tensor_value_info(name, dtype, shape)
        graph.input.append(onnx_input)

    # Create onnx outputs
    for i in range(len(g.outputs)):
        var output_tensor = g.outputs[i]
        var output_shape = output_tensor.shape

        var name = str(output_tensor.name)
        var dtype = onnx.TensorProto.FLOAT  # TODO
        var shape = PythonObject([])
        for j in range(output_shape.rank()):
            shape.append(output_shape[j])

        # Create onnx tensor information
        var onnx_output = onnx_helper.make_tensor_value_info(name, dtype, shape)
        graph.output.append(onnx_output)

    # Create onnx model
    var onnx_model = onnx_helper.make_model(graph, producer_name="basalt")

    # Save onnx model: validate first, then write to disk.
    onnx.checker.check_model(onnx_model)
    onnx.save(onnx_model, str(model_path))
| basalt/basalt/utils/onnx_utils.mojo | false |
from time import now
from math import min
from memory import memset
from basalt.autograd.node import Node
@always_inline("nodebug")
fn fit_string[num: Int](s: String) -> String:
    """Return `s` padded with spaces (or truncated) to exactly `num` characters.

    Builds the result in a raw buffer of num+1 bytes; the trailing byte is the
    NUL terminator expected by the String(ptr, len) constructor, which takes
    ownership of the allocation.
    """
    var data = DTypePointer[DType.int8]().alloc(num + 1)
    var copy_len = min(num, len(s))

    memcpy(data, s._as_ptr(), copy_len)
    memset(data + copy_len, ord(" "), num - copy_len)  # pad the tail with spaces
    data[num] = 0

    return String(data, num + 1)
@always_inline("nodebug")
fn truncate_decimals[num: Int](s: String) -> String:
    """Limit a decimal string to `num` fractional digits (space-padded),
    preserving any scientific-notation exponent suffix ("e...").

    On a split failure the input is returned unchanged (with a warning).
    """
    try:
        var parts = s.split(delimiter=".")
        var truncated = parts[0]

        if len(parts) > 1:
            # Keep the exponent (if any) intact; truncate only the mantissa's
            # fractional digits.
            var decimal_parts = parts[1].split(delimiter="e")
            truncated += "." + fit_string[num](decimal_parts[0])

            if len(decimal_parts) > 1:
                truncated += "e" + decimal_parts[1]

        return truncated
    except e:
        print("[WARNING] could not truncate decimals: ", e)
        return s
@value
struct PerfMetricsValues:
    """Accumulated wall-clock time (ns) for one graph node."""

    var node: Node
    var ns: Float64  # cumulative nanoseconds across all recorded passes
@value
struct PerfMetrics:
var forward_perf_metrics: List[PerfMetricsValues]
var backward_perf_metrics: List[PerfMetricsValues]
var epochs_forward: Int
var epochs_backward: Int
var start: Int
    fn __init__(inout self):
        """Empty metrics: no nodes tracked, no passes recorded."""
        self.forward_perf_metrics = List[PerfMetricsValues]()
        self.backward_perf_metrics = List[PerfMetricsValues]()
        self.epochs_forward = 0
        self.epochs_backward = 0
        self.start = 0

    fn __init__(inout self, graph: Graph):
        """Pre-populate one zeroed entry per graph node for both passes."""
        self.forward_perf_metrics = List[PerfMetricsValues]()
        self.backward_perf_metrics = List[PerfMetricsValues]()

        self.forward_perf_metrics.reserve(graph.nodes.size)
        self.backward_perf_metrics.reserve(graph.nodes.size)

        for i in range(graph.nodes.size):
            self.forward_perf_metrics.append(PerfMetricsValues(graph.nodes[i], 0.0))
            self.backward_perf_metrics.append(PerfMetricsValues(graph.nodes[i], 0.0))

        self.epochs_forward = 0
        self.epochs_backward = 0
        self.start = 0
    fn start_forward_pass(inout self):
        """Record the start timestamp for a node's forward execution."""
        self.start = now()

    fn end_forward_pass(inout self, pos: Int):
        """Accumulate elapsed ns into the node at `pos` and count the sample.

        NOTE: epochs_forward is incremented once per NODE measurement, not per
        epoch, so the printed average divides total ns by sample count.
        """
        self.forward_perf_metrics[pos].ns += now() - self.start
        self.epochs_forward += 1

    fn start_backward_pass(inout self):
        """Record the start timestamp for a node's backward execution."""
        self.start = now()

    fn end_backward_pass(inout self, pos: Int):
        """Accumulate elapsed ns into the node at `pos` and count the sample."""
        self.backward_perf_metrics[pos].ns += now() - self.start
        self.epochs_backward += 1
    fn print_perf_metrics[
        type_part: String
    ](self, time_format: String = "ns", print_shape: Bool = False):
        """Print a per-node timing table for the forward or backward pass.

        type_part: compile-time "Forward" or "Backward" (enforced below).
        time_format: "ns" (default), "ms", or "s" for the time column.
        print_shape: also print each node's output/input shapes.
        """
        constrained[type_part == "Forward" or type_part == "Backward", "Only 'Forward' or 'Backward' are accepted types."]()
        alias is_forward = type_part == "Forward"

        var metrics = self.forward_perf_metrics if is_forward else self.backward_perf_metrics
        var epochs = self.epochs_forward if is_forward else self.epochs_backward
        var size = len(metrics)
        var total_time: Float64 = 0

        if size == 0:
            return

        if is_forward:
            print("\n\nForward pass performance metrics:")
        else:
            print("\n\nBackward pass performance metrics:")

        for i in range(size):
            total_time += metrics[i].ns

        # Fixed-width header; columns are padded with fit_string.
        var header = (
            fit_string[5]("Node")
            + "| "
            + fit_string[15]("Operator")
            + "| "
            + fit_string[20]("Time [" + time_format + "]")
            + "| "
            + fit_string[20]("Percentage [%]")
        )
        if print_shape:
            header += "| " + fit_string[70]("Shape\t <out> = OP( <in1>, <in2>, <in3> )")
        print(header)

        # Separator line of '-' the same width as the header.
        var header_length = len(header)
        var seperator = DTypePointer[DType.int8]().alloc(header_length + 1)
        memset(seperator, ord("-"), header_length)
        seperator[header_length] = 0
        print(String(seperator, len(header) + 1))

        for i in range(size):
            var value = metrics[i]
            # Average per recorded sample.
            # NOTE(review): epochs == 0 with size > 0 gives a float division
            # by zero (inf/nan in the output) — confirm callers always record
            # at least one pass before printing.
            var time = value.ns / epochs

            if time_format == "ms":
                time /= 1e6
            elif time_format == "s":
                time /= 1e9

            var percentage = (value.ns / total_time) * 100

            var print_value = (
                fit_string[5](str(i))
                + "| "
                + fit_string[15](value.node.operator)
                + "| "
                + fit_string[20](truncate_decimals[4](time))
                + "| "
                + fit_string[20](truncate_decimals[3](percentage) + " %")
                + "| "
            )

            if print_shape:
                # "<out1>, <out2> = OP( <in1>, <in2>, ... )"
                var shape_str = fit_string[15]("<" + str(value.node.outputs[0].shape) + ">")

                for j in range(1, len(value.node.outputs)):
                    shape_str += ", " + fit_string[15]("<" + str(value.node.outputs[j].shape) + ">")

                shape_str += fit_string[7](" = OP(") + fit_string[15]("<" + str(value.node.inputs[0].shape) + ">")

                for j in range(1, len(value.node.inputs)):
                    shape_str += ", " + fit_string[15]("<" + str(value.node.inputs[j].shape) + ">")

                shape_str += ")"

                print(print_value, end="")
                print(shape_str)
            else:
                print(print_value)

        if time_format == "ms":
            total_time /= 1e6
        elif time_format == "s":
            total_time /= 1e9

        print(
            "\nTotal average "
            + type_part
            + " time: "
            + str(total_time)
            + " "
            + time_format
        )
fn print_forward_perf_metrics(self, time_format: String = "ns", print_shape: Bool = False):
self.print_perf_metrics["Forward"](time_format, print_shape)
fn print_backward_perf_metrics(self, time_format: String = "ns", print_shape: Bool = False):
self.print_perf_metrics["Backward"](time_format, print_shape)
| basalt/basalt/utils/perf_utils.mojo | false |
from basalt import Tensor
from random import rand, randn
from algorithm import vectorize
@always_inline
fn rand_uniform[dtype: DType](inout res: Tensor[dtype], low: Scalar[dtype], high: Scalar[dtype]):
    """Fill `res` in place with uniform random values drawn from [low, high)."""
    # Draw uniform [0, 1) samples directly into the tensor's buffer, then
    # rescale every chunk onto [low, high) with a fused multiply-add.
    var span = high - low
    rand[dtype](res.data(), res.num_elements())

    @parameter
    fn remap[width: Int](idx: Int):
        # x * span + low maps [0, 1) onto [low, high).
        res.store[width](idx, res.load[width](idx).fma(span, low))

    vectorize[remap, nelts](res.num_elements())
@always_inline
fn rand_normal[dtype: DType](inout res: Tensor[dtype], mean: Float64, std: Float64):
    """Fill `res` in place with normally distributed random values.

    NOTE(review): std**2 is passed as the third argument, i.e. this assumes
    `randn` takes the variance rather than the standard deviation — confirm
    against the stdlib `random.randn` signature for this Mojo version.
    """
    randn[dtype](res.data(), res.num_elements(), mean, std**2)
@register_passable("trivial")
struct MersenneTwister:
    """
    Pseudo-random generator Mersenne Twister (MT19937-32bit).

    Classic MT19937: 624-word state, lazily regenerated ("twisted") in bulk
    every 624 draws, with the standard tempering transform applied per draw.
    Deterministic for a given seed.
    """

    alias N: Int = 624  # state size in 32-bit words
    alias M: Int = 397  # twist offset
    alias MATRIX_A: Int32 = 0x9908B0DF  # twist matrix constant
    alias UPPER_MASK: Int32 = 0x80000000  # most significant bit
    alias LOWER_MASK: Int32 = 0x7FFFFFFF  # lower 31 bits
    alias TEMPERING_MASK_B: Int32 = 0x9D2C5680
    alias TEMPERING_MASK_C: Int32 = 0xEFC60000

    var state: StaticTuple[Int32, Self.N]
    var index: Int  # next state word to output; N means "twist needed"

    fn __init__(inout self, seed: Int):
        """Seed the state with the standard MT19937 initialization recurrence."""
        alias W: Int = 32
        alias F: Int32 = 1812433253  # Knuth multiplier for seeding
        alias D: Int32 = 0xFFFFFFFF

        self.index = Self.N  # force a twist on the first call to next()
        self.state = StaticTuple[Int32, Self.N]()
        self.state[0] = seed & D  # seed truncated to 32 bits

        for i in range(1, Self.N):
            var prev = self.state[i - 1]
            self.state[i] = (F * (prev ^ (prev >> (W - 2))) + i) & D

    fn next(inout self) -> Int32:
        """Return the next 32-bit pseudo-random word."""
        if self.index >= Self.N:
            # Twist: regenerate all N words from the previous state.
            for i in range(Self.N):
                var x = (self.state[i] & Self.UPPER_MASK) + (self.state[(i + 1) % Self.N] & Self.LOWER_MASK)
                var xA = x >> 1
                if x % 2 != 0:
                    xA ^= Self.MATRIX_A
                self.state[i] = self.state[(i + Self.M) % Self.N] ^ xA
            self.index = 0

        # Tempering: improve equidistribution of the raw state word.
        var y = self.state[self.index]
        y ^= y >> 11
        y ^= (y << 7) & Self.TEMPERING_MASK_B
        y ^= (y << 15) & Self.TEMPERING_MASK_C
        y ^= y >> 18

        self.index += 1

        return y

    fn next_ui8(inout self) -> UInt8:
        """Return the low byte of the next 32-bit draw."""
        return self.next().value & 0xFF
| basalt/basalt/utils/rand_utils.mojo | false |
from sys.info import num_physical_cores
from algorithm import vectorize, parallelize, swap
from memory import memset_zero, memset, stack_allocation
from math import sqrt, pow, equal, max, min, add, div, divmod, abs
from random import rand
from basalt import Tensor, TensorShape
from basalt.nn.tensor import MAX_RANK
@always_inline
fn fill[dtype: DType](inout t: Tensor[dtype], val: Scalar[dtype]):
    """Set every element of `t` to `val`, in place."""

    @parameter
    fn set_chunk[width: Int](i: Int):
        # splat(val) broadcasts the scalar across the SIMD chunk.
        t.store[width](i, t.load[width](i).splat(val))

    vectorize[set_chunk, nelts](t.num_elements())
# ----- Functions to access positions in tensor data -----
# ----- Functions to access positions in tensor data -----
@always_inline
fn get_real_index[
    size: Int, strides_shape: StaticIntTuple[size], broadcast_shape: TensorShape
](i: Int) -> Int:
    """Map a linear index `i` in the broadcasted result back to an element
    offset in the (possibly smaller) source tensor.

    `strides_shape` carries a stride of 0 for broadcasted dimensions, so those
    dimensions contribute nothing to the returned offset. The loop over
    dimensions is fully unrolled at compile time.
    """
    # broadcast_shape is of same rank as strides_shape (the not broadcasted shape), because of broadcast_calculate_strides
    var index_res = 0
    var linear_index = i

    @parameter
    fn unroll_dims[dim: Int]():
        # Walk dimensions from innermost (last) to outermost.
        alias j = size - 1 - dim
        alias stride_value = strides_shape[j]
        alias shape_value = broadcast_shape[j]

        # Peel off this dimension's coordinate from the linear index.
        var divmod_index = divmod(linear_index, shape_value)

        index_res += divmod_index[1] * stride_value
        linear_index = divmod_index[0]

    unroll[unroll_dims, size]()

    return index_res
# ----- Broadcast functions -----
# ----- Broadcast functions -----
@always_inline
fn broadcast_shapes(s1: TensorShape, s2: TensorShape) -> TensorShape:
    """Compute the numpy-style broadcast of two shapes.

    Dimensions are aligned from the right; a dimension of 1 broadcasts to the
    other operand's size. On incompatible dimensions this only prints an error
    and leaves that result dimension at its -1 initializer (no raise).
    """
    var ndim = max(s1.rank(), s2.rank())
    var diff = abs(s1.rank() - s2.rank())

    # big = higher-rank shape; small is aligned to big's trailing dims.
    var big = s1 if s1.rank() > s2.rank() else s2
    var small = s2 if s1.rank() > s2.rank() else s1

    var res = StaticIntTuple[MAX_RANK](-1)

    # Overlapping (rightmost) dimensions.
    for i in range(ndim - 1, diff - 1, -1):
        var a = big[i]
        var b = small[i - diff]
        if b == a:
            res[i] = a
        elif a == 1 or b == 1:
            # Exactly one of a, b is 1, so a * b is the other dimension.
            res[i] = a * b
        else:
            print("[ERROR] Shapes " + str(s1) + " and " + str(s2) + " cannot be broadcasted together.")

    # Leading dimensions only present in the bigger shape pass through.
    for i in range(diff - 1, -1, -1):
        res[i] = big[i]

    return TensorShape(rank=ndim, shape=res)
@always_inline
fn broadcast_shapes(*s: TensorShape) -> TensorShape:
    """Left-fold the pairwise broadcast over all given shapes."""
    var acc = s[0]
    for idx in range(1, len(s)):
        acc = broadcast_shapes(acc, s[idx])
    return acc
@always_inline
fn broadcast_calculate_strides[size: Int, shape: TensorShape, broadcast_shape: TensorShape]() -> StaticIntTuple[size]:
    """Compute strides for indexing `shape` as if broadcast to `broadcast_shape`.

    The result has `size` (= broadcast rank) entries: broadcasted dimensions
    (size-1 dims and the leading rank padding) get stride 0, so repeated reads
    land on the same source element; real dimensions get row-major strides.
    """
    alias shape_rank = shape.rank()
    alias diff = size - shape_rank  # leading dims that only exist after broadcast

    var strides = StaticIntTuple[size](0)

    var stride = 1
    # Row-major: innermost dimension varies fastest.
    for i in range(shape_rank - 1, -1, -1):
        if shape[i] != 1:
            strides[i + diff] = stride
            stride *= shape[i]

    return strides
# ----- Element-wise unary operations -----
@always_inline
fn elwise_transform[
func: fn[dtype: DType, nelts: Int] (x: SIMD[dtype, nelts]) -> SIMD[dtype, nelts],
](inout res: Tensor[dtype], t: Tensor[dtype]):
@parameter
fn vecmath[nelts: Int](idx: Int):
res.store[nelts](idx, func[dtype, nelts](t.load[nelts](idx)))
vectorize[vecmath, nelts](t.num_elements())
# ----- Element-wise binary operations -----
# ----- Element-wise binary operations -----
@always_inline
fn elwise_pow(inout res: Tensor[dtype], t: Tensor[dtype], x: Int):
    """Raise each element to the integer power `x`: res[i] = t[i] ** x."""

    @parameter
    fn raise_chunk[width: Int](i: Int):
        res.store[width](i, pow(t.load[width](i), x))

    vectorize[raise_chunk, nelts](t.num_elements())
@always_inline
fn elwise_op[
    t1_shape: TensorShape,
    t2_shape: TensorShape,
    func: fn[dtype: DType, nelts: Int] (
        x: SIMD[dtype, nelts], y: SIMD[dtype, nelts]
    ) -> SIMD[dtype, nelts],
](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
    """Shape-aware dispatcher for element-wise binary ops.

    Chooses at compile time (shapes are parameters) between:
    - tensor-scalar fast paths when either operand has shape (1),
    - the broadcasting kernel when shapes differ,
    - the plain equal-shape kernel otherwise.
    """
    alias broadcast: Bool = (t1_shape != t2_shape)
    alias is_scalar: Bool = (t2_shape == TensorShape(1))

    @parameter
    if t2_shape == TensorShape(1):
        # t2 is a 1-element tensor: treat it as a scalar operand.
        elwise_op[func](res, t1, t2[0])
    elif t1_shape == TensorShape(1):
        elwise_op[func](res, t1[0], t2)
    elif broadcast and not is_scalar:
        alias res_shape = broadcast_shapes(t1_shape, t2_shape)
        broadcast_elwise_op[t1_shape, t2_shape, res_shape, func](res, t1, t2)
    else:
        elwise_op[func](res, t1, t2)
@always_inline
fn elwise_op[
    func: fn[dtype: DType, nelts: Int] (
        x: SIMD[dtype, nelts], y: SIMD[dtype, nelts]
    ) -> SIMD[dtype, nelts],
](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
    """Element-wise operation on two tensors of equal shape."""

    @parameter
    fn apply_chunk[width: Int](i: Int):
        var lhs = t1.load[width](i)
        var rhs = t2.load[width](i)
        res.store[width](i, func[dtype, width](lhs, rhs))

    vectorize[apply_chunk, nelts](t1.num_elements())
@always_inline
fn elwise_op[
    func: fn[dtype: DType, nelts: Int] (
        x: SIMD[dtype, nelts], y: SIMD[dtype, nelts]
    ) -> SIMD[dtype, nelts],
](inout res: Tensor[dtype], t1: Tensor[dtype], a: Scalar[dtype]):
    """Element-wise operation between a tensor (lhs) and a scalar (rhs)."""

    @parameter
    fn apply_chunk[width: Int](i: Int):
        # The scalar `a` is implicitly broadcast across the SIMD chunk.
        res.store[width](i, func[dtype, width](t1.load[width](i), a))

    vectorize[apply_chunk, nelts](t1.num_elements())
@always_inline
fn elwise_op[
    func: fn[dtype: DType, nelts: Int] (
        x: SIMD[dtype, nelts], y: SIMD[dtype, nelts]
    ) -> SIMD[dtype, nelts],
](inout res: Tensor[dtype], a: Scalar[dtype], t1: Tensor[dtype]):
    """Element-wise operation between a scalar (lhs) and a tensor (rhs)."""

    @parameter
    fn apply_chunk[width: Int](i: Int):
        # The scalar `a` is implicitly broadcast across the SIMD chunk.
        res.store[width](i, func[dtype, width](a, t1.load[width](i)))

    vectorize[apply_chunk, nelts](t1.num_elements())
fn broadcast_elwise_op[
    t1_shape: TensorShape,
    t2_shape: TensorShape,
    res_shape: TensorShape,
    func: fn[dtype: DType, nelts: Int] (
        x: SIMD[dtype, nelts], y: SIMD[dtype, nelts]
    ) -> SIMD[dtype, nelts],
](inout res: Tensor[dtype], t1: Tensor[dtype], t2: Tensor[dtype]):
    """Element-wise binary op with numpy-style broadcasting of t1 and t2 to res_shape.

    Strides with 0 in broadcasted dimensions (computed at compile time) map
    each result index back into the source tensors.
    """
    alias size = res_shape.rank()
    alias strides1 = broadcast_calculate_strides[size, t1_shape, res_shape]()
    alias strides2 = broadcast_calculate_strides[size, t2_shape, res_shape]()

    @parameter
    fn vec_op[nelts: Int](i: Int):
        # Source offsets are generally non-contiguous, so each result element
        # is resolved individually.
        var index1 = get_real_index[size, strides1, res_shape](i)
        var index2 = get_real_index[size, strides2, res_shape](i)

        res.store[nelts](
            i,
            func[dtype, nelts](t1.load[nelts](index1), t2.load[nelts](index2)),
        )

    # TODO: Check how to vectorize this
    # (width fixed at 1 because index1/index2 are not contiguous in general)
    vectorize[vec_op, 1](res.num_elements())
@always_inline
fn accumulate_grad(inout grad: Tensor[dtype], res_grad: Tensor[dtype]):
    """In-place grad += res_grad for equal-shape tensors."""
    # Accumulate gradient without checking for broadcasting
    elwise_op[add](grad, grad, res_grad)
@always_inline
fn accumulate_grad[
    grad_shape: TensorShape, res_grad_shape: TensorShape
](inout grad: Tensor[dtype], res_grad: Tensor[dtype]):
    """In-place grad += res_grad, un-broadcasting res_grad onto grad_shape when needed."""

    @parameter
    if grad_shape == res_grad_shape:
        elwise_op[add](grad, grad, res_grad)
    elif res_grad_shape == TensorShape(1):
        # Scalar gradient: add it to every element of grad.
        elwise_op[add](grad, grad, res_grad[0])
    elif grad_shape != res_grad_shape:
        # Backward resulting gradient (res_grad) was formed from an operation that required broadcasting.
        # In order to accumulate res_grad to the gradient, the res_grad tensor needs to be unbroadcasted.
        # The following is equivalent to: Summing along the axes that were expanded during the broadcasting process.
        alias size = res_grad_shape.rank()
        alias strides_grad = broadcast_calculate_strides[
            size, grad_shape, res_grad_shape
        ]()

        @parameter
        fn vec_op[nelts: Int](i: Int):
            # Broadcasted dims have stride 0, so many res_grad elements map to
            # the same grad element and are summed into it.
            var index = get_real_index[size, strides_grad, res_grad_shape](i)
            grad[index] += res_grad.load[nelts](i).reduce_add()

        # TODO: Check how to vectorize this
        # (width fixed at 1: target indices repeat, so wider chunks would race)
        vectorize[vec_op, 1](res_grad.num_elements())
# ---- Transform functions -----
@always_inline
fn transpose_2D[t_shape: TensorShape](t: Tensor[dtype]) -> Tensor[dtype]:
var t_new = Tensor[dtype](t_shape[1], t_shape[0])
alias stride = t_shape[0]
@parameter
fn proc_row(i: Int):
@parameter
fn proc_column[nelts: Int](j: Int):
t_new.data().offset(j * t_shape[0] + i).simd_strided_store[nelts](
t.load[nelts](i * t_shape[1] + j), stride
)
vectorize[proc_column, nelts](t.dim(1))
parallelize[proc_row](t_shape[0])
return t_new ^
@always_inline
fn transpose_2D[t_shape: TensorShape](t: DTypePointer[dtype]) -> DTypePointer[dtype]:
    """Raw-pointer variant of the 2-D transpose.

    Allocates and returns a new buffer of t_shape[1] * t_shape[0] elements;
    the CALLER owns the returned pointer and must free it.
    """
    var t_new = DTypePointer[dtype].alloc(t_shape[1] * t_shape[0])

    alias stride = t_shape[0]

    @parameter
    fn proc_row(i: Int):
        @parameter
        fn proc_column[nelts: Int](j: Int):
            # Contiguous read of row i, strided write down column i.
            t_new.offset(j * t_shape[0] + i).simd_strided_store[nelts](
                t.load[width=nelts](i * t_shape[1] + j), stride
            )

        vectorize[proc_column, nelts](t_shape[1])

    parallelize[proc_row](t_shape[0])

    return t_new
# ----- Reduction functions -----
@always_inline
fn reduce[
op: fn[type: DType, simd_width: Int] (
x: SIMD[type, simd_width], y: SIMD[type, simd_width]
) -> SIMD[type, simd_width],
reduce_op: fn[type: DType, simd_width: Int] (x: SIMD[type, simd_width]) -> SIMD[
type, 1
],
](t: Tensor[dtype], starting_value: SIMD[dtype, nelts]) -> Scalar[dtype]:
var m: SIMD[dtype, nelts] = starting_value
@parameter
fn vecreduce[_nelts: Int](idx: Int):
@parameter
if _nelts == 1:
m[0] = op(m[0], t.load[_nelts](idx)[0])
else:
m = op(m, t.load[nelts](idx))
vectorize[vecreduce, nelts](t.num_elements())
return reduce_op(m)
fn get_reduce_shape(t: TensorShape, axis: Int) -> TensorShape:
    """Shape of `t` with dimension `axis` collapsed to size 1."""
    var dims = StaticIntTuple[MAX_RANK]()
    var r = t.rank()
    for d in range(r):
        dims[d] = 1 if d == axis else t[d]
    return TensorShape(rank=r, shape=dims)
@always_inline
fn reduce[
    op: fn[type: DType, simd_width: Int] (
        x: SIMD[type, simd_width], y: SIMD[type, simd_width]
    ) -> SIMD[type, simd_width],
    reduce_op: fn[type: DType, simd_width: Int] (x: SIMD[type, simd_width]) -> SIMD[
        type, 1
    ],
](
    inout res: Tensor[dtype],
    t: Tensor[dtype],
    axis: Int,
    starting_value: SIMD[dtype, nelts],
):
    """Reduce `t` along `axis` into `res` (which has that axis collapsed to 1).

    Each output element is computed by one parallel task that walks the axis
    with strided SIMD loads. `starting_value` must be the identity of `op`.
    """
    var strides = t.strides()

    @parameter
    fn parallel_reduce(i: Int):
        var m: SIMD[dtype, nelts] = starting_value

        # Offset of the first element of the i-th "fiber" along `axis`:
        # the component below the axis plus the component above it.
        var index_base = (i % strides[axis]) + (i // strides[axis]) * (
            strides[axis] * t.dim(axis)
        )

        @parameter
        fn axisreduce[_nelts: Int](j: Int):
            var index = index_base + j * strides[axis]
            if _nelts == 1:
                # Tail element: fold into lane 0 only.
                m[0] = op(
                    m[0],
                    t.data().offset(index).simd_strided_load[_nelts](strides[axis])[0],
                )
            else:
                m = op(
                    m, t.data().offset(index).simd_strided_load[nelts](strides[axis])
                )

        vectorize[axisreduce, nelts](t.dim(axis))

        res[i] = reduce_op(m)

    parallelize[parallel_reduce](t.num_elements() // t.dim(axis))

    # Keep `strides` alive until the parallel closures have finished.
    _ = strides
@always_inline
fn _reduce_sum[
    type: DType, simd_width: Int
](x: SIMD[type, simd_width]) -> Scalar[type]:
    """Collapse a SIMD accumulator to a scalar by summing its lanes."""
    return x.reduce_add()
@always_inline
fn tsum(t: Tensor[dtype]) -> Scalar[dtype]:
    """Sum of all elements of `t`."""
    # 0 is the identity of `add`; implicitly converted to a SIMD splat.
    var starting_value = 0
    return reduce[add, _reduce_sum](t, starting_value)
@always_inline
fn tmean(t: Tensor[dtype]) -> Scalar[dtype]:
    """Arithmetic mean of all elements of `t`."""
    return tsum(t) / t.num_elements()
@always_inline
fn tstd(t: Tensor[dtype]) -> Scalar[dtype]:
    """Population standard deviation of all elements of `t` (divides by N)."""
    var mu: Scalar[dtype] = tmean(t)
    var variance: Scalar[dtype] = 0

    @parameter
    fn vecvar[nelts: Int](idx: Int):
        # Accumulate sum of squared deviations from the mean.
        var diff = t.load[nelts](idx) - mu
        variance += (diff * diff).reduce_add()

    vectorize[vecvar, nelts](t.num_elements())

    return sqrt(variance / t.num_elements())
@always_inline
fn tsum(inout res: Tensor[dtype], t: Tensor[dtype], axis: Int):
    """Sum `t` along `axis` into `res` (axis collapsed to 1)."""
    var starting_value = 0  # identity of `add`
    reduce[add, _reduce_sum](res, t, axis, starting_value)
@always_inline
fn tmean(inout res: Tensor[dtype], t: Tensor[dtype], axis: Int):
    """Mean of `t` along `axis` into `res`: sum, then divide by the axis length."""
    var num_elements_axis: Scalar[dtype] = t.dim(axis)
    tsum(res, t, axis)
    elwise_op[div](res, res, num_elements_axis)
@always_inline
fn tstd(inout res: Tensor[dtype], t: Tensor[dtype], axis: Int):
    """Population standard deviation of `t` along `axis` into `res`.

    First computes the per-fiber mean, then accumulates squared deviations
    fiber by fiber, divides by the axis length and takes the square root.
    """
    var mu = Tensor[dtype](get_reduce_shape(t.shape(), axis))
    tmean(mu, t, axis)

    var num_elements_axis: Scalar[dtype] = t.dim(axis)
    var strides = t.strides()
    var strides_mu = mu.strides()

    @parameter
    fn get_t_index(
        i: Int, j: Int, axis: Int, shape: TensorShape, strides: StaticIntTuple[MAX_RANK]
    ) -> Int:
        # Offset of the j-th element along `axis` within the i-th fiber.
        var index_res = 0
        for k in range(shape.rank()):
            if k == axis:
                index_res += j * strides[k]
            else:
                index_res += (i % shape[k]) * strides[k]
        return index_res

    @parameter
    fn get_mu_index(
        i: Int, axis: Int, shape: TensorShape, strides: StaticIntTuple[MAX_RANK]
    ) -> Int:
        # Offset of fiber i's mean; the axis dimension of mu has size 1.
        var index_res = 0
        for k in range(shape.rank()):
            if k != axis:
                index_res += (i % shape[k]) * strides[k]
        return index_res

    for i in range(t.num_elements() // t.dim(axis)):
        var mu_index = get_mu_index(i, axis, mu.shape(), strides_mu)

        @parameter
        fn vecvar[nelts: Int](j: Int):
            var t_index = get_t_index(i, j, axis, t.shape(), strides)
            var diff = t.load[nelts](t_index) - mu[mu_index]
            res[i] += (diff * diff).reduce_add()

        vectorize[vecvar, nelts](t.dim(axis))

        res[i] /= num_elements_axis

    # Keep the stride tuples alive through the closures above.
    _ = (strides, strides_mu)

    # res currently holds variances; convert to standard deviations.
    elwise_transform[sqrt](res, res)
@always_inline
fn _reduce_max[
    type: DType, simd_width: Int
](x: SIMD[type, simd_width]) -> Scalar[type]:
    """Collapse a SIMD accumulator to a scalar by taking the max over lanes."""
    return x.reduce_max()
@always_inline
fn tmax(t: Tensor[dtype]) -> Scalar[dtype]:
    """Maximum element of `t`."""
    # Smallest finite value is the identity of `max`.
    var starting_value = math.limit.min_finite[dtype]()
    return reduce[max, _reduce_max](t, starting_value)
@always_inline
fn tmax(inout res: Tensor[dtype], t: Tensor[dtype], axis: Int):
    """Maximum of `t` along `axis` into `res` (axis collapsed to 1)."""
    var starting_value = math.limit.min_finite[dtype]()  # identity of `max`
    reduce[max, _reduce_max](res, t, axis, starting_value)
# @always_inline
# fn transpose[
# dtype: DType, nelts: Int
# ](t: Tensor[dtype], dim_0: Int, dim_1: Int) -> Tensor[dtype]:
# """
# Create a new tensor transposing dim_0 and dim_1.
# """
# var axes = DynamicVector[Int](t.rank())
# for i in range(t.rank()):
# if i == dim_0:
# axes.push_back(dim_1)
# elif i == dim_1:
# axes.push_back(dim_0)
# else:
# axes.push_back(i)
# return transpose[dtype, nelts](t, axes)
# @always_inline
# fn transpose(inout res: Tensor[dtype], t: Tensor[dtype]):
# """
# Create a new transposed tensor of the given tensor t.
# """
# var axes = DynamicVector[Int](capacity=t.rank())
# for i in range(t.rank() - 1, -1, -1):
# axes.push_back(i)
# var axes_shape = TensorShape(axes)
# transpose(res, t, axes_shape)
# @always_inline
# fn transpose(t: Tensor[dtype], axes: DynamicVector[Int]) -> Tensor[dtype]:
# var new_shape = DynamicVector[Int](capacity=t.rank())
# for i in range(t.rank()):
# new_shape.push_back(t.dim(axes[i]))
# var t_new_shape = TensorShape(new_shape)
# var t_new = Tensor[dtype](t_new_shape)
# transpose(t_new, t, t_new_shape)
# return t_new
@always_inline
fn get_transpose_shape(t: TensorShape, axes: TensorShape) -> TensorShape:
    """Shape of `t` with its dimensions permuted according to `axes`."""
    var permuted = StaticIntTuple[MAX_RANK]()
    var r = t.rank()
    for d in range(r):
        permuted[d] = t[axes[d]]
    return TensorShape(rank=r, shape=permuted)
@always_inline
fn transpose(t: Tensor[dtype], axes: TensorShape) -> Tensor[dtype]:
    """Return a new tensor with dimensions of `t` permuted per `axes`."""
    var t_new_shape = get_transpose_shape(t.shape(), axes)
    var t_new = Tensor[dtype](t_new_shape)

    transpose(t_new, t, axes)

    return t_new ^
@always_inline
fn transpose(inout res: Tensor[dtype], t: Tensor[dtype], axes: TensorShape):
    """
    Create a new transposed tensor of the given tensor t.

    Writes the permutation of `t` (per `axes`) into `res`, which must already
    have the permuted shape. Rows of the innermost original dimension are
    processed in parallel; each element's destination offset is rebuilt from
    its coordinates via the two stride tables.
    """
    # NOTE: The rank of of the t tensor should be 2 or more
    # NOTE: Axes should be the same size as the rank of t

    var original_strides = t.strides()
    var transposed_strides = res.strides()

    var position_of_last_rank_new_shape = 0

    # Get position of where the last dim of the old shape is in the new shape
    # (used as the store stride: consecutive source elements land this far apart).
    for i in range(axes.rank()):
        if t.rank() - 1 == axes[i]:
            position_of_last_rank_new_shape = i

    @parameter
    fn p_transpose(i: Int):
        @parameter
        fn v_transpose[nelts: Int](j: Int):
            var new_index = 0
            var original_index = i * t.dim(t.rank() - 1) + j
            var linear_index = original_index
            for k in range(t.rank()):
                # axes tells us the position of where the dim in the transposed shape is located in the original shape
                var stride = original_strides[axes[k]]
                var index = linear_index // stride % t.dim(axes[k])

                new_index += index * transposed_strides[k]

            res.data().offset(new_index).simd_strided_store[nelts](
                t.load[nelts](original_index),
                transposed_strides[position_of_last_rank_new_shape],
            )

        vectorize[v_transpose, nelts](t.dim(t.rank() - 1))

    parallelize[p_transpose](t.num_elements() // t.dim(t.rank() - 1))

    # Keep the stride tuples alive until the parallel closures finish.
    _ = (original_strides, transposed_strides)
# # NOTE: This function can be used for later for optimziation (Many operations in gpu is preferred to pad the tensors when using conv or matmul operations)
# # TODO: Deprecate this function, as it is not used anymore
# @always_inline
# fn pad_zeros[
# dtype: DType, nelts: Int
# ](t: Tensor[dtype], pad_with: DynamicVector[Int]) -> Tensor[dtype]:
# """
# Pad a tensor with zeros along the specified axes of an N dimensional tensor.
# Number of values padded to the edges of each axis.
# Example: ((before_1, after_1), ... (before_N, after_N)).
# """
# # NOTE: The rank of of the t tensor should be equal to the size of pad_with devided by 2.
# # As pad_with contains (before, after) number of paddings for each axis.
# var new_shape = DynamicVector[Int](t.rank())
# for i in range(t.rank()):
# new_shape.push_back(t.dim(i) + pad_with[i * 2] + pad_with[i * 2 + 1])
# var t_new = Tensor[dtype](new_shape)
# var original_strides = t.strides()
# var result_strides = t_new.strides()
# # Parallelize over the first axis
# # NOTE: Possible dynamically choose the axis to parallelize over
# @parameter
# fn p_pad(i: Int):
# for j in range(t.num_elements() // t.dim(0)):
# var original_index = i * original_strides[0] + j
# # Padding contribution of the first dimention
# var dest_index = (i + pad_with[0]) * result_strides[0]
# # Calculate the contribution from each dimension
# var remaining_index = j % original_strides[0]
# for dim in range(1, t.rank()):
# var stride = original_strides[dim]
# var index = remaining_index // stride
# remaining_index = remaining_index % stride
# dest_index += (index + pad_with[dim * 2]) * result_strides[dim]
# # TODO: figure out vectorization
# t_new[dest_index] = t[original_index]
# parallelize[p_pad](t.dim(0))
# _ = (original_strides, result_strides)
# return t_new
| basalt/basalt/utils/tensorutils.mojo | false |
<filename>basalt/basalt/utils/tensor_creation_utils.mojo
from python import Python
# maybe this functions should be from the Tensor struct (like tensor.to_numpy()) and tensor.__init__(np_array: PythonObject) to create a tensor from a numpy array and tensor.copy_np_data(np_array: PythonObject) to copy the numpy array to the tensor.
fn to_numpy(tensor: Tensor) -> PythonObject:
    """Copy `tensor` into a new float32 numpy ndarray of the same shape.

    Writes through the ndarray's raw data pointer (obtained from
    __array_interface__), so the element bytes are memcpy'd directly.
    Returns an empty PythonObject if numpy interop fails.
    """
    try:
        var np = Python.import_module("numpy")

        np.set_printoptions(4)

        var rank = tensor.rank()
        var dims = PythonObject([])
        for i in range(rank):
            dims.append(tensor.dim(i))
        var pyarray: PythonObject = np.empty(dims, dtype=np.float32)

        # Raw destination pointer into the freshly allocated ndarray buffer.
        var pointer = int(pyarray.__array_interface__["data"][0].to_float64())
        var pointer_d = DTypePointer[tensor.dtype](address=pointer)
        memcpy(pointer_d, tensor.data(), tensor.num_elements())

        _ = tensor  # keep the tensor alive through the memcpy

        return pyarray^
    except e:
        print("Error in to numpy", e)
        return PythonObject()
fn to_tensor(np_array: PythonObject) raises -> Tensor[dtype]:
    """Build a Tensor[dtype] with the shape and contents of a numpy array.

    The array is copied and converted to float32 first so its memory layout
    matches the tensor's before the raw memcpy. 0-d arrays are returned as a
    1-element tensor.
    """
    var shape = List[Int]()
    for i in range(np_array.ndim):
        shape.append(int(np_array.shape[i].to_float64()))

    if np_array.ndim == 0:
        # When the numpy array is a scalar, you need or the reshape to a size 1 ndarray or do this, if not the memcpy gets a memory error (Maybe because it is a register value?).
        var tensor = Tensor[dtype](TensorShape(1))
        tensor[0] = np_array.to_float64().cast[dtype]()
        return tensor^

    var tensor = Tensor[dtype](TensorShape(shape))

    # Work on a contiguous float32 copy so dtype and layout match the tensor.
    var np_array_2 = np_array.copy()
    try:
        var np = Python.import_module("numpy")
        np_array_2 = np.float32(np_array_2)
    except e:
        print("Error in to tensor", e)

    var pointer = int(np_array_2.__array_interface__["data"][0].to_float64())
    var pointer_d = DTypePointer[tensor.dtype](address=pointer)
    memcpy(tensor.data(), pointer_d, tensor.num_elements())

    # Keep both Python objects alive until the memcpy has completed.
    _ = np_array_2
    _ = np_array

    return tensor^
fn copy_np_data(tensor: Tensor, np_array: PythonObject) raises:
    """Copy the contents of `np_array` into `tensor`'s existing buffer.

    Like to_tensor, but writes into an already-allocated tensor (via its raw
    data pointer) instead of creating one. Assumes shapes/element counts match.
    """
    # Contiguous float32 copy so dtype and layout match the tensor.
    var np_array_2 = np_array.copy()
    try:
        var np = Python.import_module("numpy")
        np_array_2 = np.float32(np_array_2)
    except e:
        print("Error in to tensor", e)

    var pointer = int(np_array_2.__array_interface__["data"][0].to_float64())
    var pointer_d = DTypePointer[tensor.dtype](address=pointer)
    memcpy(tensor.data(), pointer_d, tensor.num_elements())

    # Keep all operands alive until the memcpy has completed.
    _ = np_array_2
    _ = np_array
    _ = tensor
| basalt/basalt/utils/tensor_creation_utils.mojo | false |
from memory.unsafe import bitcast
@always_inline("nodebug")
fn q_sqrt(value: Float32) -> Float32:
    """Fast reciprocal square root (the Quake III 0x5F3759DF bit hack).

    Despite the name, this returns an approximation of 1/sqrt(value), not
    sqrt(value). One Newton-Raphson refinement step y * (1.5 - 0.5*value*y*y)
    is applied, written with fma as -y * ((0.5 * value * y).fma(y, -1.5)).
    """
    var y = bitcast[DType.float32](0x5F3759DF - (bitcast[DType.uint32](value) >> 1))
    return -y * ((0.5 * value * y).fma(y, -1.5))
| basalt/basalt/utils/__init__.mojo | false |
<filename>basalt/examples/housing.mojo
from time.time import now
import basalt.nn as nn
from basalt import Tensor, TensorShape
from basalt import Graph, Symbol, OP
from basalt.utils.datasets import BostonHousing
from basalt.utils.dataloader import DataLoader
fn linear_regression(batch_size: Int, n_inputs: Int, n_outputs: Int) -> Graph:
    """Build a single-Linear-layer regression graph with MSE loss."""
    var g = Graph()

    var x = g.input(TensorShape(batch_size, n_inputs))
    var y_true = g.input(TensorShape(batch_size, n_outputs))

    var y_pred = nn.Linear(g, x, n_outputs)
    g.out(y_pred)

    var loss = nn.MSELoss(g, y_pred, y_true)
    g.loss(loss)

    return g ^
fn main():
    """Train the linear-regression graph on the Boston Housing csv with Adam."""
    # Train Parameters
    alias batch_size = 32
    alias num_epochs = 200
    alias learning_rate = 0.02

    # Graph is built at compile time (13 features -> 1 target).
    alias graph = linear_regression(batch_size, 13, 1)

    # try: graph.render("operator")
    # except: print("Could not render graph")

    var model = nn.Model[graph]()
    var optim = nn.optim.Adam[graph](Reference(model.parameters), lr=learning_rate)

    # Batchwise data loader
    print("Loading data...")
    var train_data: BostonHousing
    try:
        train_data = BostonHousing(file_path="./examples/data/housing.csv")
    except:
        print("Could not load data")
        return

    var training_loader = DataLoader(
        data=train_data.data, labels=train_data.labels, batch_size=batch_size
    )

    print("Training started.")
    var start = now()

    for epoch in range(num_epochs):
        var num_batches: Int = 0
        var epoch_loss: Float32 = 0.0
        for batch in training_loader:
            # Forward pass
            var loss = model.forward(batch.data, batch.labels)

            # Backward pass
            optim.zero_grad()
            model.backward()
            optim.step()

            epoch_loss += loss[0]
            num_batches += 1

        print(
            "Epoch: [",
            epoch + 1,
            "/",
            num_epochs,
            "] \t Avg loss per epoch:",
            epoch_loss / num_batches,
        )

    print("Training finished: ", (now() - start) / 1e9, "seconds")

    # print("\n\nInferencing model...\n")
    # for batch in training_loader:
    #     var output = model.inference(batch.data)

    #     # Print first (and only output)
    #     print("Predicted: ", output[0])
| basalt/examples/housing.mojo | false |
from time.time import now
import basalt.nn as nn
from basalt import Tensor, TensorShape
from basalt import Graph, Symbol, OP, dtype
from basalt.utils.datasets import MNIST
from basalt.utils.dataloader import DataLoader
from basalt.autograd.attributes import AttributeVector, Attribute
# def plot_image(data: Tensor, num: Int):
# from python.python import Python, PythonObject
# np = Python.import_module("numpy")
# plt = Python.import_module("matplotlib.pyplot")
# var pyimage: PythonObject = np.empty((28, 28), np.float64)
# for m in range(28):
# for n in range(28):
# pyimage.itemset((m, n), data[num * 28 * 28 + m * 28 + n])
# plt.imshow(pyimage)
# plt.show()
fn create_CNN(batch_size: Int) -> Graph:
    """Build a small LeNet-style CNN graph for MNIST with cross-entropy loss.

    Conv(16,5x5) -> ReLU -> MaxPool(2) -> Conv(32,5x5) -> ReLU -> MaxPool(2)
    -> flatten -> Linear(10).
    """
    var g = Graph()
    var x = g.input(TensorShape(batch_size, 1, 28, 28))

    var x1 = nn.Conv2d(g, x, out_channels=16, kernel_size=5, padding=2)
    var x2 = nn.ReLU(g, x1)
    var x3 = nn.MaxPool2d(g, x2, kernel_size=2)
    var x4 = nn.Conv2d(g, x3, out_channels=32, kernel_size=5, padding=2)
    var x5 = nn.ReLU(g, x4)
    var x6 = nn.MaxPool2d(g, x5, kernel_size=2)
    # Flatten (N, C, H, W) -> (N, C*H*W) before the classifier head.
    var x7 = g.op(
        OP.RESHAPE,
        x6,
        attributes=AttributeVector(
            Attribute(
                "shape",
                TensorShape(x6.shape[0], x6.shape[1] * x6.shape[2] * x6.shape[3]),
            )
        ),
    )
    var out = nn.Linear(g, x7, n_outputs=10)
    g.out(out)

    var y_true = g.input(TensorShape(batch_size, 10))
    var loss = nn.CrossEntropyLoss(g, out, y_true)
    # var loss = nn.MSELoss(g, out, y_true)
    g.loss(loss)

    return g ^
fn main():
    """Train the CNN from create_CNN on a small MNIST csv with Adam."""
    alias num_epochs = 20
    alias batch_size = 4
    alias learning_rate = 1e-3

    alias graph = create_CNN(batch_size)

    # try: graph.render("operator")
    # except: print("Could not render graph")

    var model = nn.Model[graph]()
    var optim = nn.optim.Adam[graph](Reference(model.parameters), lr=learning_rate)

    print("Loading data ...")
    var train_data: MNIST
    try:
        train_data = MNIST(file_path="./examples/data/mnist_test_small.csv")
        # _ = plot_image(train_data.data, 1)
    except e:
        print("Could not load data")
        print(e)
        return

    var training_loader = DataLoader(
        data=train_data.data, labels=train_data.labels, batch_size=batch_size
    )

    # Fix: the message previously read "Training started/" (stray trailing
    # slash); now matches the phrasing used by the other examples.
    print("Training started.")
    var start = now()

    for epoch in range(num_epochs):
        var num_batches: Int = 0
        var epoch_loss: Float32 = 0.0
        var epoch_start = now()
        for batch in training_loader:
            # [ONE HOT ENCODING!] integer labels -> (batch, 10) targets.
            var labels_one_hot = Tensor[dtype](batch.labels.dim(0), 10)
            for bb in range(batch.labels.dim(0)):
                labels_one_hot[int((bb * 10 + batch.labels[bb]))] = 1.0

            # Forward pass
            var loss = model.forward(batch.data, labels_one_hot)

            # Backward pass
            optim.zero_grad()
            model.backward()
            optim.step()

            epoch_loss += loss[0]
            num_batches += 1

            print(
                "Epoch [",
                epoch + 1,
                "/",
                num_epochs,
                "],\t Step [",
                num_batches,
                "/",
                train_data.data.dim(0) // batch_size,
                "],\t Loss:",
                epoch_loss / num_batches,
            )

        print("Epoch time: ", (now() - epoch_start) / 1e9, "seconds")

    print("Training finished: ", (now() - start) / 1e9, "seconds")

    model.print_perf_metrics("ms", True)
| basalt/examples/mnist.mojo | false |
from time.time import now
from pathlib import Path
import basalt.nn as nn
from basalt import Tensor, TensorShape
from basalt import Graph, Symbol, OP, dtype
from basalt.utils.datasets import MNIST
from basalt.utils.dataloader import DataLoader
from basalt.autograd.attributes import AttributeVector, Attribute
# def plot_image(data: Tensor, num: Int):
# from python.python import Python, PythonObject
# np = Python.import_module("numpy")
# plt = Python.import_module("matplotlib.pyplot")
# var pyimage: PythonObject = np.empty((28, 28), np.float64)
# for m in range(28):
# for n in range(28):
# pyimage.itemset((m, n), data[num * 28 * 28 + m * 28 + n])
# plt.imshow(pyimage)
# plt.show()
fn create_CNN(batch_size: Int) -> Graph:
    """Build the inference-only MNIST CNN graph (no loss node).

    Same topology as the training example: Conv(16) -> ReLU -> MaxPool ->
    Conv(32) -> ReLU -> MaxPool -> flatten -> Linear(10). Weights are loaded
    from ONNX by the caller.
    """
    var g = Graph()
    var x = g.input(TensorShape(batch_size, 1, 28, 28))

    var x1 = nn.Conv2d(g, x, out_channels=16, kernel_size=5, padding=2)
    var x2 = nn.ReLU(g, x1)
    var x3 = nn.MaxPool2d(g, x2, kernel_size=2)
    var x4 = nn.Conv2d(g, x3, out_channels=32, kernel_size=5, padding=2)
    var x5 = nn.ReLU(g, x4)
    var x6 = nn.MaxPool2d(g, x5, kernel_size=2)
    # Flatten (N, C, H, W) -> (N, C*H*W) before the classifier head.
    var x7 = g.op(
        OP.RESHAPE,
        x6,
        attributes=AttributeVector(
            Attribute(
                "shape",
                TensorShape(x6.shape[0], x6.shape[1] * x6.shape[2] * x6.shape[3]),
            )
        ),
    )
    var out = nn.Linear(g, x7, n_outputs=10)
    g.out(out)

    return g ^
fn main():
    """Load ONNX weights into the CNN, measure accuracy on MNIST, re-export."""
    alias num_epochs = 1
    alias batch_size = 4
    alias learning_rate = 1e-3

    alias graph = create_CNN(batch_size)

    # try: graph.render("operator")
    # except: print("Could not render graph")

    var model = nn.Model[graph]()
    model.load_model_data("./examples/data/mnist_torch.onnx")

    print("Loading data ...")
    var train_data: MNIST
    try:
        train_data = MNIST(file_path="./examples/data/mnist_test_small.csv")
        # _ = plot_image(train_data.data, 1)
    except e:
        print("Could not load data")
        print(e)
        return

    var training_loader = DataLoader(
        data=train_data.data, labels=train_data.labels, batch_size=batch_size
    )

    # Testing
    print("Testing started")
    var start = now()

    var correct = 0
    for batch in training_loader:
        # One-hot targets (passed to inference alongside the images).
        var labels_one_hot = Tensor[dtype](batch.labels.dim(0), 10)
        for bb in range(batch.labels.dim(0)):
            labels_one_hot[int(bb * 10 + batch.labels[bb])] = 1.0

        var output = model.inference(batch.data, labels_one_hot)[0]

        # Row-wise argmax over the 10 class logits.
        # NOTE(review): the `dim` parameter is never read — rows of 10 are
        # hard-coded; fine for this example, but the signature is misleading.
        fn argmax(tensor: Tensor[dtype], dim: Int) -> Tensor[dtype]:
            var result = Tensor[dtype](tensor.dim(0))

            for i in range(tensor.dim(0)):
                var max_val = tensor[i * 10]
                var max_idx = 0
                for j in range(1, 10):
                    if tensor[i * 10 + j] > max_val:
                        max_val = tensor[i * 10 + j]
                        max_idx = j
                result[i] = max_idx

            return result

        var pred = argmax(output, dim=1)

        for i in range(batch.labels.dim(0)):
            if pred[i] == batch.labels[i]:
                correct += 1

    print("Accuracy: ", correct / train_data.data.dim(0) * 100, "%")
    print("Testing finished: ", (now() - start) / 1e9, "seconds")

    # model.print_perf_metrics("ms", True)

    model.export_model("./output_model.onnx")
<filename>basalt/examples/sin_estimate.mojo
from random import rand
from time.time import now
import math
import basalt.nn as nn
from basalt import Tensor, TensorShape
from basalt import dtype
from basalt import Graph, Symbol, OP
from basalt.utils.tensorutils import fill
fn create_simple_nn(batch_size: Int, n_inputs: Int, n_outputs: Int) -> Graph:
    """Build a 2-hidden-layer MLP (32 units, ReLU) with MSE loss and compile it."""
    var g = Graph()

    var x = g.input(TensorShape(batch_size, n_inputs))
    var y_true = g.input(TensorShape(batch_size, n_outputs))

    var x1 = nn.Linear(g, x, n_outputs=32)
    var x2 = nn.ReLU(g, x1)
    var x3 = nn.Linear(g, x2, n_outputs=32)
    var x4 = nn.ReLU(g, x3)
    var y_pred = nn.Linear(g, x4, n_outputs=n_outputs)
    g.out(y_pred)

    var loss = nn.MSELoss(g, y_pred, y_true)
    g.loss(loss)

    g.compile()

    return g ^
fn main():
    # Trains the MLP defined above to approximate sin(x) for x in [-1, 1).
    alias batch_size = 32
    alias n_inputs = 1
    alias n_outputs = 1
    alias learning_rate = 0.01
    alias epochs = 20000
    # The graph is built at compile time so the model can be specialized on it.
    alias graph = create_simple_nn(batch_size, n_inputs, n_outputs)
    # try: graph.render("operator")
    # except: print("Could not render graph")
    var model = nn.Model[graph]()
    var optimizer = nn.optim.Adam[graph](Reference(model.parameters), lr=learning_rate)
    var x_data = Tensor[dtype](batch_size, n_inputs)
    var y_data = Tensor[dtype](batch_size, n_outputs)
    print("Training started")
    var start = now()
    for i in range(epochs):
        # Fresh random batch each epoch: x uniform in [0, 1), rescaled to [-1, 1).
        rand[dtype](x_data.data(), x_data.num_elements())
        for j in range(batch_size):
            x_data[j] = x_data[j] * 2 - 1
            y_data[j] = math.sin(x_data[j])
        var out = model.forward(x_data, y_data)
        if (i + 1) % 1000 == 0:
            print("[", i + 1, "/", epochs, "] \tLoss: ", out[0])
        optimizer.zero_grad()
        model.backward()
        optimizer.step()
    print("Training finished: ", (now() - start) / 1e9, "seconds")
| basalt/examples/sin_estimate.mojo | false |
<filename>basalt/tests/testing_utils.mojo
from python.python import Python
from collections import OptionalReg
from testing import assert_equal, assert_almost_equal
from basalt import dtype
from basalt.autograd import Graph, OP
from basalt.autograd.ops.ops import backward_op
from basalt.autograd.attributes import AttributeVector
from basalt.nn import Tensor, TensorShape, Model
from basalt.utils.tensor_creation_utils import to_numpy, to_tensor
# The below regex should be used to convert deprecated calls
# assert_tensors_equal\(([^,]+),\s*([^,]+),\s*"([^"]+)"\)
# assert_tensors_equal["$3"]($1, $2)
fn assert_tensors_equal[
    mode: String = "exact", msg: String = "Error"
](t1: Tensor[dtype], t2: Tensor[dtype]) raises:
    # Element-wise tensor equality assertion.
    # mode == "exact":  each element must match exactly.
    # mode == "almost": elements are compared with rtol/atol of 1e-5.
    constrained[
        mode == "exact" or mode == "almost", "Mode must be either 'exact' or 'almost'"
    ]()
    # Shapes must match before comparing any elements.
    assert_equal(t1.shape(), t2.shape(), "Tensor shape mismatch")
    for i in range(t1.num_elements()):
        if mode == "almost":
            assert_almost_equal(t1[i], t2[i], rtol=1e-5, atol=1e-5, msg=msg)
        else:
            assert_equal(t1[i], t2[i], msg=msg)
fn test_unary_op[
    op: OP, t1_shape: TensorShape, attrs: OptionalReg[AttributeVector] = None
](t1: Tensor[dtype], expected: Tensor[dtype]) raises:
    # Builds a single-node graph around `op`, runs inference on `t1`, and
    # checks the output against `expected` (almost-equal comparison).
    fn create_graph() -> Graph:
        var g = Graph()
        var t1 = g.input(t1_shape)
        if attrs:
            var res = g.op(op, t1, attributes=attrs.value())
            g.out(res)
            return g ^
        else:
            var res = g.op(op, t1)
            g.out(res)
            return g ^
    alias graph = create_graph()
    # The graph must contain exactly the single op node.
    assert_equal(len(graph.nodes), 1)
    var model = Model[graph](inference_only=True)
    var res = model.inference(t1)[0]
    assert_tensors_equal["almost"](res, expected)
fn test_binary_op[
    op: OP,
    t1_shape: TensorShape,
    t2_shape: TensorShape,
    attrs: OptionalReg[AttributeVector] = None,
](t1: Tensor[dtype], t2: Tensor[dtype], expected: Tensor[dtype]) raises:
    # Builds a single-node graph around the binary `op`, runs inference on
    # (t1, t2), and checks the output against `expected`.
    fn create_graph() -> Graph:
        var g = Graph()
        var t1 = g.input(t1_shape)
        var t2 = g.input(t2_shape)
        if attrs:
            var res = g.op(op, t1, t2, attributes=attrs.value())
            g.out(res)
            return g ^
        else:
            var res = g.op(op, t1, t2)
            g.out(res)
            return g ^
    alias graph = create_graph()
    # The graph must contain exactly the single op node.
    assert_equal(len(graph.nodes), 1)
    var model = Model[graph](inference_only=True)
    var res = model.inference(t1, t2)[0]
    assert_tensors_equal["almost"](res, expected)
fn test_ternary_op[
    op: OP, t1_shape: TensorShape, t2_shape: TensorShape, t3_shape: TensorShape
](
    t1: Tensor[dtype], t2: Tensor[dtype], t3: Tensor[dtype], expected: Tensor[dtype]
) raises:
    # Builds a single-node graph around the ternary `op`, runs inference on
    # (t1, t2, t3), and checks the output against `expected`.
    # NOTE(review): this overload marks create_graph with @parameter while the
    # unary/binary variants above do not — confirm whether that is intentional.
    @parameter
    fn create_graph() -> Graph:
        var g = Graph()
        var t1 = g.input(t1_shape)
        var t2 = g.input(t2_shape)
        var t3 = g.input(t3_shape)
        var res = g.op(op, t1, t2, t3)
        g.out(res)
        return g ^
    alias graph = create_graph()
    # The graph must contain exactly the single op node.
    assert_equal(len(graph.nodes), 1)
    var model = Model[graph](inference_only=True)
    var res = model.inference(t1, t2, t3)[0]
    assert_tensors_equal["almost"](res, expected)
fn test_unary_op_backward[
    op: OP,
    t1_shape: TensorShape,
    ug_shape: TensorShape,
    attrs: AttributeVector = AttributeVector(),
](t1: Tensor[dtype], ug: Tensor[dtype], grad_1_expected: Tensor[dtype],) raises:
    # Runs the backward kernel of `op` w.r.t. its only input (index 0) and
    # compares the computed gradient with the expected one.
    var computed_grad = Tensor[dtype](t1_shape)
    backward_op[0, op, ug_shape, t1_shape, attrs](ug, t1, computed_grad)
    assert_tensors_equal["almost"](computed_grad, grad_1_expected)
fn test_binary_op_backward[
    op: OP,
    t1_shape: TensorShape,
    t2_shape: TensorShape,
    ug_shape: TensorShape,
    attrs: AttributeVector = AttributeVector(),
](
    t1: Tensor[dtype],
    t2: Tensor[dtype],
    ug: Tensor[dtype],
    grad_1_expected: Tensor[dtype],
    grad_2_expected: Tensor[dtype],
) raises:
    # Runs the backward kernel of `op` once per operand index and compares
    # each computed gradient with its expected value.
    var computed_grad_a = Tensor[dtype](t1_shape)
    backward_op[0, op, ug_shape, t1_shape, t2_shape, attrs](
        ug, t1, t2, computed_grad_a
    )
    assert_tensors_equal["almost"](computed_grad_a, grad_1_expected)
    var computed_grad_b = Tensor[dtype](t2_shape)
    backward_op[1, op, ug_shape, t1_shape, t2_shape, attrs](
        ug, t1, t2, computed_grad_b
    )
    assert_tensors_equal["almost"](computed_grad_b, grad_2_expected)
fn test_ternary_op_backward[
    op: OP,
    t1_shape: TensorShape,
    t2_shape: TensorShape,
    t3_shape: TensorShape,
    ug_shape: TensorShape,
    attrs: AttributeVector = AttributeVector(),
](
    t1: Tensor[dtype],
    t2: Tensor[dtype],
    t3: Tensor[dtype],
    ug: Tensor[dtype],
    grad_1_expected: Tensor[dtype],
    grad_2_expected: Tensor[dtype],
    grad_3_expected: Tensor[dtype],
) raises:
    # Runs the backward kernel of `op` once per operand index (0, 1, 2) and
    # compares each computed gradient with its expected value.
    var computed_grad_a = Tensor[dtype](t1_shape)
    backward_op[0, op, ug_shape, t1_shape, t2_shape, t3_shape, attrs](
        ug, t1, t2, t3, computed_grad_a
    )
    assert_tensors_equal["almost"](computed_grad_a, grad_1_expected)
    var computed_grad_b = Tensor[dtype](t2_shape)
    backward_op[1, op, ug_shape, t1_shape, t2_shape, t3_shape, attrs](
        ug, t1, t2, t3, computed_grad_b
    )
    assert_tensors_equal["almost"](computed_grad_b, grad_2_expected)
    var computed_grad_c = Tensor[dtype](t3_shape)
    backward_op[2, op, ug_shape, t1_shape, t2_shape, t3_shape, attrs](
        ug, t1, t2, t3, computed_grad_c
    )
    assert_tensors_equal["almost"](computed_grad_c, grad_3_expected)
fn create_graph_concat(
    t1_shape: TensorShape, t2_shape: TensorShape, t3_shape: TensorShape, dim: Int
) -> Graph:
    # Graph with a single 3-operand concat along `dim`. All inputs are marked
    # trainable so their gradients can be inspected; the concat result doubles
    # as output and loss.
    var g = Graph()
    var in1 = g.input(t1_shape, trainable=True)
    var in2 = g.input(t2_shape, trainable=True)
    var in3 = g.input(t3_shape, trainable=True)
    var concatenated = g.concat(in1, in2, in3, dim=dim)
    g.out(concatenated)
    g.loss(concatenated)
    return g ^
fn create_graph_split(t_shape: TensorShape, sections: List[Int], dim: Int) -> Graph:
    # Graph that splits one trainable input into `sections` along `dim`.
    # Every split result is registered as an output; the first serves as loss.
    var g = Graph()
    var inp = g.input(t_shape, trainable=True)
    var parts = g.split(inp, sections=sections, dim=dim)
    for i in range(len(sections)):
        g.out(parts[i])
    g.loss(parts[0])  # Any one
    return g ^
| basalt/tests/testing_utils.mojo | false |
<filename>basalt/tests/mojo/test_activations.mojo
from testing import assert_equal
from basalt import dtype
from basalt.nn import (
Tensor,
TensorShape,
Model,
Softmax,
LogSoftmax,
ReLU,
Sigmoid,
Tanh,
)
from basalt.autograd import Graph, Symbol
from basalt.utils.tensorutils import fill
from tests import assert_tensors_equal
alias Activation = fn (inout g: Graph, input: Symbol) -> Symbol
alias AxisActivation = fn (inout g: Graph, input: Symbol, axis: Int) -> Symbol
fn create_graph[
    shape: TensorShape,
    func: AxisActivation,
    axis: Int,
]() -> Graph:
    # Minimal graph applying an axis-parameterized activation to one input.
    var g = Graph()
    var inp = g.input(shape)
    var out_sym = func(g, inp, axis)
    g.out(out_sym)
    return g ^
fn create_graph[shape: TensorShape, func: Activation]() -> Graph:
    # Minimal graph applying an axis-free activation to one input.
    var g = Graph()
    var inp = g.input(shape)
    var out_sym = func(g, inp)
    g.out(out_sym)
    return g ^
fn test_graph[
    shape: TensorShape,
    func: AxisActivation,
    nodes: Int,
    axis: Int,
](input: Tensor[dtype], expected: Tensor[dtype]) raises:
    # Builds a graph applying `func` along `axis`, runs inference, and checks
    # both the numerical result and the number of generated graph nodes.
    alias graph = create_graph[shape, func, axis]()
    var model = Model[graph](inference_only=True)
    var res = model.inference(input)[0]
    # Explicit failure messages, consistent with the non-axis overload below.
    assert_tensors_equal["almost", "Tensor equality failed"](res, expected)
    assert_equal(len(graph.nodes), nodes, "Node count failed")
fn test_graph[
    shape: TensorShape,
    func: Activation,
    nodes: Int,
](input: Tensor[dtype], expected: Tensor[dtype]) raises:
    # Builds a graph applying `func`, runs inference, and checks both the
    # numerical result and the number of generated graph nodes.
    alias graph = create_graph[shape, func]()
    var model = Model[graph](inference_only=True)
    var res = model.inference(input)[0]
    assert_tensors_equal["almost", "Tensor equality failed"](res, expected)
    assert_equal(len(graph.nodes), nodes, "Node count failed")
fn test_SOFTMAX() raises:
    # With a constant input, softmax along any axis yields 1 / (axis size).
    alias shape = TensorShape(2, 3, 2)
    alias nodes = 5
    var input = Tensor[dtype](shape)
    fill(input, 4)
    var expected = Tensor[dtype](shape)
    fill(expected, 0.5)  # axis 0 has size 2 -> 1/2
    test_graph[shape, Softmax, nodes, 0](input, expected)
    fill(expected, 1.0 / 3.0)  # axis 1 has size 3 -> 1/3
    test_graph[shape, Softmax, nodes, 1](input, expected)
    fill(expected, 0.5)  # axis 2 has size 2 -> 1/2
    test_graph[shape, Softmax, nodes, 2](input, expected)
fn test_LOGSOFTMAX() raises:
    # With a constant input, log-softmax along an axis yields -ln(axis size).
    alias shape = TensorShape(2, 3, 2)
    alias nodes = 6
    var input = Tensor[dtype](shape)
    fill(input, 4)
    var expected = Tensor[dtype](shape)
    fill(expected, -0.69314718)  # -ln(2)
    test_graph[shape, LogSoftmax, nodes, 0](input, expected)
    fill(expected, -1.09861231)  # -ln(3)
    test_graph[shape, LogSoftmax, nodes, 1](input, expected)
    fill(expected, -0.69314718)  # -ln(2)
    test_graph[shape, LogSoftmax, nodes, 2](input, expected)
fn test_RELU() raises:
    # ReLU passes positive values through unchanged and clamps negatives to 0.
    alias shape = TensorShape(2, 3)
    alias nodes = 1
    var input = Tensor[dtype](shape)
    var expected = Tensor[dtype](shape)
    for i in range(6):
        if i < 3:
            input[i] = 3
            expected[i] = 3
        else:
            input[i] = -3
            expected[i] = 0
    test_graph[shape, ReLU, nodes](input, expected)
fn test_SIGMOID() raises:
    # sigmoid(0) == 0.5 for every element.
    alias shape = TensorShape(2, 3)
    alias nodes = 1
    var input = Tensor[dtype](shape)
    var expected = Tensor[dtype](shape)
    fill(input, 0)
    fill(expected, 0.5)
    test_graph[shape, Sigmoid, nodes](input, expected)
fn test_TANH() raises:
    # tanh(0) == 0 for every element.
    alias shape = TensorShape(2, 3)
    alias nodes = 1
    var input = Tensor[dtype](shape)
    var expected = Tensor[dtype](shape)
    fill(input, 0)
    fill(expected, 0.0)
    test_graph[shape, Tanh, nodes](input, expected)
fn main():
    # Entry point: run every activation test, reporting (not propagating) failures.
    try:
        test_SOFTMAX()
        test_LOGSOFTMAX()
        test_RELU()
        test_SIGMOID()
        test_TANH()
    except e:
        print("[ERROR] Error in activations")
        print(e)
| basalt/tests/mojo/test_activations.mojo | false |
from testing import assert_equal, assert_true
from basalt.nn import TensorShape
from basalt.autograd.attributes import Attribute
fn test_attribute_key() raises:
    # The attribute's name must round-trip through str().
    alias attr = Attribute(name="test", value=-1)
    assert_true(str(attr.name) == "test")
fn test_attribute_int() raises:
    # Int values stored in an Attribute are recovered via to_int().
    alias stored: Int = 1
    alias attr = Attribute(name="test", value=stored)
    assert_true(attr.to_int() == stored)
fn test_attribute_string() raises:
    # String values round-trip via to_string().
    alias stored: String = "hello"
    alias attr = Attribute(name="test", value=stored)
    assert_true(attr.to_string() == stored)
fn test_attribute_tensor_shape() raises:
    # TensorShape values round-trip via to_shape().
    alias stored: TensorShape = TensorShape(1, 2, 3)
    alias attr = Attribute(name="test", value=stored)
    assert_true(attr.to_shape() == stored)
fn test_attribute_static_int_tuple() raises:
    # StaticIntTuple values round-trip via to_static[N]().
    alias stored: StaticIntTuple[7] = StaticIntTuple[7](1, 2, 3, 4, 5, 6, 7)
    alias attr = Attribute(name="test", value=stored)
    assert_true(attr.to_static[7]() == stored)
fn test_attribute_scalar() raises:
    # Scalar attributes must round-trip through to_scalar[DType]() for the
    # supported scalar kinds. Each case is a nested fn invoked at the end.
    fn test_float32() raises:
        # Fractional and integral Float32 values.
        alias value_a: Float32 = 1.23456
        alias a1 = Attribute(name="test", value=value_a)
        assert_true(
            a1.to_scalar[DType.float32]() == value_a,
            "Float32 scalar attribute failed",
        )
        alias value_b: Float32 = 65151
        alias a2 = Attribute(name="test", value=value_b)
        assert_true(
            a2.to_scalar[DType.float32]() == value_b,
            "Float32 scalar attribute failed",
        )
    fn test_float_literal() raises:
        # A compile-time FloatLiteral read back as Float32.
        alias value_c: FloatLiteral = -1.1
        alias a3 = Attribute(name="test", value=value_c)
        assert_true(
            a3.to_scalar[DType.float32]() == value_c,
            "FloatLiteral scalar attribute failed",
        )
    fn test_float64() raises:
        # Negative fractional and positive integral Float64 values.
        alias value_a: Float64 = -1.23456
        alias a1 = Attribute(name="test", value=value_a)
        assert_true(
            a1.to_scalar[DType.float64]() == value_a,
            "Float64 scalar attribute failed",
        )
        alias value_b: Float64 = 123456
        alias a2 = Attribute(name="test", value=value_b)
        assert_true(
            a2.to_scalar[DType.float64]() == value_b,
            "Float64 scalar attribute failed",
        )
    fn test_int32() raises:
        # Positive and negative Int32 values.
        alias value_a: Int32 = 666
        alias a1 = Attribute(name="test", value=value_a)
        assert_true(
            a1.to_scalar[DType.int32]() == value_a,
            "Int32 scalar attribute failed",
        )
        alias value_b: Int32 = -666
        alias a2 = Attribute(name="test", value=value_b)
        assert_true(
            a2.to_scalar[DType.int32]() == value_b,
            "Int32 scalar attribute failed",
        )
    fn test_attribute_small_scalar() raises:
        # A magnitude near the bottom of the Float32 range.
        alias value_a: Float32 = 1e-18
        alias a = Attribute(name="test", value=value_a)
        assert_true(
            a.to_scalar[DType.float32]() == value_a,
            "SMALL scalar attribute failed",
        )
    fn test_attribute_big_scalar() raises:
        # NOTE(review): 1e40 exceeds Float32's finite range (~3.4e38), so this
        # presumably round-trips as +inf — confirm this is the intended check.
        alias value_a: Float32 = 1e40
        alias a = Attribute(name="test", value=value_a)
        assert_true(
            a.to_scalar[DType.float32]() == value_a,
            "BIG scalar attribute failed",
        )
    test_float32()
    test_float_literal()
    test_float64()
    test_int32()
    test_attribute_small_scalar()
    test_attribute_big_scalar()
fn main():
    # Entry point: run every attribute test, reporting (not propagating) failures.
    try:
        test_attribute_key()
        test_attribute_int()
        test_attribute_string()
        test_attribute_tensor_shape()
        test_attribute_static_int_tuple()
        test_attribute_scalar()
    except e:
        print("[ERROR] Error in attributes")
        print(e)
| basalt/tests/mojo/test_attributes.mojo | false |
from math import log, exp
from testing import assert_equal
from basalt import dtype, nelts
from basalt.autograd.attributes import AttributeVector, Attribute
from basalt.autograd import OP
from basalt.nn import Tensor, TensorShape
from basalt.utils.tensorutils import fill, tsum
from tests import (
test_unary_op_backward,
test_binary_op_backward,
test_ternary_op_backward,
)
fn test_ADD() raises:
    # d(a+b)/da = 1 and d(a+b)/db = 1, so both gradients equal the upstream grad.
    alias shape = TensorShape(2, 3)
    var a = Tensor[dtype](shape)
    var b = Tensor[dtype](shape)
    var upstream = Tensor[dtype](shape)
    fill(a, 1.0)
    fill(b, 2.0)
    fill(upstream, 1.0)
    var expected = Tensor[dtype](shape)
    fill(expected, 1.0)
    test_binary_op_backward[OP.ADD, shape, shape, shape](
        a, b, upstream, expected, expected
    )
fn test_SUB() raises:
    # d(a-b)/da = 1 and d(a-b)/db = -1.
    alias shape = TensorShape(2, 3)
    var a = Tensor[dtype](shape)
    var b = Tensor[dtype](shape)
    var upstream = Tensor[dtype](shape)
    fill(a, 2.0)
    fill(b, 1.0)
    fill(upstream, 1.0)
    var grad_a = Tensor[dtype](shape)
    var grad_b = Tensor[dtype](shape)
    fill(grad_a, 1.0)
    fill(grad_b, -1.0)
    test_binary_op_backward[OP.SUB, shape, shape, shape](
        a, b, upstream, grad_a, grad_b
    )
fn test_MUL() raises:
    # d(a*b)/da = b and d(a*b)/db = a.
    alias shape = TensorShape(2, 3)
    var a = Tensor[dtype](shape)
    var b = Tensor[dtype](shape)
    var upstream = Tensor[dtype](shape)
    fill(a, 1.0)
    fill(b, 2.0)
    fill(upstream, 1.0)
    var grad_a = Tensor[dtype](shape)
    var grad_b = Tensor[dtype](shape)
    fill(grad_a, 2.0)  # = b
    fill(grad_b, 1.0)  # = a
    test_binary_op_backward[OP.MUL, shape, shape, shape](
        a, b, upstream, grad_a, grad_b
    )
fn test_DIV() raises:
    # d(a/b)/da = 1/b and d(a/b)/db = -a/b^2.
    alias shape = TensorShape(2, 3)
    var a = Tensor[dtype](shape)
    var b = Tensor[dtype](shape)
    var upstream = Tensor[dtype](shape)
    fill(a, 1.0)
    fill(b, 2.0)
    fill(upstream, 1.0)
    var grad_a = Tensor[dtype](shape)
    var grad_b = Tensor[dtype](shape)
    fill(grad_a, 0.5)  # 1 / 2
    fill[dtype](grad_b, -0.25)  # -1 / 2^2
    test_binary_op_backward[OP.DIV, shape, shape, shape](
        a, b, upstream, grad_a, grad_b
    )
fn test_DOT() raises:
    # For C = A.B: dA = UG.B^T and dB = A^T.UG. With A=1s, B=2s, UG=1s the
    # per-element results are 2*2.0 = 4.0 for dA and 2*1.0 = 2.0 for dB.
    alias a_shape = TensorShape(2, 3)
    alias b_shape = TensorShape(3, 2)
    alias ug_shape = TensorShape(2, 2)
    var a = Tensor[dtype](a_shape)
    var b = Tensor[dtype](b_shape)
    var upstream = Tensor[dtype](ug_shape)
    fill(a, 1.0)
    fill(b, 2.0)
    fill(upstream, 1.0)
    var grad_a = Tensor[dtype](a_shape)
    var grad_b = Tensor[dtype](b_shape)
    fill(grad_a, 4.0)
    fill(grad_b, 2.0)
    test_binary_op_backward[OP.DOT, a_shape, b_shape, ug_shape](
        a, b, upstream, grad_a, grad_b
    )
fn test_EXP() raises:
    # d(exp(x))/dx = exp(x), scaled by the upstream gradient.
    alias shape = TensorShape(2, 3)
    var x = Tensor[dtype](shape)
    var upstream = Tensor[dtype](shape)
    fill(x, 2.0)
    fill(upstream, 5.0)
    var expected = Tensor[dtype](shape)
    fill(expected, 5.0 * exp[dtype, 1](2.0))
    test_unary_op_backward[OP.EXP, shape, shape](x, upstream, expected)
fn test_LOG() raises:
    # d(log(x))/dx = 1/x, scaled by the upstream gradient.
    alias shape = TensorShape(2, 3)
    var x = Tensor[dtype](shape)
    var upstream = Tensor[dtype](shape)
    fill(x, 2.0)
    fill(upstream, 5.0)
    var expected = Tensor[dtype](shape)
    fill(expected, 5.0 / 2.0)
    test_unary_op_backward[OP.LOG, shape, shape](x, upstream, expected)
fn test_POW() raises:
    # d(a^b)/da = b * a^(b-1); d(a^b)/db = a^b * ln(a), summed over all
    # elements because the exponent is a scalar of shape (1,).
    alias t1_shape = TensorShape(2, 3)
    alias t2_shape = TensorShape(1)
    alias ug_shape = TensorShape(2, 3)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    var ug: Tensor[dtype] = Tensor[dtype](ug_shape)
    fill(t1, 2.0)
    t2[0] = 2
    fill(ug, 1.0)
    var expected_grad1 = Tensor[dtype](t1_shape)
    var expected_grad2 = Tensor[dtype](t2_shape)
    fill(expected_grad1, 4.0)  # 2 * 2^(2-1) = 4
    var temp = Tensor[dtype](2, 3)
    fill(temp, (2**2) * log[dtype, 1](2))  # per-element d/db = 2^2 * ln(2)
    expected_grad2[0] = tsum(temp)  # reduce over the broadcasted exponent
    test_binary_op_backward[OP.POW, t1_shape, t2_shape, ug_shape](t1, t2, ug, expected_grad1, expected_grad2)
    # Edge case 0^0: both gradients are expected to be exactly zero.
    fill(t1, 0.0)
    fill(t2, 0)
    fill(ug, 1.0)
    fill(expected_grad1, 0.0)
    fill(expected_grad2, 0.0)
    test_binary_op_backward[OP.POW, t1_shape, t2_shape, ug_shape](t1, t2, ug, expected_grad1, expected_grad2)
fn test_SUM() raises:
    # Full-reduction SUM broadcasts the scalar upstream grad to every element.
    alias in_shape = TensorShape(2, 3)
    alias ug_shape = TensorShape(1)
    var x = Tensor[dtype](in_shape)
    var upstream = Tensor[dtype](ug_shape)
    fill(x, 1.0)
    fill(upstream, 9.0)
    var expected = Tensor[dtype](in_shape)
    fill(expected, 9.0)
    test_unary_op_backward[OP.SUM, in_shape, ug_shape](x, upstream, expected)
fn test_SUM_0() raises:
    # Sum over axis 0: the (1, 3) upstream gradient is broadcast along axis 0,
    # so every row of the (2, 3) input gradient repeats ug's column values.
    alias t1_shape = TensorShape(2, 3)
    alias ug_shape = TensorShape(1, 3)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var ug: Tensor[dtype] = Tensor[dtype](ug_shape)
    fill(t1, 1.0)
    ug[0] = 0.0
    ug[1] = 1.0
    ug[2] = 2.0
    alias attributes = AttributeVector(Attribute("axis", 0))
    var expected_grad1 = Tensor[dtype](t1_shape)
    for i in range(expected_grad1.num_elements()):
        expected_grad1[i] = i % 3  # column-index pattern 0,1,2,0,1,2
    test_unary_op_backward[OP.SUM, t1_shape, ug_shape, attributes](
        t1, ug, expected_grad1
    )
fn test_SUM_1() raises:
    # Sum over axis 1: the (2, 1) upstream gradient is broadcast along axis 1,
    # so each row of the (2, 3) input gradient repeats its row's ug value.
    alias t1_shape = TensorShape(2, 3)
    alias ug_shape = TensorShape(2, 1)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var ug: Tensor[dtype] = Tensor[dtype](ug_shape)
    fill(t1, 1.0)
    ug[0] = 0.0
    ug[1] = 1.0
    alias attributes = AttributeVector(Attribute("axis", 1))
    var expected_grad1 = Tensor[dtype](t1_shape)
    for i in range(expected_grad1.num_elements()):
        expected_grad1[i] = 0 if i < 3 else 1  # row 0 -> 0.0, row 1 -> 1.0
    test_unary_op_backward[OP.SUM, t1_shape, ug_shape, attributes](
        t1, ug, expected_grad1
    )
fn test_MAX() raises:
    # The upstream gradient is split evenly among all tied maximum elements:
    # two elements share the max 2.0, so each receives 9.0 / 2 = 4.5.
    alias in_shape = TensorShape(2, 3)
    alias ug_shape = TensorShape(1)
    var x = Tensor[dtype](in_shape)
    var upstream = Tensor[dtype](ug_shape)
    fill(x, 1.0)
    x[0] = 2.0
    x[1] = 2.0
    fill(upstream, 9.0)
    var expected = Tensor[dtype](in_shape)
    expected[0] = 4.5
    expected[1] = 4.5
    test_unary_op_backward[OP.MAX, in_shape, ug_shape](x, upstream, expected)
fn test_MAX_0() raises:
    # Max over axis 0 of a (2, 3, 2) tensor filled with 1..12, except t1[0] is
    # bumped to 7 so positions 0 and 6 tie for their slice's max: the upstream
    # gradient (2.0) is split evenly between them (1.0 each); all other maxima
    # sit in the second axis-0 slab and receive the full 2.0.
    alias t1_shape = TensorShape(2, 3, 2)
    alias ug_shape = TensorShape(1, 3, 2)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var ug: Tensor[dtype] = Tensor[dtype](ug_shape)
    for i in range(t1.num_elements()):
        t1[i] = i + 1
    t1[0] = 7.0
    fill(ug, 2.0)
    alias attributes = AttributeVector(Attribute("axis", 0))
    var expected_grad = Tensor[dtype](t1_shape)
    expected_grad[0] = 1.0
    expected_grad[6] = 1.0
    expected_grad[7] = 2.0
    expected_grad[8] = 2.0
    expected_grad[9] = 2.0
    expected_grad[10] = 2.0
    expected_grad[11] = 2.0
    test_unary_op_backward[OP.MAX, t1_shape, ug_shape, attributes](
        t1, ug, expected_grad
    )
fn test_MAX_1() raises:
    # Max over axis 1 of a (2, 3, 2) tensor filled with 1..12, except t1[0] is
    # bumped to 5 so positions 0 and 4 tie within the first batch: the upstream
    # gradient (2.0) is split between them (1.0 each); the remaining maxima
    # (indices 5, 10, 11) receive the full 2.0.
    alias t1_shape = TensorShape(2, 3, 2)
    alias ug_shape = TensorShape(2, 1, 2)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var ug: Tensor[dtype] = Tensor[dtype](ug_shape)
    for i in range(t1.num_elements()):
        t1[i] = i + 1
    t1[0] = 5.0
    fill(ug, 2.0)
    alias attributes = AttributeVector(Attribute("axis", 1))
    var expected_grad = Tensor[dtype](t1_shape)
    expected_grad[0] = 1.0
    expected_grad[4] = 1.0
    expected_grad[5] = 2.0
    expected_grad[10] = 2.0
    expected_grad[11] = 2.0
    test_unary_op_backward[OP.MAX, t1_shape, ug_shape, attributes](
        t1, ug, expected_grad
    )
fn test_MAX_2() raises:
    # Max over axis 2 of a (2, 3, 2) tensor filled with 1..12, except t1[0] is
    # bumped to 2 so positions 0 and 1 tie within the first pair: the upstream
    # gradient (2.0) is split between them (1.0 each); every other pair's
    # maximum (odd indices 3..11) receives the full 2.0.
    alias t1_shape = TensorShape(2, 3, 2)
    alias ug_shape = TensorShape(2, 3, 1)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var ug: Tensor[dtype] = Tensor[dtype](ug_shape)
    for i in range(t1.num_elements()):
        t1[i] = i + 1
    t1[0] = 2.0
    fill(ug, 2.0)
    alias attributes = AttributeVector(Attribute("axis", 2))
    var expected_grad = Tensor[dtype](t1_shape)
    expected_grad[0] = 1.0
    expected_grad[1] = 1.0
    expected_grad[3] = 2.0
    expected_grad[5] = 2.0
    expected_grad[7] = 2.0
    expected_grad[9] = 2.0
    expected_grad[11] = 2.0
    test_unary_op_backward[OP.MAX, t1_shape, ug_shape, attributes](
        t1, ug, expected_grad
    )
fn test_MEAN() raises:
    # Full-reduction MEAN: each element's gradient is ug / num_elements = 9/6.
    alias in_shape = TensorShape(2, 3)
    alias ug_shape = TensorShape(1)
    var x = Tensor[dtype](in_shape)
    var upstream = Tensor[dtype](ug_shape)
    fill(x, 1.0)
    fill(upstream, 9.0)
    var expected = Tensor[dtype](in_shape)
    fill(expected, 9.0 / 6.0)
    test_unary_op_backward[OP.MEAN, in_shape, ug_shape](x, upstream, expected)
fn test_MEAN_0() raises:
    # Mean over axis 0: each input element receives ug / axis_size = 3.0 / 2.
    alias t1_shape = TensorShape(2, 3)
    alias ug_shape = TensorShape(1, 3)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var ug: Tensor[dtype] = Tensor[dtype](ug_shape)
    fill(t1, 1.0)
    fill(ug, 3.0)
    alias attributes = AttributeVector(Attribute("axis", 0))
    var expected_grad = Tensor[dtype](t1_shape)
    for i in range(expected_grad.num_elements()):
        expected_grad[i] = 1.0 / t1_shape[0] * 3.0
    test_unary_op_backward[OP.MEAN, t1_shape, ug_shape, attributes](
        t1, ug, expected_grad
    )
fn test_MEAN_1() raises:
    # Mean over axis 1: each input element receives ug / axis_size = 3.0 / 3.
    alias t1_shape = TensorShape(2, 3)
    alias ug_shape = TensorShape(2, 1)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var ug: Tensor[dtype] = Tensor[dtype](ug_shape)
    fill(t1, 1.0)
    fill(ug, 3.0)
    alias attributes = AttributeVector(Attribute("axis", 1))
    var expected_grad = Tensor[dtype](t1_shape)
    for i in range(expected_grad.num_elements()):
        expected_grad[i] = 1.0 / t1_shape[1] * 3.0
    test_unary_op_backward[OP.MEAN, t1_shape, ug_shape, attributes](
        t1, ug, expected_grad
    )
fn test_TRANSPOSE() raises:
    # The backward of TRANSPOSE is the inverse permutation applied to the
    # upstream gradient. Expected values are built by scattering ug elements
    # back through the input tensor's strides.
    alias t1_shape = TensorShape(2, 3, 4)
    alias ug_shape = TensorShape(4, 3, 2)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var ug: Tensor[dtype] = Tensor[dtype](ug_shape)
    # Helper: fill a tensor with 1, 2, 3, ...
    fn arange(inout t: Tensor[dtype]):
        var n = t.num_elements()
        for i in range(n):
            t[i] = i + 1
    arange(t1)
    arange(ug)
    # With no attributes, transpose reverses the axis order: (0,1,2) -> (2,1,0).
    var expected_grad = Tensor[dtype](t1_shape)
    var t1_strides = t1_shape.strides()
    for i in range(ug_shape[0]):
        for j in range(ug_shape[1]):
            for k in range(ug_shape[2]):
                # ug index (i, j, k) maps back to input index (k, j, i).
                expected_grad[k * t1_strides[0] + j * t1_strides[1] + i] = ug[
                    i * ug_shape[1] * ug_shape[2] + j * ug_shape[2] + k
                ]
    test_unary_op_backward[OP.TRANSPOSE, t1_shape, ug_shape](t1, ug, expected_grad)
    # Explicit permutation (1, 2, 0): ug index (i, j, k) maps back to (k, i, j).
    alias ug_shape_2 = TensorShape(3, 4, 2)
    ug = Tensor[dtype](ug_shape_2)
    arange(ug)
    alias attributes_2 = AttributeVector(Attribute("axes", TensorShape(1, 2, 0)))
    expected_grad = Tensor[dtype](t1_shape)
    for i in range(ug_shape_2[0]):
        for j in range(ug_shape_2[1]):
            for k in range(ug_shape_2[2]):
                expected_grad[k * t1_strides[0] + i * t1_strides[1] + j] = ug[
                    i * ug_shape_2[1] * ug_shape_2[2] + j * ug_shape_2[2] + k
                ]
    test_unary_op_backward[OP.TRANSPOSE, t1_shape, ug_shape_2, attributes_2](
        t1, ug, expected_grad
    )
fn test_FLATTEN() raises:
    # FLATTEN's backward reshapes the upstream gradient back to the input shape.
    alias in_shape = TensorShape(2, 3)
    alias ug_shape = TensorShape(in_shape.num_elements())
    var x = Tensor[dtype](in_shape)
    var upstream = Tensor[dtype](ug_shape)
    fill(upstream, 1.0)
    assert_equal(upstream.dim(0), 6)
    var expected = Tensor[dtype](in_shape)
    fill(expected, 1.0)
    test_unary_op_backward[OP.FLATTEN, in_shape, ug_shape](x, upstream, expected)
fn test_RESHAPE() raises:
    # RESHAPE's backward passes the upstream gradient through unchanged,
    # reinterpreted with the input's shape.
    alias in_shape = TensorShape(2, 2, 5)
    alias ug_shape = TensorShape(2, 10)
    var x = Tensor[dtype](in_shape)
    var upstream = Tensor[dtype](ug_shape)
    var expected = Tensor[dtype](in_shape)
    for i in range(20):
        upstream[i] = i + 1
        expected[i] = i + 1
    test_unary_op_backward[OP.RESHAPE, in_shape, ug_shape](x, upstream, expected)
fn main():
    # Entry point: run every backward-pass test, reporting (not propagating) failures.
    try:
        test_ADD()
        test_SUB()
        test_MUL()
        test_DIV()
        test_DOT()
        test_EXP()
        test_LOG()
        test_POW()
        test_SUM()
        test_SUM_0()
        test_SUM_1()
        test_MAX()
        test_MAX_0()
        test_MAX_1()
        test_MAX_2()
        test_MEAN()
        test_MEAN_0()
        test_MEAN_1()
        test_TRANSPOSE()
        test_FLATTEN()
        test_RESHAPE()
    except e:
        print(e)
        print("[ERROR] Error in backward pass.")
| basalt/tests/mojo/test_backward.mojo | false |
from testing import assert_equal
from basalt import dtype
from basalt.nn import Tensor, TensorShape
from basalt.autograd import Symbol
from basalt.utils.collection import Collection
from basalt.utils.tensorutils import fill
from tests import assert_tensors_equal
fn test_append_tensors() raises:
    # Appending grows `size` while `capacity` remains at its initial value.
    alias shape_a = TensorShape(1, 10)
    alias shape_b = TensorShape(2, 20)
    var sym_a = Symbol(0, dtype, shape_a, True)
    var sym_b = Symbol(1, dtype, shape_b, True)
    var collection = Collection(capacity=2)
    assert_equal(collection.capacity, 2)
    assert_equal(collection.size, 0)
    collection.append(Tensor[dtype](sym_a.shape), sym_a)
    assert_equal(collection.size, 1)
    collection.append(Tensor[dtype](sym_b.shape), sym_b)
    assert_equal(collection.size, 2)
fn test_get_tensor_reference() raises:
    # Tensors moved into the collection must be retrievable by their Symbol
    # with their contents intact.
    alias t1_shape = TensorShape(1, 10)
    alias t2_shape = TensorShape(2, 20)
    var s1 = Symbol(0, dtype, t1_shape, True)
    var s2 = Symbol(1, dtype, t2_shape, True)
    var t1 = Tensor[dtype](s1.shape)
    var t2 = Tensor[dtype](s2.shape)
    fill(t1, 1)
    fill(t2, 2)
    var c = Collection(capacity=2)
    # `^` transfers ownership of the tensors into the collection.
    c.append(t1 ^, s1)
    c.append(t2 ^, s2)
    var t1_expected = Tensor[dtype](s1.shape)
    var t2_expected = Tensor[dtype](s2.shape)
    fill(t1_expected, 1)
    fill(t2_expected, 2)
    assert_tensors_equal(c[s1], t1_expected)
    assert_tensors_equal(c[s2], t2_expected)
fn test_resize_collection() raises:
    # Appending beyond capacity must reallocate (capacity doubles: 1 -> 2 -> 4)
    # while preserving previously stored tensor contents.
    alias t1_shape = TensorShape(1, 10)
    alias t2_shape = TensorShape(2, 20)
    alias t3_shape = TensorShape(3, 30)
    var s1 = Symbol(0, dtype, t1_shape, True)
    var s2 = Symbol(1, dtype, t2_shape, True)
    var s3 = Symbol(2, dtype, t3_shape, True)
    var t1 = Tensor[dtype](s1.shape)
    var t2 = Tensor[dtype](s2.shape)
    var t3 = Tensor[dtype](s3.shape)
    fill(t1, 1)
    fill(t2, 2)
    fill(t3, 3)
    var c = Collection(capacity=1)
    assert_equal(c.size, 0)
    assert_equal(c.capacity, 1)
    c.append(t1 ^, s1)
    assert_equal(c.size, 1)
    assert_equal(c.capacity, 1)
    c.append(t2 ^, s2)
    assert_equal(c.size, 2)
    assert_equal(c.capacity, 2)
    c.append(t3 ^, s3)
    assert_equal(c.size, 3)
    assert_equal(c.capacity, 4)
    # Contents must survive the reallocations.
    var t1_expected = Tensor[dtype](s1.shape)
    var t2_expected = Tensor[dtype](s2.shape)
    var t3_expected = Tensor[dtype](s3.shape)
    fill(t1_expected, 1)
    fill(t2_expected, 2)
    fill(t3_expected, 3)
    assert_tensors_equal(c[s1], t1_expected)
    assert_tensors_equal(c[s2], t2_expected)
    assert_tensors_equal(c[s3], t3_expected)
fn test_set_zero() raises:
    # set_zero() must zero every stored tensor in place.
    alias t1_shape = TensorShape(1, 10)
    alias t2_shape = TensorShape(2, 20)
    var s1 = Symbol(0, dtype, t1_shape, True)
    var s2 = Symbol(1, dtype, t2_shape, True)
    var t1 = Tensor[dtype](s1.shape)
    var t2 = Tensor[dtype](s2.shape)
    fill(t1, 1)
    fill(t2, 2)
    var c = Collection(capacity=2)
    c.append(t1 ^, s1)
    c.append(t2 ^, s2)
    var t1_expected = Tensor[dtype](s1.shape)
    var t2_expected = Tensor[dtype](s2.shape)
    fill(t1_expected, 1)
    fill(t2_expected, 2)
    assert_tensors_equal(c[s1], t1_expected)
    assert_tensors_equal(c[s2], t2_expected)
    c.set_zero()
    # Freshly constructed tensors are zero-valued, hence the comparison.
    assert_tensors_equal(c[s1], Tensor[dtype](t1_shape))
    assert_tensors_equal(c[s2], Tensor[dtype](t2_shape))
fn test_operate_on_reference() raises:
    # c[symbol] must yield a mutable reference: writing through it (both via
    # some_operation and via fill) must be visible on later reads.
    alias res_shape = TensorShape(1, 10)
    alias t1_shape = TensorShape(1, 10)
    var sr = Symbol(0, dtype, t1_shape, True)
    var s1 = Symbol(1, dtype, t1_shape, True)
    var res = Tensor[dtype](res_shape)
    var t1 = Tensor[dtype](s1.shape)
    var c = Collection(capacity=2)
    c.append(res ^, sr)
    c.append(t1 ^, s1)
    # Copies t1's elements into res, element by element.
    fn some_operation[
        res_shape: TensorShape, t_shape: TensorShape
    ](inout res: Tensor[dtype], t1: Tensor[dtype]):
        for i in range(res.num_elements()):
            res[i] = t1[i]
    for i in range(1, 10):
        # res takes the previous value of t1 (i - 1), then t1 is set to i.
        some_operation[res_shape, t1_shape](c[sr], c[s1])
        fill(c[s1], i)
        var res_expected = Tensor[dtype](res_shape)
        var t1_expected = Tensor[dtype](t1_shape)
        fill(res_expected, i - 1)
        fill(t1_expected, i)
        assert_tensors_equal(c[sr], res_expected)
        assert_tensors_equal(c[s1], t1_expected)
fn main() raises:
    # Entry point: run every collection test; print, then re-raise any failure.
    try:
        test_append_tensors()
        test_get_tensor_reference()
        test_resize_collection()
        test_set_zero()
        test_operate_on_reference()
    except e:
        print(e)
        raise e
| basalt/tests/mojo/test_collection.mojo | false |
<filename>basalt/tests/mojo/test_dynamic_ops.mojo
from basalt import dtype, nelts
from basalt.autograd import Graph, Symbol, OP
from basalt.autograd.ops.dynamics import CONCAT, SPLIT
from basalt.nn import Model, Tensor, TensorShape
from basalt.utils.tensorutils import fill
from tests import assert_tensors_equal, create_graph_concat, create_graph_split
fn test_CONCAT_0() raises:
    # Concat along dim 0: (1,2,3) + (1,2,3) + (2,2,3) -> (4,2,3). Forward
    # stacks the constant-filled inputs; backward routes each slab of the
    # upstream gradient back to its source input.
    # default: dim = 0
    # FORWARD
    alias t1_shape = TensorShape(1, 2, 3)
    alias t2_shape = TensorShape(1, 2, 3)
    alias t3_shape = TensorShape(2, 2, 3)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    var t3: Tensor[dtype] = Tensor[dtype](t3_shape)
    fill(t1, 5.0)
    fill(t2, 10.0)
    fill(t3, 15.0)
    var expected = Tensor[dtype](4, 2, 3)
    for i in range(4):
        for j in range(2):
            for k in range(3):
                if i < 1:  # i because dim = 0
                    expected[i * 2 * 3 + j * 3 + k] = 5.0
                elif i >= 1 and i < 2:
                    expected[i * 2 * 3 + j * 3 + k] = 10.0
                else:
                    expected[i * 2 * 3 + j * 3 + k] = 15.0
    alias graph = create_graph_concat(t1_shape, t2_shape, t3_shape, dim=0)
    var model = Model[graph]()
    var res = model.forward(t1, t2, t3)
    assert_tensors_equal["almost"](res, expected)
    # BACKWARD
    var ug = Tensor[dtype](4, 2, 3)
    for i in range(4):
        for j in range(2):
            for k in range(3):
                if i < 1:  # i because dim = 0
                    ug[i * 2 * 3 + j * 3 + k] = 1.0
                elif i >= 1 and i < 2:
                    ug[i * 2 * 3 + j * 3 + k] = 2.0
                else:
                    ug[i * 2 * 3 + j * 3 + k] = 3.0
    model.backward(ug)
    # Each input's gradient is the slab of ug that came from it.
    var grad1_expected = Tensor[dtype](t1_shape)
    var grad2_expected = Tensor[dtype](t2_shape)
    var grad3_expected = Tensor[dtype](t3_shape)
    fill(grad1_expected, 1.0)
    fill(grad2_expected, 2.0)
    fill(grad3_expected, 3.0)
    # Extracting the gradients
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[0]], grad1_expected
    )
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[1]], grad2_expected
    )
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[2]], grad3_expected
    )
fn test_CONCAT_1() raises:
    # Concat along dim 1: (2,2,5) + (2,4,5) + (2,1,5) -> (2,7,5). Forward
    # stacks the constant-filled inputs along axis 1; backward routes each
    # band of the upstream gradient back to its source input.
    # dim = 1
    alias t1_shape = TensorShape(2, 2, 5)
    alias t2_shape = TensorShape(2, 4, 5)
    alias t3_shape = TensorShape(2, 1, 5)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    var t3: Tensor[dtype] = Tensor[dtype](t3_shape)
    fill(t1, 5.0)
    fill(t2, 10.0)
    fill(t3, 15.0)
    var expected = Tensor[dtype](2, 7, 5)
    for i in range(2):
        for j in range(7):
            for k in range(5):
                if j < 2:  # j because dim = 1
                    expected[i * 7 * 5 + j * 5 + k] = 5.0
                elif j >= 2 and j < 6:
                    expected[i * 7 * 5 + j * 5 + k] = 10.0
                else:
                    expected[i * 7 * 5 + j * 5 + k] = 15.0
    alias graph = create_graph_concat(t1_shape, t2_shape, t3_shape, dim=1)
    var model = Model[graph]()
    var res = model.forward(t1, t2, t3)
    assert_tensors_equal["almost"](res, expected)
    # BACKWARD
    var ug = Tensor[dtype](2, 7, 5)
    for i in range(2):
        for j in range(7):
            for k in range(5):
                if j < 2:  # j because dim = 1
                    ug[i * 7 * 5 + j * 5 + k] = 1.0
                elif j >= 2 and j < 6:
                    ug[i * 7 * 5 + j * 5 + k] = 2.0
                else:
                    ug[i * 7 * 5 + j * 5 + k] = 3.0
    model.backward(ug)
    # Each input's gradient is the band of ug that came from it.
    var grad1_expected = Tensor[dtype](t1_shape)
    var grad2_expected = Tensor[dtype](t2_shape)
    var grad3_expected = Tensor[dtype](t3_shape)
    fill(grad1_expected, 1.0)
    fill(grad2_expected, 2.0)
    fill(grad3_expected, 3.0)
    # Extracting the gradients
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[0]], grad1_expected
    )
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[1]], grad2_expected
    )
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[2]], grad3_expected
    )
fn test_CONCAT_2() raises:
    # Forward + backward test of CONCAT along dim=2:
    # (2,3,1) + (2,3,2) + (2,3,3) -> (2,3,6). Distinct fill constants make
    # each input's output region (and its gradient) identifiable.
    # dim = 2
    alias t1_shape = TensorShape(2, 3, 1)
    alias t2_shape = TensorShape(2, 3, 2)
    alias t3_shape = TensorShape(2, 3, 3)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    var t3: Tensor[dtype] = Tensor[dtype](t3_shape)
    fill(t1, 5.0)
    fill(t2, 10.0)
    fill(t3, 15.0)
    # Expected output: column 0 from t1, columns 1-2 from t2, columns 3-5
    # from t3, addressed via flat row-major indexing.
    var expected = Tensor[dtype](2, 3, 6)
    for i in range(2):
        for j in range(3):
            for k in range(6):
                if k < 1: # k because dim = 2
                    expected[i * 3 * 6 + j * 6 + k] = 5.0
                elif k >= 1 and k < 3:
                    expected[i * 3 * 6 + j * 6 + k] = 10.0
                else:
                    expected[i * 3 * 6 + j * 6 + k] = 15.0
    alias graph = create_graph_concat(t1_shape, t2_shape, t3_shape, dim=2)
    var model = Model[graph]()
    var res = model.forward(t1, t2, t3)
    assert_tensors_equal["almost"](res, expected)
    # BACKWARD
    # Upstream gradient of 1/2/3 over the regions from t1/t2/t3: each input
    # gradient must be the corresponding constant everywhere.
    var ug = Tensor[dtype](2, 3, 6)
    for i in range(2):
        for j in range(3):
            for k in range(6):
                if k < 1: # k because dim = 2
                    ug[i * 3 * 6 + j * 6 + k] = 1.0
                elif k >= 1 and k < 3:
                    ug[i * 3 * 6 + j * 6 + k] = 2.0
                else:
                    ug[i * 3 * 6 + j * 6 + k] = 3.0
    model.backward(ug)
    var grad1_expected = Tensor[dtype](t1_shape)
    var grad2_expected = Tensor[dtype](t2_shape)
    var grad3_expected = Tensor[dtype](t3_shape)
    fill(grad1_expected, 1.0)
    fill(grad2_expected, 2.0)
    fill(grad3_expected, 3.0)
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[0]], grad1_expected
    )
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[1]], grad2_expected
    )
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[2]], grad3_expected
    )
fn test_SPLIT_0() raises:
    # Forward + backward test of SPLIT along dim=0:
    # (4,5,6) split into sections [1, 2, 1]. The input carries a distinct
    # constant per section so each output (and the scattered gradient) can be
    # checked by value.
    alias t_shape = TensorShape(4, 5, 6)
    alias sections = List[Int](1, 2, 1)
    var t: Tensor[dtype] = Tensor[dtype](t_shape)
    for i in range(4):
        for j in range(5):
            for k in range(6):
                if i < 1:
                    t[i * 5 * 6 + j * 6 + k] = 5.0
                elif i >= 1 and i < 3:
                    t[i * 5 * 6 + j * 6 + k] = 10.0
                else:
                    t[i * 5 * 6 + j * 6 + k] = 15.0
    var expected1 = Tensor[dtype](1, 5, 6)
    var expected2 = Tensor[dtype](2, 5, 6)
    var expected3 = Tensor[dtype](1, 5, 6)
    fill(expected1, 5.0)
    fill(expected2, 10.0)
    fill(expected3, 15.0)
    alias graph = create_graph_split(t_shape, sections, dim=0)
    var model = Model[graph]()
    var results = model.inference(t)
    assert_tensors_equal["almost"](results[0], expected1)
    assert_tensors_equal["almost"](results[1], expected2)
    assert_tensors_equal["almost"](results[2], expected3)
    # BACKWARD
    # Upstream gradients of 1/2/3 per section should be concatenated back
    # into a single input gradient with the same section layout.
    var ug1 = Tensor[dtype](1, 5, 6)
    var ug2 = Tensor[dtype](2, 5, 6)
    var ug3 = Tensor[dtype](1, 5, 6)
    fill(ug1, 1.0)
    fill(ug2, 2.0)
    fill(ug3, 3.0)
    model.backward(ug1, ug2, ug3)
    var grad_expected = Tensor[dtype](t_shape)
    for i in range(4):
        for j in range(5):
            for k in range(6):
                if i < 1:
                    grad_expected[i * 5 * 6 + j * 6 + k] = 1.0
                elif i >= 1 and i < 3:
                    grad_expected[i * 5 * 6 + j * 6 + k] = 2.0
                else:
                    grad_expected[i * 5 * 6 + j * 6 + k] = 3.0
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[0]], grad_expected
    )
fn test_SPLIT_1() raises:
    # Forward + backward test of SPLIT along dim=1:
    # (4,5,6) split into sections [1, 3, 1] over the middle axis. Distinct
    # constants per section make outputs and gradients checkable by value.
    alias t_shape = TensorShape(4, 5, 6)
    alias sections = List[Int](1, 3, 1)
    var t: Tensor[dtype] = Tensor[dtype](t_shape)
    for i in range(4):
        for j in range(5):
            for k in range(6):
                if j < 1:
                    t[i * 5 * 6 + j * 6 + k] = 5.0
                elif j >= 1 and j < 4:
                    t[i * 5 * 6 + j * 6 + k] = 10.0
                else:
                    t[i * 5 * 6 + j * 6 + k] = 15.0
    var expected1 = Tensor[dtype](4, 1, 6)
    var expected2 = Tensor[dtype](4, 3, 6)
    var expected3 = Tensor[dtype](4, 1, 6)
    fill(expected1, 5.0)
    fill(expected2, 10.0)
    fill(expected3, 15.0)
    alias graph = create_graph_split(t_shape, sections, dim=1)
    var model = Model[graph]()
    var results = model.inference(t)
    assert_tensors_equal["almost"](results[0], expected1)
    assert_tensors_equal["almost"](results[1], expected2)
    assert_tensors_equal["almost"](results[2], expected3)
    # BACKWARD
    # Section gradients of 1/2/3 must be reassembled along dim=1.
    var ug1 = Tensor[dtype](4, 1, 6)
    var ug2 = Tensor[dtype](4, 3, 6)
    var ug3 = Tensor[dtype](4, 1, 6)
    fill(ug1, 1.0)
    fill(ug2, 2.0)
    fill(ug3, 3.0)
    model.backward(ug1, ug2, ug3)
    var grad_expected = Tensor[dtype](t_shape)
    for i in range(4):
        for j in range(5):
            for k in range(6):
                if j < 1:
                    grad_expected[i * 5 * 6 + j * 6 + k] = 1.0
                elif j >= 1 and j < 4:
                    grad_expected[i * 5 * 6 + j * 6 + k] = 2.0
                else:
                    grad_expected[i * 5 * 6 + j * 6 + k] = 3.0
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[0]], grad_expected
    )
fn test_SPLIT_2() raises:
    # Forward + backward test of SPLIT along dim=2:
    # (4,5,6) split into sections [1, 4, 1] over the innermost axis.
    alias t_shape = TensorShape(4, 5, 6)
    alias sections = List[Int](1, 4, 1)
    var t: Tensor[dtype] = Tensor[dtype](t_shape)
    for i in range(4):
        for j in range(5):
            for k in range(6):
                if k < 1:
                    t[i * 5 * 6 + j * 6 + k] = 5.0
                elif k >= 1 and k < 5:
                    t[i * 5 * 6 + j * 6 + k] = 10.0
                else:
                    t[i * 5 * 6 + j * 6 + k] = 15.0
    var expected1 = Tensor[dtype](4, 5, 1)
    var expected2 = Tensor[dtype](4, 5, 4)
    var expected3 = Tensor[dtype](4, 5, 1)
    fill(expected1, 5.0)
    fill(expected2, 10.0)
    fill(expected3, 15.0)
    alias graph = create_graph_split(t_shape, sections, dim=2)
    var model = Model[graph]()
    var results = model.inference(t)
    assert_tensors_equal["almost"](results[0], expected1)
    assert_tensors_equal["almost"](results[1], expected2)
    assert_tensors_equal["almost"](results[2], expected3)
    # BACKWARD
    # Section gradients of 1/2/3 must be reassembled along dim=2.
    var ug1 = Tensor[dtype](4, 5, 1)
    var ug2 = Tensor[dtype](4, 5, 4)
    var ug3 = Tensor[dtype](4, 5, 1)
    fill(ug1, 1.0)
    fill(ug2, 2.0)
    fill(ug3, 3.0)
    model.backward(ug1, ug2, ug3)
    var grad_expected = Tensor[dtype](t_shape)
    for i in range(4):
        for j in range(5):
            for k in range(6):
                if k < 1:
                    grad_expected[i * 5 * 6 + j * 6 + k] = 1.0
                elif k >= 1 and k < 5:
                    grad_expected[i * 5 * 6 + j * 6 + k] = 2.0
                else:
                    grad_expected[i * 5 * 6 + j * 6 + k] = 3.0
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[0]], grad_expected
    )
fn main():
    # Entry point: run every CONCAT/SPLIT forward + backward test and report
    # the first failure (Mojo test files are plain executables, not a runner).
    try:
        test_CONCAT_0()
        test_CONCAT_1()
        test_CONCAT_2()
        test_SPLIT_0()
        test_SPLIT_1()
        test_SPLIT_2()
    except e:
        print("[ERROR] Error in dynamic ops")
        print(e)
        return
<filename>basalt/tests/mojo/test_loss.mojo
from testing import assert_equal, assert_almost_equal
from basalt import dtype, nelts
from basalt.autograd import Graph, Symbol, OP
from basalt.nn import Model, Tensor, TensorShape, MSELoss, CrossEntropyLoss
from basalt.utils.tensorutils import fill
fn test_MSE_perfect() raises:
    # A perfect prediction (y_pred identical to y_true) must give an MSE of
    # exactly zero, returned as a single-element tensor.
    alias pred_shape = TensorShape(2, 10)  # batch of 2, 10 classes
    alias true_shape = TensorShape(2, 10)
    fn create_graph() -> Graph:
        # Build a tiny graph: two inputs feeding an MSE loss output.
        var g = Graph()
        var y_pred = g.input(pred_shape)
        var y_true = g.input(true_shape)
        var loss = MSELoss(g, y_pred, y_true)
        g.out(loss)
        return g ^
    alias graph = create_graph()
    assert_equal(len(graph.nodes), 3)
    var predictions = Tensor[dtype](pred_shape)
    var targets = Tensor[dtype](true_shape)
    fill(predictions, 1)
    fill(targets, 1)
    var model = Model[graph](inference_only=True)
    var loss = model.inference(predictions, targets)[0]
    assert_equal(loss.dim(0), 1)  # loss is reduced to a single element
    assert_equal(loss[0], 0)  # identical inputs -> zero loss
fn test_MSE_imperfect() raises:
    # MSE against an imperfect prediction: predictions are all 1, targets are
    # 0..9; the loss must match a hand-computed mean of squared differences.
    alias pred_shape = TensorShape(1, 10)  # batch of 1, 10 classes
    alias true_shape = TensorShape(1, 10)
    fn create_graph() -> Graph:
        # Two inputs feeding an MSE loss output.
        var g = Graph()
        var y_pred = g.input(pred_shape)
        var y_true = g.input(true_shape)
        var loss = MSELoss(g, y_pred, y_true)
        g.out(loss)
        return g ^
    alias graph = create_graph()
    assert_equal(len(graph.nodes), 3)
    var predictions = Tensor[dtype](pred_shape)
    var targets = Tensor[dtype](true_shape)
    fill(predictions, 1)
    for idx in range(10):
        targets[idx] = idx
    var model = Model[graph](inference_only=True)
    var loss = model.inference(predictions, targets)[0]
    # Reference value computed directly: mean of (pred - true)^2.
    var reference: Scalar[dtype] = 0.0
    for idx in range(10):
        reference += (predictions[idx] - targets[idx]) ** 2
    reference = reference / true_shape[1]
    assert_almost_equal(loss[0], reference)
fn test_CrossEntropy_perfect() raises:
    # Cross-entropy where the one-hot target matches the highest-probability
    # class for both rows; the expected scalar below is a precomputed
    # reference value for these exact inputs.
    alias y_pred_shape = TensorShape(2, 3)  # batch of 2, 3 classes
    alias y_true_shape = TensorShape(2, 3)
    fn create_graph() -> Graph:
        # Two inputs feeding a cross-entropy loss output.
        var g = Graph()
        var y_pred = g.input(y_pred_shape)
        var y_true = g.input(y_true_shape)
        var loss = CrossEntropyLoss(g, y_pred, y_true)
        g.out(loss)
        return g ^
    alias graph = create_graph()
    assert_equal(len(graph.nodes), 9)
    var y_pred = Tensor[dtype](y_pred_shape)
    var y_true = Tensor[dtype](y_true_shape)
    # Row 0: predicted distribution peaks at class 2, target is class 2.
    y_pred[0 * y_pred.dim(1) + 0] = 0.1
    y_pred[0 * y_pred.dim(1) + 1] = 0.2
    y_pred[0 * y_pred.dim(1) + 2] = 0.7
    y_true[0 * y_true.dim(1) + 0] = 0
    y_true[0 * y_true.dim(1) + 1] = 0
    y_true[0 * y_true.dim(1) + 2] = 1
    # Row 1: predicted distribution peaks at class 0, target is class 0.
    y_pred[1 * y_pred.dim(1) + 0] = 0.7
    y_pred[1 * y_pred.dim(1) + 1] = 0.2
    y_pred[1 * y_pred.dim(1) + 2] = 0.1
    y_true[1 * y_true.dim(1) + 0] = 1
    y_true[1 * y_true.dim(1) + 1] = 0
    y_true[1 * y_true.dim(1) + 2] = 0
    var model = Model[graph](inference_only=True)
    var loss = model.inference(y_pred, y_true)[0]
    assert_equal(loss.shape(), TensorShape(1))
    assert_almost_equal(loss[0], 0.76794958)
fn test_CrossEntropy_imperfect() raises:
    # Cross-entropy where the one-hot target points at a low-probability
    # class in both rows; the loss must be higher than in the "perfect" case.
    # The expected scalar is a precomputed reference for these exact inputs.
    alias y_pred_shape = TensorShape(2, 3)  # batch of 2, 3 classes
    alias y_true_shape = TensorShape(2, 3)
    fn create_graph() -> Graph:
        # Two inputs feeding a cross-entropy loss output.
        var g = Graph()
        var y_pred = g.input(y_pred_shape)
        var y_true = g.input(y_true_shape)
        var loss = CrossEntropyLoss(g, y_pred, y_true)
        g.out(loss)
        return g ^
    alias graph = create_graph()
    var y_pred = Tensor[dtype](y_pred_shape)
    var y_true = Tensor[dtype](y_true_shape)
    # Row 0: prediction peaks at class 2, but the target is class 1.
    y_pred[0 * y_pred.dim(1) + 0] = 0.1
    y_pred[0 * y_pred.dim(1) + 1] = 0.2
    y_pred[0 * y_pred.dim(1) + 2] = 0.7
    y_true[0 * y_true.dim(1) + 0] = 0
    y_true[0 * y_true.dim(1) + 1] = 1
    y_true[0 * y_true.dim(1) + 2] = 0
    # Row 1: prediction peaks at class 0, but the target is class 2.
    y_pred[1 * y_pred.dim(1) + 0] = 0.7
    y_pred[1 * y_pred.dim(1) + 1] = 0.2
    y_pred[1 * y_pred.dim(1) + 2] = 0.1
    y_true[1 * y_true.dim(1) + 0] = 0
    y_true[1 * y_true.dim(1) + 1] = 0
    y_true[1 * y_true.dim(1) + 2] = 1
    var model = Model[graph](inference_only=True)
    var loss = model.inference(y_pred, y_true)[0]
    assert_equal(loss.shape(), TensorShape(1))
    assert_almost_equal(loss[0], 1.31794953)
fn main():
    # Entry point: run all loss-function tests and report the first failure.
    try:
        test_MSE_perfect()
        test_MSE_imperfect()
        test_CrossEntropy_perfect()
        test_CrossEntropy_imperfect()
    except e:
        print("[ERROR] Error in loss")
        print(e)
from basalt import dtype, nelts
from basalt.autograd import OP
from basalt.autograd.attributes import AttributeVector, Attribute
from basalt.autograd.ops.mlops import SIGMOID, RELU, TANH, CLIP, SQUEEZE, UNSQUEEZE
from basalt.nn import Tensor, TensorShape
from basalt.utils.tensorutils import fill
from tests import assert_tensors_equal, test_unary_op, test_unary_op_backward, to_numpy
fn test_SIGMOID() raises:
    # sigmoid(0) == 0.5, so a zero-initialized input maps to all 0.5.
    alias input_shape = TensorShape(2, 3)
    var inputs = Tensor[dtype](input_shape)
    var target = Tensor[dtype](2, 3)
    fill(target, 0.5)
    test_unary_op[OP.SIGMOID, input_shape](inputs, target)
fn test_backward_SIGMOID() raises:
    # d(sigmoid(x))/dx at x=0 is sigmoid(0)*(1-sigmoid(0)) = 0.25, so an
    # upstream gradient of 5 yields an input gradient of 5 * 0.25 everywhere.
    alias input_shape = TensorShape(2, 3)
    alias upstream_shape = TensorShape(2, 3)
    var inputs = Tensor[dtype](input_shape)
    var upstream = Tensor[dtype](upstream_shape)
    fill(upstream, 5.0)
    var grad_target = Tensor[dtype](2, 3)
    fill(grad_target, 5.0 * 0.25)
    test_unary_op_backward[OP.SIGMOID, input_shape, upstream_shape](
        inputs, upstream, grad_target
    )
fn test_RELU() raises:
    # relu keeps positive values and clamps negative ones to zero.
    alias input_shape = TensorShape(2, 3)
    var inputs = Tensor[dtype](input_shape)
    # First three elements positive, last three negative.
    # TODO: When tensors can do slices, this could be changed to two fill functions.
    for idx in range(6):
        inputs[idx] = 3 if idx < 3 else -3
    var target = Tensor[dtype](2, 3)
    for idx in range(6):
        target[idx] = 3 if idx < 3 else 0
    test_unary_op[OP.RELU, input_shape](inputs, target)
fn test_backward_RELU() raises:
    # d(relu)/dx is 1 where the input is positive and 0 where it is negative,
    # so the upstream gradient of 5 passes through only for positive inputs.
    alias input_shape = TensorShape(2, 3)
    alias upstream_shape = TensorShape(2, 3)
    var inputs = Tensor[dtype](input_shape)
    var upstream = Tensor[dtype](upstream_shape)
    for idx in range(6):
        inputs[idx] = 3 if idx < 3 else -3
    fill(upstream, 5.0)
    var grad_target = Tensor[dtype](2, 3)
    for idx in range(6):
        grad_target[idx] = 5.0 if idx < 3 else 0.0
    test_unary_op_backward[OP.RELU, input_shape, upstream_shape](
        inputs, upstream, grad_target
    )
fn test_TANH() raises:
    # tanh(0) == 0, so a zero-initialized input maps to all zeros.
    alias input_shape = TensorShape(2, 3)
    var inputs = Tensor[dtype](input_shape)
    var target = Tensor[dtype](2, 3)
    fill(target, 0.0)
    test_unary_op[OP.TANH, input_shape](inputs, target)
fn test_backward_TANH() raises:
    # d(tanh(x))/dx at x=0 is 1 - tanh(0)^2 = 1, so the upstream gradient of
    # 5 passes through unchanged.
    alias input_shape = TensorShape(2, 3)
    alias upstream_shape = TensorShape(2, 3)
    var inputs = Tensor[dtype](input_shape)
    var upstream = Tensor[dtype](upstream_shape)
    fill(upstream, 5.0)
    var grad_target = Tensor[dtype](2, 3)
    fill(grad_target, 5.0 * 1.0)
    test_unary_op_backward[OP.TANH, input_shape, upstream_shape](
        inputs, upstream, grad_target
    )
fn test_CLIP() raises:
    # CLIP forward over inputs -3..2: no bounds (identity), min only,
    # max only, and both bounds together.
    alias t1_shape = TensorShape(2, 3)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    for i in range(6):
        t1[i] = i - 3
    # Clip without min and max
    var expected_no = t1
    test_unary_op[OP.CLIP, t1_shape](t1, expected_no)
    # Clip with min
    alias min_attr = Attribute("min", -1.1)
    var expected_min = Tensor[dtype](2, 3)
    for i in range(6):
        var val = Scalar[dtype](i - 3)
        expected_min[i] = val if (val > -1.1) else -1.1
    test_unary_op[OP.CLIP, t1_shape, AttributeVector(min_attr)](t1, expected_min)
    # Clip with max
    alias max_attr = Attribute("max", 1.1)
    var expected_max = Tensor[dtype](2, 3)
    for i in range(6):
        var val = Scalar[dtype](i - 3)
        expected_max[i] = val if (val < 1.1) else 1.1
    test_unary_op[OP.CLIP, t1_shape, AttributeVector(max_attr)](t1, expected_max)
    # Clip with min and max
    var expected = Tensor[dtype](2, 3)
    for i in range(6):
        var val = Scalar[dtype](i - 3)
        if val < -1.1:
            expected[i] = -1.1
        elif val > 1.1:
            expected[i] = 1.1
        else:
            expected[i] = val
    test_unary_op[OP.CLIP, t1_shape, AttributeVector(min_attr, max_attr)](t1, expected)
fn test_backward_CLIP() raises:
    # CLIP backward: the upstream gradient (5.0) passes through where the
    # input was inside the clip range and is zeroed where it was clamped.
    alias t1_shape = TensorShape(2, 3)
    alias ug_shape = TensorShape(2, 3)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    for i in range(6):
        t1[i] = i - 3
    var ug: Tensor[dtype] = Tensor[dtype](ug_shape)
    fill(ug, 5.0)
    # Clip without min and max
    var expected_no = ug
    test_unary_op_backward[OP.CLIP, t1_shape, ug_shape](t1, ug, expected_no)
    # Clip with min
    alias min_attr = AttributeVector(Attribute("min", -1.1))
    var expected_min = Tensor[dtype](2, 3)
    for i in range(6):
        var val = Scalar[dtype](i - 3)
        expected_min[i] = 5.0 if (val > -1.1) else 0.0
    test_unary_op_backward[OP.CLIP, t1_shape, ug_shape, min_attr](t1, ug, expected_min)
    # Clip with max
    alias max_attr = AttributeVector(Attribute("max", 1.1))
    var expected_max = Tensor[dtype](2, 3)
    for i in range(6):
        var val = Scalar[dtype](i - 3)
        expected_max[i] = 5.0 if (val < 1.1) else 0.0
    test_unary_op_backward[OP.CLIP, t1_shape, ug_shape, max_attr](t1, ug, expected_max)
    # Clip with min and max
    alias attrs = AttributeVector(Attribute("min", -1.1), Attribute("max", 1.1))
    var expected = Tensor[dtype](2, 3)
    for i in range(6):
        var val = Scalar[dtype](i - 3)
        if val < -1.1 or val > 1.1:
            expected[i] = 0.0
        else:
            expected[i] = 5.0
    test_unary_op_backward[OP.CLIP, t1_shape, ug_shape, attrs](t1, ug, expected)
fn test_SQUEEZE() raises:
    # SQUEEZE forward on (1,2,1,3,1): with no "dims" attribute every size-1
    # axis is removed; with "dims" only the listed axes are removed.
    alias t1_shape = TensorShape(1, 2, 1, 3, 1)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    fill(t1, 5.0)
    # Test with no dims
    var expected = Tensor[dtype](2, 3)
    fill(expected, 5.0)
    test_unary_op[OP.SQUEEZE, t1_shape](t1, expected)
    # Test with one dim
    expected = Tensor[dtype](1, 2, 1, 3)
    fill(expected, 5.0)
    test_unary_op[
        OP.SQUEEZE, t1_shape, AttributeVector(Attribute("dims", TensorShape(4)))
    ](t1, expected)
    expected = Tensor[dtype](1, 2, 3, 1)
    fill(expected, 5.0)
    test_unary_op[
        OP.SQUEEZE, t1_shape, AttributeVector(Attribute("dims", TensorShape(2)))
    ](t1, expected)
    # Test with multiple dims
    expected = Tensor[dtype](1, 2, 3)
    fill(expected, 5.0)
    test_unary_op[
        OP.SQUEEZE, t1_shape, AttributeVector(Attribute("dims", TensorShape(2, 4)))
    ](t1, expected)
fn test_backward_SQUEEZE() raises:
    # Squeeze backward simply reshapes the upstream gradient back to the
    # original input shape; every value passes through unchanged.
    alias input_shape = TensorShape(2, 1, 3, 1)
    alias upstream_shape = TensorShape(2, 3)
    var inputs = Tensor[dtype](input_shape)
    fill(inputs, 5.0)
    var upstream = Tensor[dtype](upstream_shape)
    fill(upstream, 5.0)
    var grad_target = Tensor[dtype](2, 1, 3, 1)
    fill(grad_target, 5.0)
    test_unary_op_backward[OP.SQUEEZE, input_shape, upstream_shape](
        inputs, upstream, grad_target
    )
fn test_UNSQUEEZE() raises:
    # UNSQUEEZE forward on (2,3): inserts size-1 axes at the positions given
    # by the "dims" attribute, including negative positions.
    # UNSQUEEZE here is more similar to jax expand_dims
    alias t1_shape = TensorShape(2, 3)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    fill(t1, 5.0)
    var expected = Tensor[dtype](2, 1, 3, 1)
    fill(expected, 5.0)
    test_unary_op[
        OP.UNSQUEEZE, t1_shape, AttributeVector(Attribute("dims", TensorShape(1, 3)))
    ](t1, expected)
    expected = Tensor[dtype](2, 1, 3)
    fill(expected, 5.0)
    test_unary_op[
        OP.UNSQUEEZE, t1_shape, AttributeVector(Attribute("dims", TensorShape(1)))
    ](t1, expected)
    # Negative dims count from the end of the result shape.
    expected = Tensor[dtype](1, 2, 3)
    fill(expected, 5.0)
    test_unary_op[
        OP.UNSQUEEZE, t1_shape, AttributeVector(Attribute("dims", TensorShape(-3)))
    ](t1, expected)
    expected = Tensor[dtype](2, 1, 3, 1)
    fill(expected, 5.0)
    test_unary_op[
        OP.UNSQUEEZE, t1_shape, AttributeVector(Attribute("dims", TensorShape(-1, -3)))
    ](t1, expected)
fn test_backward_UNSQUEEZE() raises:
    # Unsqueeze backward reshapes the upstream gradient back to the original
    # input shape; every value passes through unchanged.
    alias input_shape = TensorShape(2, 3)
    alias upstream_shape = TensorShape(2, 1, 3)
    var inputs = Tensor[dtype](input_shape)
    fill(inputs, 5.0)
    var upstream = Tensor[dtype](upstream_shape)
    fill(upstream, 5.0)
    var grad_target = Tensor[dtype](2, 3)
    fill(grad_target, 5.0)
    test_unary_op_backward[OP.UNSQUEEZE, input_shape, upstream_shape](
        inputs, upstream, grad_target
    )
fn test_SLICE() raises:
    # SLICE forward with slice 1:3:1 applied to each axis of a (3,4,5) tensor
    # in turn. The input holds its own flat index, so the expected values are
    # derived with the same row-major arithmetic shifted by the slice start.
    alias t1_shape = TensorShape(3, 4, 5)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    for i in range(t1.num_elements()):
        t1[i] = i
    alias slice = Slice(1, 3, 1)
    # dim = 0
    var expected_0 = Tensor[dtype](2, 4, 5)
    for i in range(2):
        for j in range(4):
            for k in range(5):
                expected_0[i*4*5 + j*5 + k] = (i + 1) * 4 * 5 + j * 5 + k
    test_unary_op[
        OP.SLICE, t1_shape, AttributeVector(
            Attribute("starts", TensorShape(slice.start)),
            Attribute("ends", TensorShape(slice.end)),
            Attribute("steps", TensorShape(slice.step)),
            Attribute("axes", TensorShape(0))
        )
    ](t1, expected_0)
    # dim = 1
    var expected_1 = Tensor[dtype](3, 2, 5)
    for i in range(3):
        for j in range(2):
            for k in range(5):
                expected_1[i*2*5 + j*5 + k] = i * 4 * 5 + (j + 1) * 5 + k
    test_unary_op[
        OP.SLICE, t1_shape, AttributeVector(
            Attribute("starts", TensorShape(slice.start)),
            Attribute("ends", TensorShape(slice.end)),
            Attribute("steps", TensorShape(slice.step)),
            Attribute("axes", TensorShape(1))
        )
    ](t1, expected_1)
    # dim = 2
    var expected_2 = Tensor[dtype](3, 4, 2)
    for i in range(3):
        for j in range(4):
            for k in range(2):
                expected_2[i*4*2 + j*2 + k] = i * 4 * 5 + j * 5 + (k + 1)
    test_unary_op[
        OP.SLICE, t1_shape, AttributeVector(
            Attribute("starts", TensorShape(slice.start)),
            Attribute("ends", TensorShape(slice.end)),
            Attribute("steps", TensorShape(slice.step)),
            Attribute("axes", TensorShape(2))
        )
    ](t1, expected_2)
fn test_SLICE_step() raises:
    # SLICE forward with a stride: 1:6:2 picks source indices 1, 3, 5 (three
    # elements) along each axis in turn; inputs hold their own flat index.
    alias slice = Slice(1, 6, 2)
    # dim = 0
    alias t0_shape = TensorShape(10, 2, 2)
    var t0: Tensor[dtype] = Tensor[dtype](t0_shape)
    for i in range(t0.num_elements()):
        t0[i] = i
    var expected_0 = Tensor[dtype](3, 2, 2)
    for i in range(3):
        for j in range(2):
            for k in range(2):
                expected_0[i*2*2 + j*2 + k] = (i*2 + 1) * 2 * 2 + j * 2 + k
    test_unary_op[
        OP.SLICE, t0_shape, AttributeVector(
            Attribute("starts", TensorShape(slice.start)),
            Attribute("ends", TensorShape(slice.end)),
            Attribute("steps", TensorShape(slice.step)),
            Attribute("axes", TensorShape(0))
        )
    ](t0, expected_0)
    # dim = 1
    alias t1_shape = TensorShape(2, 10, 2)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    for i in range(t1.num_elements()):
        t1[i] = i
    var expected_1 = Tensor[dtype](2, 3, 2)
    for i in range(2):
        for j in range(3):
            for k in range(2):
                expected_1[i*3*2 + j*2 + k] = i * 10 * 2 + (j*2 + 1) * 2 + k
    test_unary_op[
        OP.SLICE, t1_shape, AttributeVector(
            Attribute("starts", TensorShape(slice.start)),
            Attribute("ends", TensorShape(slice.end)),
            Attribute("steps", TensorShape(slice.step)),
            Attribute("axes", TensorShape(1))
        )
    ](t1, expected_1)
    # dim = 2
    alias t2_shape = TensorShape(2, 2, 10)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    for i in range(t2.num_elements()):
        t2[i] = i
    var expected_2 = Tensor[dtype](2, 2, 3)
    for i in range(2):
        for j in range(2):
            for k in range(3):
                expected_2[i*2*3 + j*3 + k] = i * 2 * 10 + j * 10 + (k*2 + 1)
    test_unary_op[
        OP.SLICE, t2_shape, AttributeVector(
            Attribute("starts", TensorShape(slice.start)),
            Attribute("ends", TensorShape(slice.end)),
            Attribute("steps", TensorShape(slice.step)),
            Attribute("axes", TensorShape(2))
        )
    ](t2, expected_2)
fn test_SLICE_neg() raises:
    # SLICE forward with a negative step: 6:1:-2 picks source indices 6, 4, 2
    # (in that order) along each axis; the StaticIntTuple encodes that
    # reversed index sequence. Inputs hold their own flat index.
    alias slice = Slice(6, 1, -2)
    # dim = 0
    alias t0_shape = TensorShape(10, 2, 2)
    var t0: Tensor[dtype] = Tensor[dtype](t0_shape)
    for i in range(t0.num_elements()):
        t0[i] = i
    var expected_0 = Tensor[dtype](3, 2, 2)
    for i in range(3):
        for j in range(2):
            for k in range(2):
                expected_0[i*2*2 + j*2 + k] = StaticIntTuple[3](6, 4, 2)[i] * 2 * 2 + j * 2 + k
    test_unary_op[
        OP.SLICE, t0_shape, AttributeVector(
            Attribute("starts", TensorShape(slice.start)),
            Attribute("ends", TensorShape(slice.end)),
            Attribute("steps", TensorShape(slice.step)),
            Attribute("axes", TensorShape(0))
        )
    ](t0, expected_0)
    # dim = 1
    alias t1_shape = TensorShape(2, 10, 2)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    for i in range(t1.num_elements()):
        t1[i] = i
    var expected_1 = Tensor[dtype](2, 3, 2)
    for i in range(2):
        for j in range(3):
            for k in range(2):
                expected_1[i*3*2 + j*2 + k] = i * 10 * 2 + StaticIntTuple[3](6, 4, 2)[j] * 2 + k
    test_unary_op[
        OP.SLICE, t1_shape, AttributeVector(
            Attribute("starts", TensorShape(slice.start)),
            Attribute("ends", TensorShape(slice.end)),
            Attribute("steps", TensorShape(slice.step)),
            Attribute("axes", TensorShape(1))
        )
    ](t1, expected_1)
    # dim = 2
    alias t2_shape = TensorShape(2, 2, 10)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    for i in range(t2.num_elements()):
        t2[i] = i
    var expected_2 = Tensor[dtype](2, 2, 3)
    for i in range(2):
        for j in range(2):
            for k in range(3):
                expected_2[i*2*3 + j*3 + k] = i * 2 * 10 + j * 10 + StaticIntTuple[3](6, 4, 2)[k]
    test_unary_op[
        OP.SLICE, t2_shape, AttributeVector(
            Attribute("starts", TensorShape(slice.start)),
            Attribute("ends", TensorShape(slice.end)),
            Attribute("steps", TensorShape(slice.step)),
            Attribute("axes", TensorShape(2))
        )
    ](t2, expected_2)
fn test_SLICE_multiple_axes() raises:
    # SLICE forward over all axes at once (no "axes" attribute: starts/ends/
    # steps then apply positionally to every axis). The StaticIntTuples spell
    # out the exact source indices each slice selects; inputs hold their own
    # flat index.
    alias t1_shape = TensorShape(20, 32, 40)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    for i in range(t1.num_elements()):
        t1[i] = i
    alias slice_0 = Slice(1, 6, 2)
    alias slice_1 = Slice(3, 10, 3)
    alias slice_2 = Slice(5, 15, 2)
    var expected = Tensor[dtype](3, 3, 5)
    for i in range(3):
        for j in range(3):
            for k in range(5):
                expected[i*3*5 + j*5 + k] = StaticIntTuple[5](1, 3, 5, 7, 9)[i] * 32 * 40 + StaticIntTuple[3](3, 6, 9)[j] * 40 + StaticIntTuple[5](5, 7, 9, 11, 13)[k]
    test_unary_op[
        OP.SLICE, t1_shape, AttributeVector(
            Attribute("starts", TensorShape(slice_0.start, slice_1.start, slice_2.start)),
            Attribute("ends", TensorShape(slice_0.end, slice_1.end, slice_2.end)),
            Attribute("steps", TensorShape(slice_0.step, slice_1.step, slice_2.step)),
            # Attribute("axes", TensorShape(0, 1, 2))
        )
    ](t1, expected)
    # Same check on a 4-D tensor; the last slice uses negative start/end,
    # which resolve against the axis length (50).
    alias t2_shape = TensorShape(20, 32, 40, 50)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    for i in range(t2.num_elements()):
        t2[i] = i
    alias slice_2_1 = Slice(1, 6, 2)
    alias slice_2_2 = Slice(3, 10, 3)
    alias slice_2_3 = Slice(5, 15, 2)
    alias slice_2_4 = Slice(-43, -30, 4)
    var expected_2 = Tensor[dtype](3, 3, 5, 4)
    for i in range(3):
        for j in range(3):
            for k in range(5):
                for l in range(4):
                    expected_2[i*3*5*4 + j*5*4 + k*4 + l] = StaticIntTuple[5](1, 3, 5, 7, 9)[i] * 32 * 40 * 50 + StaticIntTuple[3](3, 6, 9)[j] * 40 * 50 + StaticIntTuple[5](5, 7, 9, 11, 13)[k] * 50 + StaticIntTuple[4](7, 11, 15, 19)[l]
    test_unary_op[
        OP.SLICE, t2_shape, AttributeVector(
            Attribute("starts", TensorShape(slice_2_1.start, slice_2_2.start, slice_2_3.start, slice_2_4.start)),
            Attribute("ends", TensorShape(slice_2_1.end, slice_2_2.end, slice_2_3.end, slice_2_4.end)),
            Attribute("steps", TensorShape(slice_2_1.step, slice_2_2.step, slice_2_3.step, slice_2_4.step)),
        )
    ](t2, expected_2)
fn test_backward_SLICE() raises:
    # SLICE backward on a single axis: the upstream gradient (all 1.0) is
    # scattered back to the positions the forward slice selected; everything
    # else stays 0. Covers positive step, step > 1, and negative step.
    # dim = 0 (step = 1)
    alias slice_0 = Slice(1, 3, 1)
    alias t0_shape = TensorShape(3, 4, 5)
    var t0: Tensor[dtype] = Tensor[dtype](t0_shape)
    fill(t0, 5.0)
    alias ug0_shape = TensorShape(2, 4, 5)
    var ug0: Tensor[dtype] = Tensor[dtype](ug0_shape)
    fill(ug0, 1.0)
    var expected_ug0 = Tensor[dtype](t0_shape)
    for i in range(2):
        for j in range(4):
            for k in range(5):
                expected_ug0[(i+1)*4*5 + j*5 + k] = 1.0
    test_unary_op_backward[
        OP.SLICE, t0_shape, ug0_shape, AttributeVector(
            Attribute("starts", TensorShape(slice_0.start)),
            Attribute("ends", TensorShape(slice_0.end)),
            Attribute("steps", TensorShape(slice_0.step)),
            Attribute("axes", TensorShape(0))
        )
    ](t0, ug0, expected_ug0)
    # dim = 1 (step = 2)
    alias slice_1 = Slice(1, 6, 2)
    alias t1_shape = TensorShape(2, 10, 2)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    fill(t1, 5.0)
    alias ug1_shape = TensorShape(2, 3, 2)
    var ug1: Tensor[dtype] = Tensor[dtype](ug1_shape)
    fill(ug1, 1.0)
    var expected_ug1 = Tensor[dtype](t1_shape)
    for i in range(2):
        for j in range(3):
            for k in range(2):
                expected_ug1[i*10*2 + (j*2 + 1)*2 + k] = 1.0
    test_unary_op_backward[
        OP.SLICE, t1_shape, ug1_shape, AttributeVector(
            Attribute("starts", TensorShape(slice_1.start)),
            Attribute("ends", TensorShape(slice_1.end)),
            Attribute("steps", TensorShape(slice_1.step)),
            Attribute("axes", TensorShape(1))
        )
    ](t1, ug1, expected_ug1)
    # dim = 2 (step = -2)
    alias slice_2 = Slice(6, 1, -2)
    alias t2_shape = TensorShape(2, 2, 10)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    fill(t2, 5.0)
    alias ug2_shape = TensorShape(2, 2, 3)
    var ug2: Tensor[dtype] = Tensor[dtype](ug2_shape)
    fill(ug2, 1.0)
    var expected_ug2 = Tensor[dtype](t2_shape)
    for i in range(2):
        for j in range(2):
            for k in range(3):
                expected_ug2[i*2*10 + j*10 + StaticIntTuple[3](6, 4, 2)[k]] = 1.0
    test_unary_op_backward[
        OP.SLICE, t2_shape, ug2_shape, AttributeVector(
            Attribute("starts", TensorShape(slice_2.start)),
            Attribute("ends", TensorShape(slice_2.end)),
            Attribute("steps", TensorShape(slice_2.step)),
            Attribute("axes", TensorShape(2))
        )
    ](t2, ug2, expected_ug2)
fn test_backward_SLICE_multiple_axes() raises:
    # SLICE backward over all three axes at once.
    # Slices: axis 0 -> 1:6:2 (indices 1, 3, 5), axis 1 -> 3:10:3 (3, 6, 9),
    # axis 2 -> 5:15:2 (5, 7, 9, 11, 13). A gradient of 1.0 at every sliced
    # output position must scatter back to exactly those input positions; all
    # other gradient entries stay 0.
    # (Fix: the original also built a forward-`expected` tensor that was
    # never used by this backward test — dead code removed.)
    alias t1_shape = TensorShape(20, 32, 40)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    for i in range(t1.num_elements()):
        t1[i] = i
    alias slice_0 = Slice(1, 6, 2)
    alias slice_1 = Slice(3, 10, 3)
    alias slice_2 = Slice(5, 15, 2)
    alias ug_shape = TensorShape(3, 3, 5)
    var ug: Tensor[dtype] = Tensor[dtype](ug_shape)
    fill(ug, 1.0)
    var expected_ug = Tensor[dtype](t1_shape)
    for i in range(3):
        for j in range(3):
            for k in range(5):
                expected_ug[StaticIntTuple[5](1, 3, 5, 7, 9)[i] * 32 * 40 + StaticIntTuple[3](3, 6, 9)[j] * 40 + StaticIntTuple[5](5, 7, 9, 11, 13)[k]] = 1.0
    test_unary_op_backward[
        OP.SLICE, t1_shape, ug_shape, AttributeVector(
            Attribute("starts", TensorShape(slice_0.start, slice_1.start, slice_2.start)),
            Attribute("ends", TensorShape(slice_0.end, slice_1.end, slice_2.end)),
            Attribute("steps", TensorShape(slice_0.step, slice_1.step, slice_2.step)),
        )
    ](t1, ug, expected_ug)
fn main():
    # Entry point: run all forward mlops tests first, then all backward
    # tests, reporting the first failure in each phase separately.
    try:
        test_SIGMOID()
        test_RELU()
        test_TANH()
        test_CLIP()
        test_SQUEEZE()
        test_UNSQUEEZE()
        test_SLICE()
        test_SLICE_step()
        test_SLICE_neg()
        test_SLICE_multiple_axes()
    except e:
        print("[ERROR] Error in forward mlops")
        print(e)
        return
    try:
        test_backward_SIGMOID()
        test_backward_RELU()
        test_backward_TANH()
        test_backward_CLIP()
        test_backward_SQUEEZE()
        test_backward_UNSQUEEZE()
        test_backward_SLICE()
        test_backward_SLICE_multiple_axes()
    except e:
        print("[ERROR] Error in backward mlops")
        print(e)
        return
<filename>basalt/tests/mojo/test_ops.mojo
from math import exp, log
from basalt import dtype, nelts
from basalt.autograd import OP
from basalt.autograd.attributes import Attribute, AttributeVector
from basalt.utils.tensorutils import fill
from basalt.nn import Tensor, TensorShape
from tests import test_unary_op, test_binary_op, test_ternary_op
fn test_ADD() raises:
    # Elementwise addition: 1 + 1 == 2 everywhere.
    alias lhs_shape = TensorShape(2, 3)
    alias rhs_shape = TensorShape(2, 3)
    var lhs = Tensor[dtype](lhs_shape)
    var rhs = Tensor[dtype](rhs_shape)
    fill(lhs, 1.0)
    fill(rhs, 1.0)
    var target = Tensor[dtype](2, 3)
    fill(target, 2.0)
    test_binary_op[OP.ADD, lhs_shape, rhs_shape](lhs, rhs, target)
fn test_SUB() raises:
    # Elementwise subtraction: 2 - 1 == 1 everywhere.
    alias lhs_shape = TensorShape(2, 3)
    alias rhs_shape = TensorShape(2, 3)
    var lhs = Tensor[dtype](lhs_shape)
    var rhs = Tensor[dtype](rhs_shape)
    fill(lhs, 2.0)
    fill(rhs, 1.0)
    var target = Tensor[dtype](2, 3)
    fill(target, 1.0)
    test_binary_op[OP.SUB, lhs_shape, rhs_shape](lhs, rhs, target)
fn test_MUL() raises:
    # Elementwise multiplication: 2 * 3 == 6 everywhere.
    alias lhs_shape = TensorShape(2, 3)
    alias rhs_shape = TensorShape(2, 3)
    var lhs = Tensor[dtype](lhs_shape)
    var rhs = Tensor[dtype](rhs_shape)
    fill(lhs, 2.0)
    fill(rhs, 3.0)
    var target = Tensor[dtype](2, 3)
    fill(target, 6.0)
    test_binary_op[OP.MUL, lhs_shape, rhs_shape](lhs, rhs, target)
fn test_DIV() raises:
    # Elementwise division: 6 / 2 == 3 everywhere.
    alias lhs_shape = TensorShape(2, 3)
    alias rhs_shape = TensorShape(2, 3)
    var lhs = Tensor[dtype](lhs_shape)
    var rhs = Tensor[dtype](rhs_shape)
    fill(lhs, 6.0)
    fill(rhs, 2.0)
    var target = Tensor[dtype](2, 3)
    fill(target, 3.0)
    test_binary_op[OP.DIV, lhs_shape, rhs_shape](lhs, rhs, target)
fn test_DOT() raises:
    # Matrix product of a (2,3) of ones and a (3,2) of twos: every entry of
    # the (2,2) result is the inner dim (3) * 1 * 2 = 6.
    alias lhs_shape = TensorShape(2, 3)
    alias rhs_shape = TensorShape(3, 2)
    var lhs = Tensor[dtype](lhs_shape)
    var rhs = Tensor[dtype](rhs_shape)
    fill(lhs, 1.0)
    fill(rhs, 2.0)
    var target = Tensor[dtype](2, 2)
    fill(target, 6.0)
    test_binary_op[OP.DOT, lhs_shape, rhs_shape](lhs, rhs, target)
fn test_EXP() raises:
    # Elementwise exponential of a constant tensor; the reference value comes
    # from the scalar math.exp at the same dtype.
    alias input_shape = TensorShape(2, 3)
    var inputs = Tensor[dtype](input_shape)
    fill(inputs, 2.0)
    var target = Tensor[dtype](2, 3)
    fill(target, exp[dtype, 1](2.0))
    test_unary_op[OP.EXP, input_shape](inputs, target)
fn test_LOG() raises:
    # Elementwise natural log of a constant tensor; the reference value comes
    # from the scalar math.log at the same dtype.
    alias input_shape = TensorShape(2, 3)
    var inputs = Tensor[dtype](input_shape)
    fill(inputs, 2.0)
    var target = Tensor[dtype](2, 3)
    fill(target, log[dtype, 1](2.0))
    test_unary_op[OP.LOG, input_shape](inputs, target)
fn test_POW() raises:
    # Elementwise power with a scalar (shape (1)) exponent: 2 ** 2 == 4.
    alias base_shape = TensorShape(2, 3)
    alias exp_shape = TensorShape(1)
    var base = Tensor[dtype](base_shape)
    fill(base, 2.0)
    var exponent = Tensor[dtype](exp_shape)
    exponent[0] = 2.0
    var target = Tensor[dtype](2, 3)
    fill(target, 4.0)
    test_binary_op[OP.POW, base_shape, exp_shape](base, exponent, target)
fn test_SUM() raises:
    # SUM of a (2,3,4) tensor of ones: full reduction and an axis reduction.
    alias input_shape = TensorShape(2, 3, 4)
    var inputs = Tensor[dtype](input_shape)
    fill(inputs, 1.0)
    # Full reduction: 2*3*4 ones sum to 24, returned as a single element.
    var target = Tensor[dtype](1)
    fill(target, 24.0)
    test_unary_op[OP.SUM, input_shape](inputs, target)
    # Axis-1 reduction keeps the axis with size 1; each sum covers 3 ones.
    alias axis_attrs = AttributeVector(Attribute("axis", 1))
    target = Tensor[dtype](2, 1, 4)
    fill(target, 3.0)
    test_unary_op[OP.SUM, input_shape, axis_attrs](inputs, target)
fn test_MAX() raises:
    # MAX of a (2,3,2) tensor holding 1..12: full reduction and each axis.
    # The StaticIntTuples below list the expected per-slice maxima.
    alias t1_shape = TensorShape(2, 3, 2)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    for i in range(t1_shape.num_elements()):
        t1[i] = i + 1
    # No axis specified
    var expected = Tensor[dtype](1)
    fill(expected, t1_shape.num_elements())
    test_unary_op[OP.MAX, t1_shape](t1, expected)
    # Helper: copy a tuple of expected values into a tensor elementwise.
    @parameter
    fn fill_tensor[
        size: Int
    ](inout tensor: Tensor[dtype], values: StaticIntTuple[size]):
        for i in range(tensor.num_elements()):
            tensor[i] = values[i]
    # Test axis 0
    alias attrs = AttributeVector(Attribute("axis", 0))
    var expected_max_axis_0_temp = StaticIntTuple[6](7, 8, 9, 10, 11, 12)
    expected = Tensor[dtype](1, 3, 2)
    fill_tensor(expected, expected_max_axis_0_temp)
    test_unary_op[OP.MAX, t1_shape, attrs](t1, expected)
    # Test axis 1
    alias attrs_1 = AttributeVector(Attribute("axis", 1))
    var expected_max_axis_1_temp = StaticIntTuple[4](5, 6, 11, 12)
    expected = Tensor[dtype](2, 1, 2)
    fill_tensor(expected, expected_max_axis_1_temp)
    test_unary_op[OP.MAX, t1_shape, attrs_1](t1, expected)
    # Test axis 2
    alias attrs_2 = AttributeVector(Attribute("axis", 2))
    var expected_max_axis_2_temp = StaticIntTuple[6](2, 4, 6, 8, 10, 12)
    expected = Tensor[dtype](2, 3, 1)
    fill_tensor(expected, expected_max_axis_2_temp)
    test_unary_op[OP.MAX, t1_shape, attrs_2](t1, expected)
fn test_MEAN() raises:
    """MEAN: constant input, so every reduction also yields that constant."""
    alias shape = TensorShape(2, 3)
    var input = Tensor[dtype](shape)
    fill(input, 5.0)

    # Full reduction.
    var want = Tensor[dtype](1)
    fill(want, 5.0)
    test_unary_op[OP.MEAN, shape](input, want)

    # Axis 0: result shape (1, 3).
    alias axis0_attrs = AttributeVector(Attribute("axis", 0))
    want = Tensor[dtype](1, 3)
    fill(want, 5.0)
    test_unary_op[OP.MEAN, shape, axis0_attrs](input, want)

    # Axis 1: result shape (2, 1).
    alias axis1_attrs = AttributeVector(Attribute("axis", 1))
    want = Tensor[dtype](2, 1)
    fill(want, 5.0)
    test_unary_op[OP.MEAN, shape, axis1_attrs](input, want)
fn test_TRANSPOSE() raises:
    """TRANSPOSE: default (reversed axes) and an explicit permutation."""
    alias t1_shape = TensorShape(2, 3, 4)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    # Ramp 1..24 so every position maps to a unique value.
    for i in range(t1_shape.num_elements()):
        t1[i] = i + 1

    # Test transpose (no attributes = reversing the axes by default).
    # Expected: input element (i, j, k) lands at (k, j, i) of a (4, 3, 2) tensor.
    var expected = Tensor[dtype](4, 3, 2)
    var expected_strides = expected.strides()
    for i in range(t1_shape[0]):
        for j in range(t1_shape[1]):
            for k in range(t1_shape[2]):
                expected[k * expected_strides[0] + j * expected_strides[1] + i] = t1[
                    i * t1_shape[1] * t1_shape[2] + j * t1_shape[2] + k
                ]

    test_unary_op[OP.TRANSPOSE, t1_shape](t1, expected)

    # Test transpose with permutation (1, 2, 0): (i, j, k) -> (j, k, i).
    alias attrs = AttributeVector(Attribute("axes", TensorShape(1, 2, 0)))
    var expected_axis_1 = Tensor[dtype](3, 4, 2)
    var expected_axis_1_strides = expected_axis_1.strides()
    for i in range(t1_shape[0]):
        for j in range(t1_shape[1]):
            for k in range(t1_shape[2]):
                expected_axis_1[
                    j * expected_axis_1_strides[0] + k * expected_axis_1_strides[1] + i
                ] = t1[i * t1_shape[1] * t1_shape[2] + j * t1_shape[2] + k]

    test_unary_op[OP.TRANSPOSE, t1_shape, attrs](t1, expected_axis_1)
fn test_FLATTEN() raises:
    """FLATTEN: (2, 3, 4) collapses to a rank-1 tensor of 24 elements."""
    alias shape = TensorShape(2, 3, 4)
    var input = Tensor[dtype](shape)
    fill(input, 1.0)

    var want = Tensor[dtype](24)
    fill(want, 1.0)
    test_unary_op[OP.FLATTEN, shape](input, want)
fn test_RESHAPE() raises:
    """RESHAPE: (2, 2, 5) -> (2, 10); the flat element order is unchanged."""
    alias old_shape = TensorShape(2, 2, 5)
    alias new_shape = TensorShape(2, 10)
    alias attrs = AttributeVector(Attribute("shape", new_shape))

    var input = Tensor[dtype](old_shape)
    var want = Tensor[dtype](new_shape)
    # Identical ramp in both tensors: reshape must be a pure relabeling.
    for i in range(20):
        input[i] = i + 1
        want[i] = i + 1

    test_unary_op[OP.RESHAPE, old_shape, attrs](input, want)
fn test_FMA() raises:
    """FMA: element-wise fused multiply-add, a * b + c."""
    alias shape_a = TensorShape(2, 3)
    alias shape_b = TensorShape(2, 3)
    alias shape_c = TensorShape(2, 3)

    var a = Tensor[dtype](shape_a)
    var b = Tensor[dtype](shape_b)
    var c = Tensor[dtype](shape_c)
    fill(a, 1.0)
    fill(b, 2.0)
    fill(c, 3.0)

    # 1 * 2 + 3 == 5 everywhere.
    var want = Tensor[dtype](2, 3)
    fill(want, 1.0 * 2.0 + 3.0)
    test_ternary_op[OP.FMA, shape_a, shape_b, shape_c](a, b, c, want)
fn main():
    """Run every forward-op test; print (without re-raising) the first failure."""
    try:
        test_ADD()
        test_SUB()
        test_MUL()
        test_DIV()
        test_DOT()
        test_EXP()
        test_LOG()
        test_POW()
        test_SUM()
        test_MAX()
        test_MEAN()
        test_TRANSPOSE()
        test_FLATTEN()
        test_RESHAPE()
        test_FMA()
    except e:
        print("[ERROR] Error in ops")
        print(e)
from random import rand
from testing import assert_equal, assert_almost_equal
from math import sqrt, exp, round, add, sub, mul, div
from basalt import dtype, nelts
from basalt.autograd.ops.matmul import dot
from basalt.utils.tensorutils import (
fill,
elwise_transform,
elwise_pow,
elwise_op,
broadcast_shapes,
broadcast_elwise_op,
get_reduce_shape,
accumulate_grad,
tsum,
tmean,
tstd,
tmax,
transpose,
)
from basalt.nn import Tensor, TensorShape
from tests import assert_tensors_equal
fn test_zero() raises:
    """zero() must reset a randomly-filled tensor back to all zeros."""
    var pristine = Tensor[dtype](2, 3)  # freshly allocated reference
    var zeroed = Tensor[dtype](2, 3)
    rand[dtype](zeroed.data(), zeroed.num_elements())
    zeroed.zero()
    assert_tensors_equal(pristine, zeroed)
fn test_fill() raises:
    """fill() must match an element-by-element manual assignment."""
    var manual = Tensor[dtype](2, 3)
    for i in range(manual.num_elements()):
        manual[i] = 1.0

    var filled = Tensor[dtype](2, 3)
    fill(filled, 1.0)
    assert_tensors_equal(manual, filled)
fn test_dot() raises:
    """dot() computes a matrix product; checked in both argument orders."""
    alias a_shape = TensorShape(2, 3)
    alias b_shape = TensorShape(3, 2)
    var A = Tensor[dtype](a_shape)
    var B = Tensor[dtype](b_shape)
    fill(A, 1.0)
    fill(B, 1.0)

    # (2, 3) @ (3, 2) -> (2, 2); each entry is a sum over 3 ones.
    var C = Tensor[dtype](2, 2)
    dot[a_shape, b_shape](C, A, B)
    var C_expected = Tensor[dtype](2, 2)
    fill(C_expected, 3.0)
    assert_tensors_equal(C, C_expected)

    # (3, 2) @ (2, 3) -> (3, 3); each entry is a sum over 2 ones.
    var D = Tensor[dtype](3, 3)
    dot[b_shape, a_shape](D, B, A)
    var D_expected = Tensor[dtype](3, 3)
    fill(D_expected, 2.0)
    assert_tensors_equal(D, D_expected)
fn test_elwise_transform() raises:
    """elwise_transform applies a unary math function to every element."""
    var fours = Tensor[dtype](2, 10)
    var twos = Tensor[dtype](2, 10)
    var e_squared = Tensor[dtype](2, 10)
    var sevens = Tensor[dtype](2, 10)
    fill(fours, 4)
    fill(twos, 2)
    fill(e_squared, exp[dtype, 1](2))
    fill(sevens, 7)

    # sqrt(4) == 2
    var sqrt_result = Tensor[dtype](2, 10)
    elwise_transform[sqrt](sqrt_result, fours)
    assert_tensors_equal(sqrt_result, twos)

    # exp(2) == e^2
    var exp_result = Tensor[dtype](2, 10)
    elwise_transform[exp](exp_result, twos)
    assert_tensors_equal(exp_result, e_squared)

    # round(e^2) == 7
    var round_result = Tensor[dtype](2, 10)
    elwise_transform[round](round_result, e_squared)
    assert_tensors_equal(round_result, sevens)
fn test_elwise_pow() raises:
    """elwise_pow raises each element to an integer power (here: squares)."""
    var base = Tensor[dtype](1, 10)
    var squares = Tensor[dtype](1, 10)
    for i in range(10):
        base[i] = i
        squares[i] = i**2

    var result = Tensor[dtype](1, 10)
    elwise_pow(result, base, 2)
    assert_tensors_equal(result, squares)
fn test_elwise_tensor_tensor() raises:
    """Element-wise add/sub/mul/div of two same-shape tensors (both all 3s)."""
    alias t1_shape = TensorShape(2, 10)
    alias t2_shape = TensorShape(2, 10)
    var t1 = Tensor[dtype](t1_shape)
    var t2 = Tensor[dtype](t2_shape)
    fill(t1, 3.0)
    fill(t2, 3.0)

    # 3 + 3 == 6
    var result1 = Tensor[dtype](2, 10)
    elwise_op[t1_shape, t2_shape, add](result1, t1, t2)
    var result1_expected = Tensor[dtype](2, 10)
    fill(result1_expected, 6.0)
    assert_tensors_equal(result1, result1_expected)

    # 3 - 3 == 0; the expected tensor is deliberately left at its fresh
    # (zero) state — see test_zero, which establishes new tensors are zero.
    var result2 = Tensor[dtype](2, 10)
    elwise_op[t1_shape, t2_shape, sub](result2, t1, t2)
    var result2_expected = Tensor[dtype](2, 10)
    assert_tensors_equal(result2, result2_expected)

    # 3 * 3 == 9
    var result3 = Tensor[dtype](2, 10)
    elwise_op[t1_shape, t2_shape, mul](result3, t1, t2)
    var result3_expected = Tensor[dtype](2, 10)
    fill(result3_expected, 9.0)
    assert_tensors_equal(result3, result3_expected)

    # 3 / 3 == 1
    var result4 = Tensor[dtype](2, 10)
    elwise_op[t1_shape, t2_shape, div](result4, t1, t2)
    var result4_expected = Tensor[dtype](2, 10)
    fill(result4_expected, 1.0)
    assert_tensors_equal(result4, result4_expected)
fn test_elwise_tensor_scalar() raises:
    """Element-wise ops between a tensor of 1s and the scalar 2.0, with the
    scalar supplied on either side of the operator."""
    var a: Scalar[dtype] = 2.0
    var t1 = Tensor[dtype](2, 10)
    fill(t1, 1.0)
    var result = Tensor[dtype](2, 10)

    # tensor + scalar: 1 + 2 == 3
    elwise_op[add](result, t1, a)
    var result1_expected = Tensor[dtype](2, 10)
    fill(result1_expected, 3.0)
    assert_tensors_equal(result, result1_expected)

    # scalar + tensor yields the same result.
    elwise_op[add](result, a, t1)
    assert_tensors_equal(result, result1_expected)

    # tensor - scalar: 1 - 2 == -1
    elwise_op[sub](result, t1, a)
    var result3_expected = Tensor[dtype](2, 10)
    fill(result3_expected, -1)
    assert_tensors_equal(result, result3_expected)

    # scalar * tensor: 2 * 1 == 2
    elwise_op[mul](result, a, t1)
    var result4_expected = Tensor[dtype](2, 10)
    fill(result4_expected, 2)
    assert_tensors_equal(result, result4_expected)

    # tensor / scalar: 1 / 2 == 0.5
    elwise_op[div](result, t1, a)
    var result5_expected = Tensor[dtype](2, 10)
    fill(result5_expected, 0.5)
    assert_tensors_equal(result, result5_expected)
fn test_elwise_broadcast_tensor() raises:
    """Broadcast add: (2, 3, 4) + (5, 2, 1, 4) -> (5, 2, 3, 4).
    t2's singleton axis is stretched across t1's middle dimension."""
    alias t1_shape = TensorShape(2, 3, 4)
    alias t2_shape = TensorShape(5, 2, 1, 4)
    alias res_shape = broadcast_shapes(t1_shape, t2_shape)
    var t1 = Tensor[dtype](t1_shape)
    var t2 = Tensor[dtype](t2_shape)
    fill(t1, 3.0)
    # Ramp 1..40 in t2 so every broadcast source value is identifiable.
    for i in range(40):
        t2[i] = i + 1

    var result1 = Tensor[dtype](res_shape)
    elwise_op[t1_shape, t2_shape, add](result1, t1, t2)

    var result1_expected = Tensor[dtype](5, 2, 3, 4)
    # Fill the expected tensor: t2 element i is repeated over the size-3
    # broadcast axis (j); the index math maps (i, j) to the flat offset.
    for i in range(40):
        for j in range(3):
            var index = (i % 4) + ((i // 4) * 12) + j * 4
            result1_expected[index] = 3.0 + (i + 1)
    assert_tensors_equal(result1, result1_expected)
from test_tensorutils_data import SumMeanStdData
fn test_sum_mean_std() raises:
    """tsum/tmean/tstd on a (2, 10) ramp: full reduction, then axes 0 and 1."""
    var t = Tensor[dtype](2, 10)
    var s = 0
    for i in range(20):
        t[i] = i + 1
        s += i + 1

    # Not specifying the axis takes all elements regardless of the shape.
    var tensor_sum = tsum(t)
    assert_equal(tensor_sum, s)

    var tensor_mean = tmean(t)
    assert_equal(tensor_mean, s / 20)

    # Population std: sqrt(mean of squared deviations), computed by hand.
    var tensor_std = tstd(t)
    var expected_std: Scalar[dtype] = 0
    for i in range(20):
        expected_std += (i + 1 - tensor_mean) ** 2
    expected_std = sqrt(expected_std / 20)
    assert_equal(tensor_std, expected_std)

    # When specifying the axis you can reduce across batches.
    # Axis 0: pairs (i + 1, i + 11) collapse to shape (1, 10).
    var batch_sum_0 = Tensor[dtype](get_reduce_shape(t.shape(), axis=0))
    tsum(batch_sum_0, t, axis=0)
    var expected_batch_sum_0 = Tensor[dtype](1, 10)
    for i in range(10):
        expected_batch_sum_0[i] = (i + 1) + (i + 1 + 10)
    assert_tensors_equal(batch_sum_0, expected_batch_sum_0)

    var batch_mean_0 = Tensor[dtype](get_reduce_shape(t.shape(), axis=0))
    tmean(batch_mean_0, t, axis=0)
    var expected_batch_mean_0 = Tensor[dtype](1, 10)
    for i in range(10):
        expected_batch_mean_0[i] = expected_batch_sum_0[i] / 2
    assert_tensors_equal(batch_mean_0, expected_batch_mean_0)

    # Each column is {x, x + 10}, whose population std is 5.
    var batch_std_0 = Tensor[dtype](get_reduce_shape(t.shape(), axis=0))
    tstd(batch_std_0, t, axis=0)
    var expected_batch_std_0 = Tensor[dtype](1, 10)
    fill(expected_batch_std_0, 5)
    assert_tensors_equal(batch_std_0, expected_batch_std_0)

    # Axis 1: each row of 10 consecutive values collapses to shape (2, 1).
    var batch_sum_1 = Tensor[dtype](get_reduce_shape(t.shape(), axis=1))
    tsum(batch_sum_1, t, axis=1)
    var expected_batch_sum_1 = Tensor[dtype](2, 1)
    expected_batch_sum_1[0] = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10
    expected_batch_sum_1[1] = 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20
    assert_tensors_equal(batch_sum_1, expected_batch_sum_1)

    var batch_mean_1 = Tensor[dtype](get_reduce_shape(t.shape(), axis=1))
    tmean(batch_mean_1, t, axis=1)
    var expected_batch_mean_1 = Tensor[dtype](2, 1)
    expected_batch_mean_1[0] = expected_batch_sum_1[0] / 10
    expected_batch_mean_1[1] = expected_batch_sum_1[1] / 10
    assert_tensors_equal(batch_mean_1, expected_batch_mean_1)

    # Population std of 10 consecutive integers: sqrt(99 / 12) ~= 2.8722813.
    var batch_std_1 = Tensor[dtype](get_reduce_shape(t.shape(), axis=1))
    tstd(batch_std_1, t, axis=1)
    var expected_batch_std_1 = Tensor[dtype](2, 1)
    fill(expected_batch_std_1, 2.8722813129425049)
    assert_tensors_equal(batch_std_1, expected_batch_std_1)
fn test_sum_mean_std_n() raises:
    """tsum/tmean/tstd on a (3, 4, 5) ramp, checked for every axis against the
    precomputed SumMeanStdData fixtures."""
    var t = Tensor[dtype](3, 4, 5)
    var s = 0
    for i in range(60):
        t[i] = i + 1
        s += i + 1

    # Not specifying the axis takes all elements regardless of the shape.
    var tensor_sum = tsum(t)
    assert_equal(tensor_sum, s)

    var tensor_mean = tmean(t)
    assert_equal(tensor_mean, s / 60)

    # Population std computed by hand from the ramp.
    var tensor_std = tstd(t)
    var expected_std: Scalar[dtype] = 0
    for i in range(60):
        expected_std += (i + 1 - tensor_mean) ** 2
    expected_std = sqrt(expected_std / 60)
    assert_equal(tensor_std, expected_std)

    # Axis 0
    var data = SumMeanStdData.generate_3d_axis_0()
    var batch_sum_0 = Tensor[dtype](get_reduce_shape(t.shape(), axis=0))
    tsum(batch_sum_0, t, axis=0)
    assert_tensors_equal(batch_sum_0, data.expected_sum)

    var batch_mean_0 = Tensor[dtype](get_reduce_shape(t.shape(), axis=0))
    tmean(batch_mean_0, t, axis=0)
    assert_tensors_equal(batch_mean_0, data.expected_mean)

    var batch_std_0 = Tensor[dtype](get_reduce_shape(t.shape(), axis=0))
    tstd(batch_std_0, t, axis=0)
    assert_tensors_equal(batch_std_0, data.expected_std)

    # Axis 1
    data = SumMeanStdData.generate_3d_axis_1()
    var batch_sum_1 = Tensor[dtype](get_reduce_shape(t.shape(), axis=1))
    tsum(batch_sum_1, t, axis=1)
    assert_tensors_equal(batch_sum_1, data.expected_sum)

    var batch_mean_1 = Tensor[dtype](get_reduce_shape(t.shape(), axis=1))
    tmean(batch_mean_1, t, axis=1)
    assert_tensors_equal(batch_mean_1, data.expected_mean)

    var batch_std_1 = Tensor[dtype](get_reduce_shape(t.shape(), axis=1))
    tstd(batch_std_1, t, axis=1)
    assert_tensors_equal(batch_std_1, data.expected_std)

    # Axis 2
    data = SumMeanStdData.generate_3d_axis_2()
    var batch_sum_2 = Tensor[dtype](get_reduce_shape(t.shape(), axis=2))
    tsum(batch_sum_2, t, axis=2)
    assert_tensors_equal(batch_sum_2, data.expected_sum)

    var batch_mean_2 = Tensor[dtype](get_reduce_shape(t.shape(), axis=2))
    tmean(batch_mean_2, t, axis=2)
    assert_tensors_equal(batch_mean_2, data.expected_mean)

    var batch_std_2 = Tensor[dtype](get_reduce_shape(t.shape(), axis=2))
    tstd(batch_std_2, t, axis=2)
    assert_tensors_equal(batch_std_2, data.expected_std)
fn test_max() raises:
    """tmax on a (2, 3, 2) ramp: global max plus every per-axis reduction."""
    var t = Tensor[dtype](2, 3, 2)
    for i in range(12):
        t[i] = i + 1

    # Global max of the ramp 1..12.
    var tensor_max = tmax(t)
    assert_equal(tensor_max, 12)

    # Helper: copy a compile-time tuple of reference values into a tensor.
    @parameter
    fn fill_tensor[
        size: Int
    ](inout tensor: Tensor[dtype], values: StaticIntTuple[size]):
        for i in range(tensor.num_elements()):
            tensor[i] = values[i]

    # Axis 0: (2, 3, 2) -> (1, 3, 2).
    var tensor_max_axis_0 = Tensor[dtype](get_reduce_shape(t.shape(), axis=0))
    tmax(tensor_max_axis_0, t, axis=0)
    var expected_max_axis_0_temp = StaticIntTuple[6](7, 8, 9, 10, 11, 12)
    var expected_max_axis_0 = Tensor[dtype](1, 3, 2)
    fill_tensor(expected_max_axis_0, expected_max_axis_0_temp)
    assert_tensors_equal(tensor_max_axis_0, expected_max_axis_0)

    # Axis 1: (2, 3, 2) -> (2, 1, 2).
    var tensor_max_axis_1 = Tensor[dtype](get_reduce_shape(t.shape(), axis=1))
    tmax(tensor_max_axis_1, t, axis=1)
    var expected_max_axis_1_temp = StaticIntTuple[4](5, 6, 11, 12)
    var expected_max_axis_1 = Tensor[dtype](2, 1, 2)
    fill_tensor(expected_max_axis_1, expected_max_axis_1_temp)
    assert_tensors_equal(tensor_max_axis_1, expected_max_axis_1)

    # Axis 2: (2, 3, 2) -> (2, 3, 1).
    var tensor_max_axis_2 = Tensor[dtype](get_reduce_shape(t.shape(), axis=2))
    tmax(tensor_max_axis_2, t, axis=2)
    var expected_max_axis_2_temp = StaticIntTuple[6](2, 4, 6, 8, 10, 12)
    var expected_max_axis_2 = Tensor[dtype](2, 3, 1)
    fill_tensor(expected_max_axis_2, expected_max_axis_2_temp)
    assert_tensors_equal(tensor_max_axis_2, expected_max_axis_2)
from test_tensorutils_data import TransposeData
fn test_transpose() raises:
    """transpose() against the precomputed TransposeData fixtures."""
    # Plain 2-D transpose.
    var data = TransposeData.generate_1_2dim_test_case()
    var transposed = transpose(data.A, TensorShape(data.transpose_dims))
    assert_tensors_equal(transposed, data.expected)

    # Permutations that swap exactly two dimensions.
    data = TransposeData.generate_2_2dim_test_case()
    transposed = transpose(data.A, TensorShape(data.transpose_dims))
    assert_tensors_equal(transposed, data.expected)

    data = TransposeData.generate_3_2dim_test_case()
    transposed = transpose(data.A, TensorShape(data.transpose_dims))
    assert_tensors_equal(transposed, data.expected)

    data = TransposeData.generate_4_2dim_test_case()
    transposed = transpose(data.A, TensorShape(data.transpose_dims))
    assert_tensors_equal(transposed, data.expected)

    # Permutations that move every dimension.
    data = TransposeData.generate_1_alldim_test_case()
    transposed = transpose(data.A, TensorShape(data.transpose_dims))
    assert_tensors_equal(transposed, data.expected)

    data = TransposeData.generate_2_alldim_test_case()
    transposed = transpose(data.A, TensorShape(data.transpose_dims))
    assert_tensors_equal(transposed, data.expected)

    # Full reversal of the axes.
    data = TransposeData.generate_1_transpose_test_case()
    transposed = transpose(data.A, TensorShape(data.transpose_dims))
    assert_tensors_equal(transposed, data.expected)
fn test_accumulate_grad() raises:
    """accumulate_grad reduces an upstream gradient down to a target shape."""
    alias upstream_shape = TensorShape(2, 3, 4)
    alias target_shape = TensorShape(2, 1, 1)

    var upstream = Tensor[dtype](upstream_shape)
    fill(upstream, 3.0)

    # Same rank, singleton trailing dims.
    var grad = Tensor[dtype](target_shape)
    accumulate_grad[target_shape, upstream_shape](grad, upstream)
    var expected = Tensor[dtype](2, 1, 1)
    fill(expected, 36)
    assert_tensors_equal(grad, expected)

    # Lower-rank target: (2, 1) against upstream (2, 3, 4).
    alias target_shape_2 = TensorShape(2, 1)
    grad = Tensor[dtype](target_shape_2)
    accumulate_grad[target_shape_2, upstream_shape](grad, upstream)
    expected = Tensor[dtype](2, 1)
    fill(expected, 24)
    assert_tensors_equal(grad, expected)
# from test_tensorutils_data import PaddingData
# fn test_padding() raises:
# # 1D padding (only after)
# var data = PaddingData.generate_1d_test_case_after()
# var padded_data = pad_zeros[dtype, nelts](data.A, data.pad_with)
# assert_tensors_equal(padded_data, data.expected)
# # 1D padding (before and after)
# data = PaddingData.generate_1d_test_case_before_after()
# padded_data = pad_zeros[dtype, nelts](data.A, data.pad_with)
# assert_tensors_equal(padded_data, data.expected)
# # 2D padding
# data = PaddingData.generate_2d_test_case()
# padded_data = pad_zeros[dtype, nelts](data.A, data.pad_with)
# assert_tensors_equal(padded_data, data.expected)
# # 3D padding (simple)
# data = PaddingData.generate_3d_test_case_simple()
# padded_data = pad_zeros[dtype, nelts](data.A, data.pad_with)
# assert_tensors_equal(padded_data, data.expected)
# # 3D padding
# data = PaddingData.generate_3d_test_case()
# padded_data = pad_zeros[dtype, nelts](data.A, data.pad_with)
# assert_tensors_equal(padded_data, data.expected)
# # 4D padding
# data = PaddingData.generate_4d_test_case()
# padded_data = pad_zeros[dtype, nelts](data.A, data.pad_with)
# assert_tensors_equal(padded_data, data.expected)
fn main():
    """Run every tensorutils test; print (without re-raising) any failure."""
    try:
        test_zero()
        test_fill()
        test_dot()
        test_elwise_transform()
        test_elwise_pow()
        test_elwise_tensor_tensor()
        test_elwise_tensor_scalar()
        test_elwise_broadcast_tensor()
        test_sum_mean_std()
        test_sum_mean_std_n()
        test_max()
        test_transpose()
        test_accumulate_grad()
        # # test_padding()
    except e:
        # Fix: the message previously said "tensorutils.py" — this is a Mojo
        # test file, so point at the right source.
        print("[ERROR] Error in test_tensorutils.mojo")
        print(e)
<filename>basalt/tests/mojo/test_tensorutils_data.mojo
from math import add
from basalt import dtype, nelts
from basalt.nn import Tensor, TensorShape
from basalt.utils.tensorutils import fill, elwise_op
fn generate_tensor(*shape: Int) -> Tensor[dtype]:
    """Build a tensor of the given shape holding the ramp 1, 2, ..., N."""
    var result = Tensor[dtype](shape)
    for i in range(result.num_elements()):
        result[i] = i + 1
    return result ^
fn generate_expected_tensor[
    size: Int
](data: StaticIntTuple[size], *shape: Int) -> Tensor[dtype]:
    """Copy `size` reference values from a static tuple into a new tensor."""
    var result = Tensor[dtype](shape)
    for i in range(size):
        result[i] = data[i]
    return result ^
struct TransposeData:
    """Fixture bundle for transpose tests: an input tensor `A`, the expected
    transposed tensor, and the axis permutation relating the two."""

    var A: Tensor[dtype]
    var expected: Tensor[dtype]
    var transpose_dims: VariadicList[Int]

    fn __init__(
        inout self,
        A: Tensor[dtype],
        expected: Tensor[dtype],
        transpose_dims: VariadicList[Int],
    ):
        self.A = A
        self.expected = expected
        self.transpose_dims = transpose_dims

    @staticmethod
    fn generate_1_2dim_test_case() -> TransposeData:
        """Plain 2-D transpose: (2, 3) ramp with permutation (1, 0)."""
        var input = generate_tensor(2, 3)
        var dims = VariadicList[Int](1, 0)
        var values = StaticIntTuple[6](1, 4, 2, 5, 3, 6)
        var expected = generate_expected_tensor(values, 3, 2)
        return TransposeData(input, expected, dims)

    @staticmethod
    fn generate_2_2dim_test_case() -> TransposeData:
        """3-D swap of the outer axes: (2, 3, 2) with permutation (2, 1, 0)."""
        var input = generate_tensor(2, 3, 2)
        var dims = VariadicList[Int](2, 1, 0)
        var values = StaticIntTuple[12](1, 7, 3, 9, 5, 11, 2, 8, 4, 10, 6, 12)
        var expected = generate_expected_tensor(values, 2, 3, 2)
        return TransposeData(input, expected, dims)

    @staticmethod
    fn generate_3_2dim_test_case() -> TransposeData:
        """4-D swap of the middle axes: permutation (0, 2, 1, 3)."""
        var input = generate_tensor(2, 3, 2, 3)
        var dims = VariadicList[Int](0, 2, 1, 3)
        var values = StaticIntTuple[36](
            1, 2, 3, 7, 8, 9, 13, 14, 15, 4, 5, 6, 10, 11, 12, 16, 17, 18,
            19, 20, 21, 25, 26, 27, 31, 32, 33, 22, 23, 24, 28, 29, 30, 34, 35, 36,
        )
        var expected = generate_expected_tensor(values, 2, 2, 3, 3)
        return TransposeData(input, expected, dims)

    @staticmethod
    fn generate_4_2dim_test_case() -> TransposeData:
        """5-D swap of axes 1 and 3: permutation (0, 3, 2, 1, 4)."""
        var input = generate_tensor(3, 2, 3, 2, 3)
        var dims = VariadicList[Int](0, 3, 2, 1, 4)
        var values = StaticIntTuple[108](
            1, 2, 3, 19, 20, 21, 7, 8, 9, 25, 26, 27, 13, 14, 15, 31, 32, 33,
            4, 5, 6, 22, 23, 24, 10, 11, 12, 28, 29, 30, 16, 17, 18, 34, 35, 36,
            37, 38, 39, 55, 56, 57, 43, 44, 45, 61, 62, 63, 49, 50, 51, 67, 68, 69,
            40, 41, 42, 58, 59, 60, 46, 47, 48, 64, 65, 66, 52, 53, 54, 70, 71, 72,
            73, 74, 75, 91, 92, 93, 79, 80, 81, 97, 98, 99, 85, 86, 87, 103, 104, 105,
            76, 77, 78, 94, 95, 96, 82, 83, 84, 100, 101, 102, 88, 89, 90, 106, 107, 108,
        )
        var expected = generate_expected_tensor(values, 3, 2, 3, 2, 3)
        return TransposeData(input, expected, dims)

    @staticmethod
    fn generate_1_alldim_test_case() -> TransposeData:
        """4-D permutation moving every axis: (1, 0, 3, 2)."""
        var input = generate_tensor(2, 3, 2, 3)
        var dims = VariadicList[Int](1, 0, 3, 2)
        var values = StaticIntTuple[36](
            1, 4, 2, 5, 3, 6, 19, 22, 20, 23, 21, 24, 7, 10, 8, 11, 9, 12,
            25, 28, 26, 29, 27, 30, 13, 16, 14, 17, 15, 18, 31, 34, 32, 35, 33, 36,
        )
        var expected = generate_expected_tensor(values, 3, 2, 3, 2)
        return TransposeData(input, expected, dims)

    @staticmethod
    fn generate_2_alldim_test_case() -> TransposeData:
        """3-D cyclic permutation: (1, 2, 0)."""
        var input = generate_tensor(2, 3, 4)
        var dims = VariadicList[Int](1, 2, 0)
        var values = StaticIntTuple[24](
            1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18,
            7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24,
        )
        var expected = generate_expected_tensor(values, 3, 4, 2)
        return TransposeData(input, expected, dims)

    @staticmethod
    fn generate_1_transpose_test_case() -> TransposeData:
        """4-D full reversal of the axes: (3, 2, 1, 0)."""
        var input = generate_tensor(2, 3, 2, 3)
        var dims = VariadicList[Int](3, 2, 1, 0)
        var values = StaticIntTuple[36](
            1, 19, 7, 25, 13, 31, 4, 22, 10, 28, 16, 34, 2, 20, 8, 26, 14, 32,
            5, 23, 11, 29, 17, 35, 3, 21, 9, 27, 15, 33, 6, 24, 12, 30, 18, 36,
        )
        var expected = generate_expected_tensor(values, 3, 2, 3, 2)
        return TransposeData(input, expected, dims)
struct PaddingData:
    """Fixture bundle for pad_zeros tests: input `A`, the expected padded
    tensor, and the flattened per-dimension (before, after) pad amounts."""

    var A: Tensor[dtype]
    var expected: Tensor[dtype]
    var pad_with: List[Int]

    fn __init__(
        inout self,
        A: Tensor[dtype],
        expected: Tensor[dtype],
        pad_with: List[Int],
    ):
        self.A = A
        self.expected = expected
        self.pad_with = pad_with

    @staticmethod
    fn generate_1d_test_case_after() -> PaddingData:
        """1-D, trailing padding only: (2,) -> (4,)."""
        var input = generate_tensor(2)
        var padding = List[Int]()
        padding.append(0)  # before
        padding.append(2)  # after
        var values = StaticIntTuple[4](1, 2, 0, 0)
        var expected = generate_expected_tensor(values, 4)
        return PaddingData(input, expected, padding)

    @staticmethod
    fn generate_1d_test_case_before_after() -> PaddingData:
        """1-D, padding on both sides: (3,) -> (6,)."""
        var input = generate_tensor(3)
        var padding = List[Int]()
        padding.append(2)  # before
        padding.append(1)  # after
        var values = StaticIntTuple[6](0, 0, 1, 2, 3, 0)
        var expected = generate_expected_tensor(values, 6)
        return PaddingData(input, expected, padding)

    @staticmethod
    fn generate_2d_test_case() -> PaddingData:
        """2-D asymmetric padding: (2, 2) -> (5, 9)."""
        var input = generate_tensor(2, 2)
        var padding = List[Int]()
        padding.append(1)  # before_1
        padding.append(2)  # after_1
        padding.append(3)  # before_2
        padding.append(4)  # after_2
        # Laid out one result row (9 values) per line.
        var values = StaticIntTuple[45](
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 1, 2, 0, 0, 0, 0,
            0, 0, 0, 3, 4, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
        )
        var expected = generate_expected_tensor[45](values, 5, 9)
        return PaddingData(input, expected, padding)

    @staticmethod
    fn generate_3d_test_case_simple() -> PaddingData:
        """3-D, padding the middle axis only: (2, 2, 2) -> (2, 4, 2)."""
        var input = generate_tensor(2, 2, 2)
        var padding = List[Int]()
        padding.append(0)  # before_1
        padding.append(0)  # after_1
        padding.append(1)  # before_2
        padding.append(1)  # after_2
        padding.append(0)  # before_3
        padding.append(0)  # after_3
        var values = StaticIntTuple[16](
            0, 0, 1, 2, 3, 4, 0, 0, 0, 0, 5, 6, 7, 8, 0, 0
        )
        var expected = generate_expected_tensor[16](values, 2, 4, 2)
        return PaddingData(input, expected, padding)

    @staticmethod
    fn generate_3d_test_case() -> PaddingData:
        """3-D mixed padding: (1, 2, 3) -> (3, 3, 5)."""
        var input = generate_tensor(1, 2, 3)
        var padding = List[Int]()
        padding.append(1)  # before_1
        padding.append(1)  # after_1
        padding.append(1)  # before_2
        padding.append(0)  # after_2
        padding.append(0)  # before_3
        padding.append(2)  # after_3
        # Laid out one result row (5 values) per line.
        var values = StaticIntTuple[45](
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
            1, 2, 3, 0, 0,
            4, 5, 6, 0, 0,
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
        )
        var expected = generate_expected_tensor[45](values, 3, 3, 5)
        return PaddingData(input, expected, padding)

    @staticmethod
    fn generate_4d_test_case() -> PaddingData:
        """4-D, one trailing pad on every axis: (2, 2, 2, 2) -> (3, 3, 3, 3)."""
        var input = generate_tensor(2, 2, 2, 2)
        var padding = List[Int]()
        padding.append(0)  # before_1
        padding.append(1)  # after_1
        padding.append(0)  # before_2
        padding.append(1)  # after_2
        padding.append(0)  # before_3
        padding.append(1)  # after_3
        padding.append(0)  # before_4
        padding.append(1)  # after_4
        # Laid out nine values per line for readability.
        var values = StaticIntTuple[81](
            1, 2, 0, 3, 4, 0, 0, 0, 0,
            5, 6, 0, 7, 8, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            9, 10, 0, 11, 12, 0, 0, 0, 0,
            13, 14, 0, 15, 16, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
        )
        var expected = generate_expected_tensor[81](values, 3, 3, 3, 3)
        return PaddingData(input, expected, padding)
struct SumMeanStdData:
    """Fixture bundle for axis reductions over a (3, 4, 5) ramp tensor: the
    input plus the expected sum, mean, and std along one axis."""

    var A: Tensor[dtype]
    var axis: Int
    var expected_sum: Tensor[dtype]
    var expected_mean: Tensor[dtype]
    var expected_std: Tensor[dtype]

    fn __init__(
        inout self,
        A: Tensor[dtype],
        axis: Int,
        expected_sum: Tensor[dtype],
        expected_mean: Tensor[dtype],
        expected_std: Tensor[dtype],
    ):
        self.A = A
        self.axis = axis
        self.expected_sum = expected_sum
        self.expected_mean = expected_mean
        self.expected_std = expected_std

    @staticmethod
    fn generate_3d_axis_0() -> SumMeanStdData:
        """Reduce over axis 0 -> shape (1, 4, 5)."""
        var input = generate_tensor(3, 4, 5)
        var sum_values = StaticIntTuple[20](
            63, 66, 69, 72, 75, 78, 81, 84, 87, 90,
            93, 96, 99, 102, 105, 108, 111, 114, 117, 120,
        )
        var mean_values = StaticIntTuple[20](
            21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
            31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
        )
        var expected_std = Tensor[dtype](1, 4, 5)
        fill(expected_std, 16.32993162)
        var expected_sum = generate_expected_tensor[20](sum_values, 1, 4, 5)
        var expected_mean = generate_expected_tensor[20](mean_values, 1, 4, 5)
        return SumMeanStdData(input, 0, expected_sum, expected_mean, expected_std)

    @staticmethod
    fn generate_3d_axis_1() -> SumMeanStdData:
        """Reduce over axis 1 -> shape (3, 1, 5)."""
        var input = generate_tensor(3, 4, 5)
        var sum_values = StaticIntTuple[15](
            34, 38, 42, 46, 50,
            114, 118, 122, 126, 130,
            194, 198, 202, 206, 210,
        )
        # Integer parts only; the true means all end in .5, added below.
        var mean_values = StaticIntTuple[15](
            8, 9, 10, 11, 12,
            28, 29, 30, 31, 32,
            48, 49, 50, 51, 52,
        )
        var expected_std = Tensor[dtype](3, 1, 5)
        fill(expected_std, 5.59016994)
        var expected_sum = generate_expected_tensor[15](sum_values, 3, 1, 5)
        var expected_mean = generate_expected_tensor[15](mean_values, 3, 1, 5)
        elwise_op[add](expected_mean, expected_mean, 0.5)
        return SumMeanStdData(input, 1, expected_sum, expected_mean, expected_std)

    @staticmethod
    fn generate_3d_axis_2() -> SumMeanStdData:
        """Reduce over axis 2 -> shape (3, 4, 1)."""
        var input = generate_tensor(3, 4, 5)
        var sum_values = StaticIntTuple[12](
            15, 40, 65, 90, 115, 140, 165, 190, 215, 240, 265, 290
        )
        var mean_values = StaticIntTuple[12](
            3, 8, 13, 18, 23, 28, 33, 38, 43, 48, 53, 58
        )
        var expected_std = Tensor[dtype](3, 4, 1)
        fill(expected_std, 1.41421356)
        var expected_sum = generate_expected_tensor[12](sum_values, 3, 4, 1)
        var expected_mean = generate_expected_tensor[12](mean_values, 3, 4, 1)
        return SumMeanStdData(input, 2, expected_sum, expected_mean, expected_std)
| basalt/tests/mojo/test_tensorutils_data.mojo | false |
from python.python import Python
from testing import assert_true
from basalt.nn import Tensor, TensorShape
from basalt.utils.tensorutils import broadcast_shapes
fn to_tensor_shape(owned shape: PythonObject) raises -> TensorShape:
    """Convert an iterable Python shape (e.g. a numpy shape tuple) into a
    TensorShape."""
    var dims = List[Int]()
    for dim in shape:
        dims.append(int(dim.to_float64()))
    return TensorShape(dims)
fn np_broadcast_shapes(s1: TensorShape, s2: TensorShape) raises -> TensorShape:
    """Broadcast two shapes with numpy.broadcast_shapes and convert the
    result back into a TensorShape (reference for the Mojo implementation)."""
    var np = Python.import_module("numpy")

    # Rebuild both shapes as Python lists.
    var py_s1: PythonObject = []
    var py_s2: PythonObject = []
    for i in range(s1.rank()):
        py_s1 += [s1[i]]
    for i in range(s2.rank()):
        py_s2 += [s2[i]]

    return to_tensor_shape(np.broadcast_shapes(py_s1, py_s2))
fn test_broadcast_shapes() raises:
    """broadcast_shapes of two shapes must match numpy across equal,
    lower-rank, singleton, and empty-shape inputs."""
    # Identical shapes.
    var s1 = TensorShape(3, 5, 2)
    var s2 = TensorShape(3, 5, 2)
    var s3 = broadcast_shapes(s1, s2)
    assert_true(s3 == np_broadcast_shapes(s1, s2))

    # Lower-rank second shape with a leading singleton.
    s1 = TensorShape(3, 5, 2)
    s2 = TensorShape(1, 2)
    s3 = broadcast_shapes(s1, s2)
    assert_true(s3 == np_broadcast_shapes(s1, s2))

    # Lower-rank first shape.
    s1 = TensorShape(5, 1)
    s2 = TensorShape(3, 5, 1)
    s3 = broadcast_shapes(s1, s2)
    assert_true(s3 == np_broadcast_shapes(s1, s2))

    # Singleton in the middle dimension.
    s1 = TensorShape(3, 1, 2)
    s2 = TensorShape(3, 5, 2)
    s3 = broadcast_shapes(s1, s2)
    assert_true(s3 == np_broadcast_shapes(s1, s2))

    # All-singleton shape stretches to the other shape.
    s1 = TensorShape(1, 1, 1)
    s2 = TensorShape(3, 5, 2)
    s3 = broadcast_shapes(s1, s2)
    assert_true(s3 == np_broadcast_shapes(s1, s2))

    # Rank-1 shape.
    s1 = TensorShape(2)
    s2 = TensorShape(3, 5, 2)
    s3 = broadcast_shapes(s1, s2)
    assert_true(s3 == np_broadcast_shapes(s1, s2))

    # Empty (rank-0) shape.
    s1 = TensorShape()
    s2 = TensorShape(3, 5, 2)
    s3 = broadcast_shapes(s1, s2)
    assert_true(s3 == np_broadcast_shapes(s1, s2))

    # Incompatible shapes raise in both implementations; kept disabled
    # because the expected raises are noisy.
    # print("EXPECTED RAISE!")
    # try:
    #     s1 = TensorShape(3, 2, 2)
    #     s2 = TensorShape(3, 5, 2)
    #     s3 = broadcast_shapes(s1, s2)
    #     _ = np_broadcast_shapes(s1, s2)
    # except e:
    #     print("Numpy:", e)

    # print("EXPECTED RAISE!")
    # try:
    #     s1 = TensorShape(3)
    #     s2 = TensorShape(2)
    #     s3 = broadcast_shapes(s1, s2)
    #     _ = np_broadcast_shapes(s1, s2)
    # except e:
    #     print("Numpy:", e)
fn test_broadcast_shapes_multiple() raises:
    """Broadcasting three and four shapes at once must agree with numpy."""
    var np = Python.import_module("numpy")

    # Three 2-D shapes.
    var a = TensorShape(1, 2)
    var b = TensorShape(3, 1)
    var c = TensorShape(3, 2)
    var got = broadcast_shapes(a, b, c)
    var want = to_tensor_shape(np.broadcast_shapes((1, 2), (3, 1), (3, 2)))
    assert_true(got == want)

    # Four shapes of mixed rank.
    a = TensorShape(6, 7)
    b = TensorShape(5, 6, 1)
    c = TensorShape(7)
    var d = TensorShape(5, 1, 7)
    got = broadcast_shapes(a, b, c, d)
    want = to_tensor_shape(np.broadcast_shapes((6, 7), (5, 6, 1), (7), (5, 1, 7)))
    assert_true(got == want)
fn main():
    """Run the broadcasting tests; print (without re-raising) any failure."""
    try:
        test_broadcast_shapes()
        test_broadcast_shapes_multiple()
    except e:
        print("[Error] In test broadcasting.")
        print(e)
| basalt/tests/python/test_broadcast_shapes.mojo | false |
<filename>basalt/tests/python/test_conv.mojo
from random import rand
from python.python import Python
from testing import assert_equal
from basalt import dtype, nelts
from basalt.autograd import Graph, Symbol
from basalt.autograd.attributes import Attribute, AttributeVector
from basalt.autograd.ops import OP
from basalt.autograd.ops.conv import get_result_shape, CONV2D
from basalt.nn import Tensor, TensorShape, Model
from basalt.utils.tensorutils import fill
from tests import assert_tensors_equal, to_numpy, to_tensor
fn test_get_result_shape() raises:
    """get_result_shape for several padding/stride/dilation combinations.
    Only the two spatial output dims are asserted."""
    # padding=2, stride=1, dilation=1
    # input shape: (4, 28, 28) kernel shape: (1, 16)
    # result: (32, 17)
    var inputs = Tensor[dtype](4, 28, 28)
    var kernel = Tensor[dtype](1, 16)
    var res = get_result_shape(inputs.shape(), kernel.shape(), 2, 1, 1)
    assert_equal(res[0], 32)
    assert_equal(res[1], 17)

    # padding=0, stride=1, dilation=1
    # input shape: (4, 32, 17) kernel shape: (2, 2)
    # result: (31, 16)
    inputs = Tensor[dtype](4, 32, 17)
    kernel = Tensor[dtype](2, 2)
    res = get_result_shape(inputs.shape(), kernel.shape(), 0, 1, 1)
    assert_equal(res[0], 31)
    assert_equal(res[1], 16)

    # padding=(3, 1), stride=1, dilation=2
    # input shape: (4, 32, 17) kernel shape: (2, 2)
    # result: (36, 17)
    inputs = Tensor[dtype](4, 32, 17)
    kernel = Tensor[dtype](2, 2)
    res = get_result_shape(
        inputs.shape(), kernel.shape(), StaticIntTuple[2](3, 1), 1, 2
    )
    assert_equal(res[0], 36)
    assert_equal(res[1], 17)

    # padding=(3, 2), stride=(2, 1), dilation=(2, 3)
    # input shape: (4, 32, 17) kernel shape: (3, 2)
    # result: (17, 18)
    # (Comment fixed: it previously said kernel (2, 2) and result (18, 18),
    # but the code uses a (3, 2) kernel and asserts (17, 18).)
    inputs = Tensor[dtype](4, 32, 17)
    kernel = Tensor[dtype](3, 2)
    res = get_result_shape(
        inputs.shape(),
        kernel.shape(),
        StaticIntTuple[2](3, 2),
        StaticIntTuple[2](2, 1),
        StaticIntTuple[2](2, 3),
    )
    assert_equal(res[0], 17)
    assert_equal(res[1], 18)
@value
struct torch_conv2d_output:
    """Bundle of torch reference results: forward output plus the gradients
    of the inputs, kernel, and bias from one backward pass."""

    var expected: Tensor[dtype]
    var expected_inputs_grad: Tensor[dtype]
    var expected_kernel_grad: Tensor[dtype]
    var expected_bias_grad: Tensor[dtype]
fn torch_conv2d(
    inputs: Tensor,
    kernel: Tensor,
    bias: Tensor,
    padding: StaticIntTuple[2],
    stride: StaticIntTuple[2],
    dilation: StaticIntTuple[2],
    upper_grad: Tensor,
) -> torch_conv2d_output:
    """Reference Conv2D via torch.nn.functional.conv2d.

    Runs the forward pass, backpropagates `upper_grad` through it, and
    returns the output together with the input/kernel/bias gradients.
    On any Python-interop failure it prints a message and returns
    shape-(1) placeholder tensors instead of raising.
    """
    var out: torch_conv2d_output
    try:
        var torch = Python.import_module("torch")
        var F = Python.import_module("torch.nn.functional")
        var np = Python.import_module("numpy")

        # Mirror the Mojo tensors as torch tensors that track gradients.
        var inputs = torch.from_numpy(to_numpy(inputs)).requires_grad_(True)
        var weights = torch.from_numpy(to_numpy(kernel)).requires_grad_(True)
        var bias = torch.from_numpy(to_numpy(bias)).requires_grad_(True)

        var expected = F.conv2d(
            inputs,
            weights,
            bias,
            (stride[0], stride[1]),
            (padding[0], padding[1]),
            (dilation[0], dilation[1]),
        )

        # Backward pass seeded with the caller-supplied upstream gradient.
        var upper_grad = torch.from_numpy(to_numpy(upper_grad))
        _ = expected.backward(upper_grad)

        # Collect the forward output and every gradient as Mojo tensors.
        out = torch_conv2d_output(
            to_tensor(expected.detach().numpy()),
            to_tensor(inputs.grad.numpy()),
            to_tensor(weights.grad.numpy()),
            to_tensor(bias.grad.numpy()),
        )
        return out

    except:
        # Best-effort fallback: callers compare against these dummies, so a
        # missing torch install surfaces as an assertion failure instead.
        print("Error importing torch")
        var d = Tensor[dtype](1)
        var out: torch_conv2d_output = torch_conv2d_output(d, d, d, d)
        return out
fn test_conv_forward[
    input_shape: TensorShape,
    kernel_shape: TensorShape,
    padding: StaticIntTuple[2],
    stride: StaticIntTuple[2],
    dilation: StaticIntTuple[2],
](inputs: Tensor[dtype], kernel: Tensor[dtype], bias: Tensor[dtype]) raises:
    """Builds a one-node CONV2D graph, runs inference on it, and compares
    the output against the torch reference from `torch_conv2d`."""

    fn create_graph() -> Graph:
        # Single CONV2D op; inputs, weights and bias are all graph inputs.
        var g = Graph()
        var inp = g.input(input_shape)
        var weights = g.input(kernel_shape)  # as input
        var bias = g.input(kernel_shape[0])  # as input
        var res = g.op(
            OP.CONV2D,
            inp,
            weights,
            bias,
            attributes=AttributeVector(
                Attribute("padding", padding),
                Attribute("stride", stride),
                Attribute("dilation", dilation),
            ),
        )
        g.out(res)
        return g ^

    alias graph = create_graph()
    assert_equal(len(graph.nodes), 1)

    var model = Model[graph](inference_only=True)
    var res = model.inference(inputs, kernel, bias)[0]

    # The upper gradient is irrelevant for the forward check; only its
    # shape must match the conv output.
    var torch_out = torch_conv2d(
        inputs,
        kernel,
        bias=bias,
        padding=padding,
        stride=stride,
        dilation=dilation,
        upper_grad=Tensor[dtype](res.shape()),
    )
    assert_tensors_equal(res, torch_out.expected)
fn test_forward_1() raises:
    """Forward conv, symmetric padding=2 with unit stride/dilation:
    (4, 1, 28, 28) conv (1, 1, 1, 16) -> (4, 1, 32, 17)."""
    alias input_shape = TensorShape(4, 1, 28, 28)
    alias kernel_shape = TensorShape(1, 1, 1, 16)
    alias padding = 2
    alias stride = 1
    alias dilation = 1

    var inputs = Tensor[dtype](input_shape)
    fill(inputs, 1.0)
    var kernel = Tensor[dtype](kernel_shape)
    fill(kernel, 1.0)
    # Bias is deliberately left zero-initialized in this case.
    var bias = Tensor[dtype](kernel_shape[0])

    test_conv_forward[input_shape, kernel_shape, padding, stride, dilation](
        inputs, kernel, bias
    )
fn test_forward_2() raises:
    """Forward conv without padding, unit stride/dilation, non-zero bias:
    (4, 1, 32, 17) conv (1, 1, 2, 2) -> (4, 1, 31, 16)."""
    alias input_shape = TensorShape(4, 1, 32, 17)
    alias kernel_shape = TensorShape(1, 1, 2, 2)
    alias padding = 0
    alias stride = 1
    alias dilation = 1

    var inputs = Tensor[dtype](input_shape)
    var kernel = Tensor[dtype](kernel_shape)
    fill(inputs, 1.0)
    fill(kernel, 1.0)

    # A distinctive bias value catches any bias mishandling in the op.
    var bias = Tensor[dtype](kernel_shape[0])
    fill(bias, 66.99)

    test_conv_forward[input_shape, kernel_shape, padding, stride, dilation](
        inputs, kernel, bias
    )
fn test_forward_3() raises:
    """Forward conv with asymmetric padding and per-axis stride/dilation:
    (4, 3, 32, 17) conv (2, 3, 2, 2) -> (4, 2, 18, 6)."""
    alias input_shape = TensorShape(4, 3, 32, 17)
    alias kernel_shape = TensorShape(2, 3, 2, 2)
    alias padding = StaticIntTuple[2](3, 1)
    alias stride = StaticIntTuple[2](2, 3)
    alias dilation = StaticIntTuple[2](2, 3)

    var inputs = Tensor[dtype](input_shape)
    var kernel = Tensor[dtype](kernel_shape)
    fill(inputs, 3.0)
    fill(kernel, 2.0)

    var bias = Tensor[dtype](kernel_shape[0])
    fill(bias, 3)

    test_conv_forward[input_shape, kernel_shape, padding, stride, dilation](
        inputs, kernel, bias
    )
fn test_conv_backward[
    ug_shape: TensorShape,
    input_shape: TensorShape,
    kernel_shape: TensorShape,
    padding: StaticIntTuple[2],
    stride: StaticIntTuple[2],
    dilation: StaticIntTuple[2],
](
    ug: Tensor[dtype], inputs: Tensor[dtype], kernel: Tensor[dtype], bias: Tensor[dtype]
) raises:
    """Checks CONV2D.backward for all three operands — 0=inputs, 1=kernel,
    2=bias — against the gradients computed by torch autograd."""
    alias bias_shape = TensorShape(kernel_shape[0])
    alias attributes = AttributeVector(
        Attribute("padding", padding),
        Attribute("stride", stride),
        Attribute("dilation", dilation),
    )

    # Gradient w.r.t. each operand, selected by the first compile-time index.
    var grad1 = CONV2D.backward[
        0, ug_shape, input_shape, kernel_shape, bias_shape, attributes
    ](ug, inputs, kernel, bias)
    var grad2 = CONV2D.backward[
        1, ug_shape, input_shape, kernel_shape, bias_shape, attributes
    ](ug, inputs, kernel, bias)
    var grad3 = CONV2D.backward[
        2, ug_shape, input_shape, kernel_shape, bias_shape, attributes
    ](ug, inputs, kernel, bias)

    var torch_out = torch_conv2d(
        inputs,
        kernel,
        bias=bias,
        padding=padding,
        stride=stride,
        dilation=dilation,
        upper_grad=ug,
    )
    assert_tensors_equal["almost"](grad1, torch_out.expected_inputs_grad)
    assert_tensors_equal["almost"](grad2, torch_out.expected_kernel_grad)
    assert_tensors_equal["almost"](grad3, torch_out.expected_bias_grad)
fn test_backward_1() raises:
    """Backward conv: padding=2, stride=1, dilation=1, random bias."""
    alias padding = 2
    alias stride = 1
    alias dilation = 1
    alias input_shape = TensorShape(4, 2, 28, 28)
    alias kernel_shape = TensorShape(3, 2, 1, 16)
    var inputs = Tensor[dtype](input_shape)
    var kernel = Tensor[dtype](kernel_shape)
    fill(inputs, 1.0)
    fill(kernel, 1.0)
    var bias = Tensor[dtype](kernel_shape[0])
    rand[dtype](bias.data(), bias.num_elements())
    # uppergrad: shape derived from the conv output shape.
    alias res = get_result_shape(input_shape, kernel_shape, padding, stride, dilation)
    alias ug_shape = TensorShape(input_shape[0], kernel_shape[0], res[0], res[1])
    var ug = Tensor[dtype](ug_shape)  # NOTE(review): left zero here, unlike tests 2/3
    test_conv_backward[ug_shape, input_shape, kernel_shape, padding, stride, dilation](
        ug, inputs, kernel, bias
    )
fn test_backward_2() raises:
    """Backward conv: padding=(2, 4), stride=(3, 1), dilation=2, with
    random bias and random upper gradient."""
    alias padding = StaticIntTuple[2](2, 4)
    alias stride = StaticIntTuple[2](3, 1)
    alias dilation = 2
    alias input_shape = TensorShape(4, 2, 28, 28)
    alias kernel_shape = TensorShape(3, 2, 4, 8)
    var inputs = Tensor[dtype](input_shape)
    var kernel = Tensor[dtype](kernel_shape)
    fill(inputs, 3.0)
    fill(kernel, 1.0)
    var bias = Tensor[dtype](kernel_shape[0])
    rand[dtype](bias.data(), bias.num_elements())
    # uppergrad: shape derived from the conv output shape.
    alias res = get_result_shape(input_shape, kernel_shape, padding, stride, dilation)
    alias ug_shape = TensorShape(input_shape[0], kernel_shape[0], res[0], res[1])
    var ug = Tensor[dtype](ug_shape)
    rand[dtype](ug.data(), ug.num_elements())
    test_conv_backward[ug_shape, input_shape, kernel_shape, padding, stride, dilation](
        ug, inputs, kernel, bias
    )
fn test_backward_3() raises:
    """Backward conv: padding=(3, 2), stride=2, dilation=(3, 2), with
    random bias and random upper gradient."""
    # NOTE(review): the original comment claimed padding=(2, 4); the code
    # below actually uses padding=(3, 2).
    alias padding = StaticIntTuple[2](3, 2)
    alias stride = 2
    alias dilation = StaticIntTuple[2](3, 2)
    alias input_shape = TensorShape(4, 2, 28, 28)
    alias kernel_shape = TensorShape(3, 2, 5, 6)
    var inputs = Tensor[dtype](input_shape)
    var kernel = Tensor[dtype](kernel_shape)
    fill(inputs, 3.0)
    fill(kernel, 4.0)
    var bias = Tensor[dtype](kernel_shape[0])
    rand[dtype](bias.data(), bias.num_elements())
    # uppergrad: shape derived from the conv output shape.
    alias res = get_result_shape(input_shape, kernel_shape, padding, stride, dilation)
    alias ug_shape = TensorShape(input_shape[0], kernel_shape[0], res[0], res[1])
    var ug = Tensor[dtype](ug_shape)
    rand[dtype](ug.data(), ug.num_elements())
    test_conv_backward[ug_shape, input_shape, kernel_shape, padding, stride, dilation](
        ug, inputs, kernel, bias
    )
fn main():
    # Entry point: runs all Conv2D shape, forward and backward tests and
    # reports any raised error.
    try:
        test_get_result_shape()
        test_forward_1()
        test_forward_2()
        test_forward_3()
        test_backward_1()
        test_backward_2()
        test_backward_3()
    except e:
        print("[Error] Error in Conv2D")
        print(e)
| basalt/tests/python/test_conv.mojo | false |
from random import rand
from python.python import Python
from basalt import dtype, nelts
from basalt.autograd import Graph, Symbol, OP
from basalt.autograd.attributes import Attribute, AttributeVector
from basalt.nn import Model, Tensor, TensorShape
from tests import (
assert_tensors_equal,
to_numpy,
to_tensor,
create_graph_concat,
create_graph_split,
)
@value
struct torch_output_cat:
    """Torch reference results for CONCAT: the concatenated output plus
    the gradient of each of the three inputs."""
    var expected: Tensor[dtype]
    var grad_1: Tensor[dtype]
    var grad_2: Tensor[dtype]
    var grad_3: Tensor[dtype]
fn torch_cat(
    input_1: Tensor, input_2: Tensor, input_3: Tensor, upper_grad: Tensor, dim: Int
) -> torch_output_cat:
    """Torch reference for CONCAT: concatenates the three inputs along
    `dim`, back-propagates `upper_grad`, and returns the output plus each
    input's gradient. Returns 1-element dummy tensors on interop failure."""
    try:
        var py = Python.import_module("builtins")
        var torch = Python.import_module("torch")
        var np = Python.import_module("numpy")

        var input_1 = torch.from_numpy(to_numpy(input_1)).requires_grad_(True)
        var input_2 = torch.from_numpy(to_numpy(input_2)).requires_grad_(True)
        var input_3 = torch.from_numpy(to_numpy(input_3)).requires_grad_(True)

        var expected: PythonObject

        # torch.cat expects a Python list of tensors.
        var tensors = py.list()
        tensors.append(input_1)
        tensors.append(input_2)
        tensors.append(input_3)
        expected = torch.cat(tensors, dim=dim)

        # uppergrad & backwards
        var upper_grad = torch.from_numpy(to_numpy(upper_grad))
        _ = expected.backward(upper_grad)

        return torch_output_cat(
            to_tensor(expected.detach().numpy()),
            to_tensor(input_1.grad.numpy()),
            to_tensor(input_2.grad.numpy()),
            to_tensor(input_3.grad.numpy()),
        )
    except e:
        print("Error importing torch: ", e)
        var d = Tensor[dtype](1)
        return torch_output_cat(d, d, d, d)
fn test_CONCAT() raises:
    """CONCAT of three equally-shaped random tensors, checked against
    torch.cat for dim=0 and dim=2: forward output plus all three input
    gradients."""
    alias t1_shape = TensorShape(11, 3, 17, 19)
    alias t2_shape = TensorShape(11, 3, 17, 19)
    alias t3_shape = TensorShape(11, 3, 17, 19)
    var t1 = Tensor[dtype](t1_shape)
    var t2 = Tensor[dtype](t2_shape)
    var t3 = Tensor[dtype](t3_shape)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    rand(t3.data(), t3.num_elements())

    # default: dim = 0  -> output shape (33, 3, 17, 19)
    alias graph = create_graph_concat(t1_shape, t2_shape, t3_shape, dim=0)
    var model = Model[graph]()
    var res = model.forward(t1, t2, t3)

    alias ug_shape = TensorShape(33, 3, 17, 19)
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())

    var expected_and_grad = torch_cat(t1, t2, t3, ug, dim=0)
    model.backward(ug)

    assert_tensors_equal["almost"](res, expected_and_grad.expected)
    # Gradients are looked up per graph-input symbol of the concat node.
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[0]],
        expected_and_grad.grad_1,
    )
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[1]],
        expected_and_grad.grad_2,
    )
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[2]],
        expected_and_grad.grad_3,
    )

    # dim = 2  -> output shape (11, 3, 51, 19)
    alias graph_2 = create_graph_concat(t1_shape, t2_shape, t3_shape, dim=2)
    var model_2 = Model[graph_2]()
    var res_2 = model_2.forward(t1, t2, t3)

    alias ug_shape_2 = TensorShape(11, 3, 51, 19)
    var ug_2 = Tensor[dtype](ug_shape_2)
    rand(ug_2.data(), ug_2.num_elements())

    var expected_and_grad_2 = torch_cat(t1, t2, t3, ug_2, dim=2)
    model_2.backward(ug_2)

    assert_tensors_equal["almost"](res_2, expected_and_grad_2.expected)
    assert_tensors_equal["almost"](
        model_2.parameters.grads[graph_2.nodes[0].inputs[0]],
        expected_and_grad_2.grad_1,
    )
    assert_tensors_equal["almost"](
        model_2.parameters.grads[graph_2.nodes[0].inputs[1]],
        expected_and_grad_2.grad_2,
    )
    assert_tensors_equal["almost"](
        model_2.parameters.grads[graph_2.nodes[0].inputs[2]],
        expected_and_grad_2.grad_3,
    )
@value
struct torch_output_split:
    """Torch reference results for SPLIT: the three output chunks plus the
    accumulated gradient of the single input."""
    var expected1: Tensor[dtype]
    var expected2: Tensor[dtype]
    var expected3: Tensor[dtype]
    var grad: Tensor[dtype]
fn torch_split(
    input: Tensor,
    upper_grad_1: Tensor,
    upper_grad_2: Tensor,
    upper_grad_3: Tensor,
    sections: List[Int],
    dim: Int,
) -> torch_output_split:
    """Torch reference for SPLIT: splits `input` into three sections along
    `dim`, back-propagates one upper gradient per chunk, and returns the
    chunks plus the input gradient. Returns dummies on interop failure."""
    try:
        var py = Python.import_module("builtins")
        var torch = Python.import_module("torch")
        var np = Python.import_module("numpy")

        var input = torch.from_numpy(to_numpy(input)).requires_grad_(True)

        # torch split expects the section sizes as a Python list.
        var sizes = py.list()
        sizes.append(sections[0])
        sizes.append(sections[1])
        sizes.append(sections[2])

        var chunks: PythonObject = input.split(sizes, dim=dim)

        # uppergrad & backwards: input.grad accumulates over the three calls.
        var upper_grad_1 = torch.from_numpy(to_numpy(upper_grad_1))
        var upper_grad_2 = torch.from_numpy(to_numpy(upper_grad_2))
        var upper_grad_3 = torch.from_numpy(to_numpy(upper_grad_3))
        _ = chunks[0].backward(upper_grad_1)
        _ = chunks[1].backward(upper_grad_2)
        _ = chunks[2].backward(upper_grad_3)

        return torch_output_split(
            to_tensor(chunks[0].detach().numpy()),
            to_tensor(chunks[1].detach().numpy()),
            to_tensor(chunks[2].detach().numpy()),
            to_tensor(input.grad.numpy()),
        )
    except e:
        print("Error importing torch: ", e)
        var d = Tensor[dtype](1)
        return torch_output_split(d, d, d, d)
fn test_SPLIT() raises:
    """SPLIT of one random tensor into three sections, checked against
    torch split for dim=0 and dim=2: all three outputs plus the input
    gradient accumulated from three upper gradients."""
    alias t1_shape = TensorShape(11, 3, 17, 19)
    var t1 = Tensor[dtype](t1_shape)
    rand(t1.data(), t1.num_elements())

    # default: dim = 0  (sections sum to the dim-0 size: 3 + 6 + 2 = 11)
    alias sections = List[Int](3, 6, 2)  # 11
    alias graph = create_graph_split(t1_shape, sections, dim=0)
    var model = Model[graph]()
    var results = model.inference(t1)

    # One upper gradient per output chunk.
    alias ug1_shape = TensorShape(3, 3, 17, 19)
    alias ug2_shape = TensorShape(6, 3, 17, 19)
    alias ug3_shape = TensorShape(2, 3, 17, 19)
    var ug1 = Tensor[dtype](ug1_shape)
    var ug2 = Tensor[dtype](ug2_shape)
    var ug3 = Tensor[dtype](ug3_shape)
    rand(ug1.data(), ug1.num_elements())
    rand(ug2.data(), ug2.num_elements())
    rand(ug3.data(), ug3.num_elements())

    var expected_and_grad = torch_split(t1, ug1, ug2, ug3, sections, dim=0)
    model.backward(ug1, ug2, ug3)

    assert_tensors_equal["almost"](results[0], expected_and_grad.expected1)
    assert_tensors_equal["almost"](results[1], expected_and_grad.expected2)
    assert_tensors_equal["almost"](results[2], expected_and_grad.expected3)
    assert_tensors_equal["almost"](
        model.parameters.grads[graph.nodes[0].inputs[0]],
        expected_and_grad.grad,
    )

    # dim = 2  (sections sum to the dim-2 size: 3 + 6 + 8 = 17)
    alias sections_2 = List[Int](3, 6, 8)  # 17
    alias graph_2 = create_graph_split(t1_shape, sections_2, dim=2)
    var model_2 = Model[graph_2]()
    var results_2 = model_2.inference(t1)

    alias ug1_shape_2 = TensorShape(11, 3, 3, 19)
    alias ug2_shape_2 = TensorShape(11, 3, 6, 19)
    alias ug3_shape_2 = TensorShape(11, 3, 8, 19)
    var ug1_2 = Tensor[dtype](ug1_shape_2)
    var ug2_2 = Tensor[dtype](ug2_shape_2)
    var ug3_2 = Tensor[dtype](ug3_shape_2)
    rand(ug1_2.data(), ug1_2.num_elements())
    rand(ug2_2.data(), ug2_2.num_elements())
    rand(ug3_2.data(), ug3_2.num_elements())

    var expected_and_grad_2 = torch_split(t1, ug1_2, ug2_2, ug3_2, sections_2, dim=2)
    model_2.backward(ug1_2, ug2_2, ug3_2)

    assert_tensors_equal["almost"](results_2[0], expected_and_grad_2.expected1)
    assert_tensors_equal["almost"](results_2[1], expected_and_grad_2.expected2)
    assert_tensors_equal["almost"](results_2[2], expected_and_grad_2.expected3)
    assert_tensors_equal["almost"](
        model_2.parameters.grads[graph_2.nodes[0].inputs[0]], expected_and_grad_2.grad
    )
fn main():
    # Entry point: runs the dynamic-op (CONCAT/SPLIT) comparisons against
    # torch and reports any raised error.
    print("Running dynamic ops (compare with torch) tests")
    try:
        test_CONCAT()
        test_SPLIT()
    except e:
        print("[ERROR] Error in dynamic ops (compare with torch)")
        print(e)
        return
    print("Finished dynamic ops (compare with torch) tests")
| basalt/tests/python/test_dynamic_ops_torch.mojo | false |
from random import rand
from math.limit import min_finite, max_finite
from collections.optional import OptionalReg, Optional
from python.python import Python
from python.object import PythonObject
from basalt import dtype, nelts
from basalt.autograd import OP
from basalt.autograd.attributes import Attribute, AttributeVector
from basalt.nn import Tensor, TensorShape
from tests import (
assert_tensors_equal,
to_numpy,
to_tensor,
test_unary_op,
test_binary_op,
test_unary_op_backward,
test_binary_op_backward,
)
# ------ Test Unary Ops ------
@value
struct torch_output_unary_op:
    """Torch reference results for a unary op: the forward output plus
    the gradient of the single input."""
    var expected: Tensor[dtype]
    var grad_1: Tensor[dtype]
fn torch_unary_op(
    op: OP,
    input_1: Tensor,
    upper_grad: Tensor,
    attrs: OptionalReg[AttributeVector] = None,
    attrs_tuple: Optional[PythonObject] = None,
) -> torch_output_unary_op:
    """Torch reference for a unary op: applies `op` to `input_1`,
    back-propagates `upper_grad`, and returns output + input gradient.

    Attributes arrive either as an AttributeVector (`attrs`) or as a raw
    Python object (`attrs_tuple`) for ops whose torch call needs native
    Python arguments: SQUEEZE dims, UNSQUEEZE target shape, SLICE specs.
    Returns 1-element dummy tensors on interop failure.
    """
    try:
        var torch = Python.import_module("torch")
        var np = Python.import_module("numpy")
        var py = Python.import_module("builtins")

        var input_1 = torch.from_numpy(to_numpy(input_1)).requires_grad_(True)

        var expected: PythonObject

        if op == OP.SIGMOID:
            expected = torch.sigmoid(input_1)
        elif op == OP.RELU:
            expected = torch.relu(input_1)
        elif op == OP.TANH:
            expected = torch.tanh(input_1)
        elif op == OP.CLIP:
            # Missing min/max fall back to the dtype's full range (no-op clip).
            var min_attr = attrs.value()["min"]
            var max_attr = attrs.value()["max"]
            var min_val = min_attr.value().to_scalar[
                dtype
            ]() if min_attr else min_finite[dtype]()
            var max_val = max_attr.value().to_scalar[
                dtype
            ]() if max_attr else max_finite[dtype]()
            expected = torch.clamp(input_1, min_val, max_val)
        elif op == OP.SQUEEZE:
            if attrs:
                var attrs = attrs.value()
                var dim = attrs["dims"]

                if dim:
                    expected = torch.squeeze(input_1, dim=dim.value().to_shape()[0])
                else:
                    expected = torch.squeeze(input_1)
            elif attrs_tuple:
                # Multiple dims passed through as a raw Python tuple.
                expected = torch.squeeze(input_1, dim=attrs_tuple.value()[])
            else:
                expected = torch.squeeze(input_1)
        elif op == OP.UNSQUEEZE:
            if attrs:
                var attrs = attrs.value()
                var dim = attrs["dims"]

                if dim:
                    expected = torch.unsqueeze(input_1, dim=dim.value().to_shape()[0])
                else:
                    expected = torch.unsqueeze(input_1, 0)
            elif attrs_tuple:
                # Multi-dim unsqueeze is emulated via reshape to the target shape.
                expected = torch.reshape(input_1, attrs_tuple.value()[])
            else:
                expected = torch.unsqueeze(input_1, 0)
        elif op == OP.SLICE:
            # attrs_tuple is a flat sequence of (start, end, step, dim) groups.
            var attrs = attrs_tuple.value()[]

            # create a tuple of all the slices using the dims
            var indices = PythonObject([])
            for i in range(input_1.dim()):
                indices.append(py.slice(input_1.shape[i]))

            var flip_dims = PythonObject([])
            for i in range(0, len(attrs), 4):
                var start = attrs[i]
                var end = attrs[i + 1]
                var step = attrs[i + 2]
                var dim = attrs[i + 3]
                if step < 0:
                    # Negative step: flip this dim and mirror the slice
                    # bounds into the flipped tensor's coordinates.
                    flip_dims.append(dim)
                    step = step * -1
                    end, start = (end + 1) * -1, (start + 1) * -1
                indices[dim] = py.slice(start, end, step)

            expected = input_1.flip(flip_dims)[indices]
        else:
            print("Error: op not supported (returning the value input_1): ", op)
            expected = input_1

        # uppergrad & backwards
        var upper_grad = torch.from_numpy(to_numpy(upper_grad))
        _ = expected.backward(upper_grad)

        return torch_output_unary_op(
            to_tensor(expected.detach().numpy()),
            to_tensor(input_1.grad.numpy()),
        )
    except e:
        print("Error importing torch", e)
        var d = Tensor[dtype](1)
        return torch_output_unary_op(d, d)
fn test_SIGMOID() raises:
    """SIGMOID forward and backward against the torch reference."""
    alias t1_shape = TensorShape(37, 63, 107)
    alias ug_shape = TensorShape(37, 63, 107)

    # Random input and a matching random upper gradient.
    var t1 = Tensor[dtype](t1_shape)
    rand(t1.data(), t1.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())

    var torch_ref = torch_unary_op(OP.SIGMOID, t1, ug)
    test_unary_op[OP.SIGMOID, t1_shape](t1, torch_ref.expected)
    test_unary_op_backward[OP.SIGMOID, t1_shape, ug_shape](
        t1, ug, torch_ref.grad_1
    )
fn test_RELU() raises:
    """RELU forward and backward against the torch reference."""
    alias t1_shape = TensorShape(37, 63, 107)
    alias ug_shape = TensorShape(37, 63, 107)

    # Random input and a matching random upper gradient.
    var t1 = Tensor[dtype](t1_shape)
    rand(t1.data(), t1.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())

    var torch_ref = torch_unary_op(OP.RELU, t1, ug)
    test_unary_op[OP.RELU, t1_shape](t1, torch_ref.expected)
    test_unary_op_backward[OP.RELU, t1_shape, ug_shape](
        t1, ug, torch_ref.grad_1
    )
fn test_TANH() raises:
    """TANH forward and backward against the torch reference."""
    alias t1_shape = TensorShape(37, 63, 107)
    alias ug_shape = TensorShape(37, 63, 107)

    # Random input and a matching random upper gradient.
    var t1 = Tensor[dtype](t1_shape)
    rand(t1.data(), t1.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())

    var torch_ref = torch_unary_op(OP.TANH, t1, ug)
    test_unary_op[OP.TANH, t1_shape](t1, torch_ref.expected)
    test_unary_op_backward[OP.TANH, t1_shape, ug_shape](
        t1, ug, torch_ref.grad_1
    )
fn test_CLIP() raises:
    """CLIP forward and backward against torch.clamp, covering four cases:
    no bounds, min only, max only, and both min and max."""
    alias t1_shape = TensorShape(37, 63, 107)
    alias ug_shape = TensorShape(37, 63, 107)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    rand(t1.data(), t1.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())

    # No clipping
    var expected_and_grad = torch_unary_op(OP.CLIP, t1, ug)
    test_unary_op[OP.CLIP, t1_shape](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.CLIP, t1_shape, ug_shape](
        t1, ug, expected_and_grad.grad_1
    )

    # Clip with min
    alias min_attr = Attribute("min", 0.3333)
    expected_and_grad = torch_unary_op(OP.CLIP, t1, ug, AttributeVector(min_attr))
    test_unary_op[OP.CLIP, t1_shape, AttributeVector(min_attr)](
        t1, expected_and_grad.expected
    )
    test_unary_op_backward[OP.CLIP, t1_shape, ug_shape, AttributeVector(min_attr)](
        t1, ug, expected_and_grad.grad_1
    )

    # Clip with max
    alias max_attr = Attribute("max", 0.6666)
    expected_and_grad = torch_unary_op(OP.CLIP, t1, ug, AttributeVector(max_attr))
    test_unary_op[OP.CLIP, t1_shape, AttributeVector(max_attr)](
        t1, expected_and_grad.expected
    )
    test_unary_op_backward[OP.CLIP, t1_shape, ug_shape, AttributeVector(max_attr)](
        t1, ug, expected_and_grad.grad_1
    )

    # Clip with min and max
    expected_and_grad = torch_unary_op(
        OP.CLIP, t1, ug, AttributeVector(min_attr, max_attr)
    )
    test_unary_op[OP.CLIP, t1_shape, AttributeVector(min_attr, max_attr)](
        t1, expected_and_grad.expected
    )
    test_unary_op_backward[
        OP.CLIP, t1_shape, ug_shape, AttributeVector(min_attr, max_attr)
    ](t1, ug, expected_and_grad.grad_1)
fn test_SQUEEZE() raises:
    """SQUEEZE on a (20, 1, 28, 1) input: all size-1 dims, a single
    explicit dim (3 then 1), and multiple dims via a raw tuple."""
    alias t1_shape = TensorShape(20, 1, 28, 1)
    alias ug_shape = TensorShape(20, 28)
    var t1 = Tensor[dtype](t1_shape)
    rand(t1.data(), t1.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())

    # No dims attribute: squeeze every size-1 dimension.
    var expected_and_grad = torch_unary_op(OP.SQUEEZE, t1, ug)
    test_unary_op[OP.SQUEEZE, t1_shape](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.SQUEEZE, t1_shape, ug_shape](
        t1, ug, expected_and_grad.grad_1
    )

    # Squeeze with one dim
    alias ug_shape_1 = TensorShape(20, 1, 28)
    ug = Tensor[dtype](ug_shape_1)
    rand(ug.data(), ug.num_elements())
    alias dim = Attribute("dims", TensorShape(3))
    expected_and_grad = torch_unary_op(OP.SQUEEZE, t1, ug, AttributeVector(dim))
    test_unary_op[OP.SQUEEZE, t1_shape, AttributeVector(dim)](
        t1, expected_and_grad.expected
    )
    test_unary_op_backward[OP.SQUEEZE, t1_shape, ug_shape_1, AttributeVector(dim)](
        t1, ug, expected_and_grad.grad_1
    )

    alias ug_shape_2 = TensorShape(20, 28, 1)
    ug = Tensor[dtype](ug_shape_2)
    rand(ug.data(), ug.num_elements())
    alias dim_2 = Attribute("dims", TensorShape(1))
    expected_and_grad = torch_unary_op(OP.SQUEEZE, t1, ug, AttributeVector(dim_2))
    test_unary_op[OP.SQUEEZE, t1_shape, AttributeVector(dim_2)](
        t1, expected_and_grad.expected
    )
    test_unary_op_backward[OP.SQUEEZE, t1_shape, ug_shape_2, AttributeVector(dim_2)](
        t1, ug, expected_and_grad.grad_1
    )

    # Squeeze with multiple dims
    ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())
    alias dims_shape = TensorShape(1, 3)
    # The torch reference takes the dims as a raw Python tuple, the basalt
    # op takes the same dims as an Attribute.
    alias dims_tuple = (dims_shape[0], dims_shape[1])
    alias dims = Attribute("dims", dims_shape)
    expected_and_grad = torch_unary_op(
        OP.SQUEEZE, t1, ug, attrs_tuple=PythonObject(dims_tuple)
    )
    test_unary_op[OP.SQUEEZE, t1_shape, AttributeVector(dims)](
        t1, expected_and_grad.expected
    )
    test_unary_op_backward[OP.SQUEEZE, t1_shape, ug_shape, AttributeVector(dims)](
        t1, ug, expected_and_grad.grad_1
    )
fn test_UNSQUEEZE() raises:
    """UNSQUEEZE on a (20, 28) input: one dim, then multiple dims (the
    torch reference emulates the multi-dim case with a reshape)."""
    alias t1_shape = TensorShape(20, 28)
    alias ug_shape = TensorShape(20, 1, 28)
    var t1 = Tensor[dtype](t1_shape)
    rand(t1.data(), t1.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())

    alias dim = Attribute("dims", TensorShape(1))
    var expected_and_grad = torch_unary_op(OP.UNSQUEEZE, t1, ug, AttributeVector(dim))
    test_unary_op[OP.UNSQUEEZE, t1_shape, AttributeVector(dim)](
        t1, expected_and_grad.expected
    )
    test_unary_op_backward[OP.UNSQUEEZE, t1_shape, ug_shape, AttributeVector(dim)](
        t1, ug, expected_and_grad.grad_1
    )

    # Unsqueeze with multiple dims
    alias ug_shape_2 = TensorShape(20, 1, 28, 1)
    ug = Tensor[dtype](ug_shape_2)
    alias dims_shape = TensorShape(1, 3)
    # dims_tuple is the full target shape passed to torch.reshape.
    alias dims_tuple = (20, 1, 28, 1)
    alias dims = Attribute("dims", dims_shape)
    expected_and_grad = torch_unary_op(
        OP.UNSQUEEZE, t1, ug, attrs_tuple=PythonObject(dims_tuple)
    )
    test_unary_op[OP.UNSQUEEZE, t1_shape, AttributeVector(dims)](
        t1, expected_and_grad.expected
    )
    test_unary_op_backward[OP.UNSQUEEZE, t1_shape, ug_shape_2, AttributeVector(dims)](
        t1, ug, expected_and_grad.grad_1
    )
fn test_SLICE() raises:
    """SLICE forward and backward against torch indexing: a single sliced
    axis (0, 1, then 2 with negative step), then multi-axis combinations
    including negative start/end indices.

    For each case the basalt op takes starts/ends/steps/axes attributes,
    while the torch reference receives the same data as a flat tuple of
    (start, end, step, dim) groups.
    """
    alias t1_shape = TensorShape(430, 322, 317)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    rand(t1.data(), t1.num_elements())

    # dim = 0
    alias slice_0 = Slice(5, 200, 3)
    alias attrs_0 = AttributeVector(
        Attribute("starts", TensorShape(slice_0.start)),
        Attribute("ends", TensorShape(slice_0.end)),
        Attribute("steps", TensorShape(slice_0.step)),
        Attribute("axes", TensorShape(0))
    )
    alias ug_shape = TensorShape(65, 322, 317)
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())

    var attrs_tuple_0 = PythonObject((slice_0.start, slice_0.end, slice_0.step, 0))
    var expected_and_grad = torch_unary_op(OP.SLICE, t1, ug, attrs_tuple=attrs_tuple_0)
    test_unary_op[OP.SLICE, t1_shape, attrs_0](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.SLICE, t1_shape, ug_shape, attrs_0](t1, ug, expected_and_grad.grad_1)

    # dim = 1
    alias slice_1 = Slice(10, 311, 5)
    alias attrs_1 = AttributeVector(
        Attribute("starts", TensorShape(slice_1.start)),
        Attribute("ends", TensorShape(slice_1.end)),
        Attribute("steps", TensorShape(slice_1.step)),
        Attribute("axes", TensorShape(1))
    )
    alias ug_shape_1 = TensorShape(430, 61, 317)
    ug = Tensor[dtype](ug_shape_1)
    rand(ug.data(), ug.num_elements())

    var attrs_tuple_1 = PythonObject((slice_1.start, slice_1.end, slice_1.step, 1))
    expected_and_grad = torch_unary_op(OP.SLICE, t1, ug, attrs_tuple=attrs_tuple_1)
    test_unary_op[OP.SLICE, t1_shape, attrs_1](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.SLICE, t1_shape, ug_shape_1, attrs_1](t1, ug, expected_and_grad.grad_1)

    # dim = 2 (negative step: reversed slice)
    alias slice_2 = Slice(293, 33, -7)
    alias attrs_2 = AttributeVector(
        Attribute("starts", TensorShape(slice_2.start)),
        Attribute("ends", TensorShape(slice_2.end)),
        Attribute("steps", TensorShape(slice_2.step)),
        Attribute("axes", TensorShape(2))
    )
    alias ug_shape_2 = TensorShape(430, 322, 38)
    ug = Tensor[dtype](ug_shape_2)
    rand(ug.data(), ug.num_elements())

    var attrs_tuple_2 = PythonObject((slice_2.start, slice_2.end, slice_2.step, 2))
    expected_and_grad = torch_unary_op(OP.SLICE, t1, ug, attrs_tuple=attrs_tuple_2)
    test_unary_op[OP.SLICE, t1_shape, attrs_2](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.SLICE, t1_shape, ug_shape_2, attrs_2](t1, ug, expected_and_grad.grad_1)

    # Multiple dims

    # dim = 0, 1
    alias slice_0_1 = Slice(23, 340, 3)
    alias slice_1_1 = Slice(10, 250, 5)
    alias attrs_0_1 = AttributeVector(
        Attribute("starts", TensorShape(slice_0_1.start, slice_1_1.start)),
        Attribute("ends", TensorShape(slice_0_1.end, slice_1_1.end)),
        Attribute("steps", TensorShape(slice_0_1.step, slice_1_1.step)),
        Attribute("axes", TensorShape(0, 1))
    )
    alias ug_shape_0_1 = TensorShape(106, 48, 317)
    ug = Tensor[dtype](ug_shape_0_1)
    rand(ug.data(), ug.num_elements())

    var attrs_tuple_0_1 = PythonObject((slice_0_1.start, slice_0_1.end, slice_0_1.step, 0, slice_1_1.start, slice_1_1.end, slice_1_1.step, 1))
    expected_and_grad = torch_unary_op(OP.SLICE, t1, ug, attrs_tuple=attrs_tuple_0_1)
    test_unary_op[OP.SLICE, t1_shape, attrs_0_1](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.SLICE, t1_shape, ug_shape_0_1, attrs_0_1](t1, ug, expected_and_grad.grad_1)

    # dim = 0, 1, 2 (mixed negative indices and negative steps)
    alias slice_0_2 = Slice(-412, -5, 3)
    alias slice_1_2 = Slice(-10, -182, -5)
    alias slice_2_2 = Slice(293, 33, -7)
    alias attrs_0_2 = AttributeVector(
        Attribute("starts", TensorShape(slice_0_2.start, slice_1_2.start, slice_2_2.start)),
        Attribute("ends", TensorShape(slice_0_2.end, slice_1_2.end, slice_2_2.end)),
        Attribute("steps", TensorShape(slice_0_2.step, slice_1_2.step, slice_2_2.step)),
        Attribute("axes", TensorShape(0, 1, 2))
    )
    alias ug_shape_0_2 = TensorShape(136, 35, 38)
    ug = Tensor[dtype](ug_shape_0_2)
    rand(ug.data(), ug.num_elements())

    var attrs_tuple_0_2 = PythonObject((slice_0_2.start, slice_0_2.end, slice_0_2.step, 0, slice_1_2.start, slice_1_2.end, slice_1_2.step, 1, slice_2_2.start, slice_2_2.end, slice_2_2.step, 2))
    expected_and_grad = torch_unary_op(OP.SLICE, t1, ug, attrs_tuple=attrs_tuple_0_2)
    test_unary_op[OP.SLICE, t1_shape, attrs_0_2](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.SLICE, t1_shape, ug_shape_0_2, attrs_0_2](t1, ug, expected_and_grad.grad_1)
fn main():
    # Entry point: runs all unary-op comparisons against torch references
    # and reports any raised error.
    print("Running mlops (compare with torch) tests")
    try:
        test_SIGMOID()
        test_RELU()
        test_TANH()
        test_CLIP()
        test_SQUEEZE()
        test_UNSQUEEZE()
        test_SLICE()
    except e:
        print("[ERROR] Error in mlops (compare with torch)")
        print(e)
        return
    print("Finished mlops (compare with torch) tests")
| basalt/tests/python/test_mlops_torch.mojo | false |
from random import rand
from python import Python
from testing import assert_almost_equal
from basalt import dtype
from basalt.autograd import Graph, OP
from basalt.autograd.attributes import AttributeVector, Attribute
from basalt.nn import (
Tensor,
TensorShape,
Model,
ReLU,
MaxPool2d,
CrossEntropyLoss,
optim,
)
from tests import assert_tensors_equal, to_numpy, to_tensor
fn create_CNN(
    batch_size: Int,
    conv1_weights: List[Scalar[dtype]],
    conv1_bias: List[Scalar[dtype]],
    conv2_weights: List[Scalar[dtype]],
    conv2_bias: List[Scalar[dtype]],
    linear1_weights: List[Scalar[dtype]],
    linear1_bias: List[Scalar[dtype]],
) -> Graph:
    """Builds the LeNet-style training graph:
    conv(16,5x5,pad 2) -> ReLU -> maxpool 2 -> conv(32,5x5,pad 2) -> ReLU
    -> maxpool 2 -> flatten -> linear(10) -> CrossEntropyLoss.

    All parameters are initialized from the provided flat weight lists so
    the graph matches the torch reference model exactly.
    """
    var g = Graph()
    var x = g.input(TensorShape(batch_size, 1, 28, 28))

    # conv1
    # var x1 = nn.Conv2d(g, x, out_channels=16, kernel_size=5, padding=2)
    var c1_w = g.param(TensorShape(16, x.shape[1], 5, 5), init=conv1_weights)
    var c1_b = g.param(TensorShape(16), init=conv1_bias)
    var x1 = g.op(
        OP.CONV2D,
        x,
        c1_w,
        c1_b,
        attributes=AttributeVector(
            Attribute("padding", StaticIntTuple[2](2, 2)),
            Attribute("stride", StaticIntTuple[2](1, 1)),
            Attribute("dilation", StaticIntTuple[2](1, 1)),
        ),
    )
    var x2 = ReLU(g, x1)
    var x3 = MaxPool2d(g, x2, kernel_size=2)

    # conv2
    # var x4 = nn.Conv2d(g, x3, out_channels=32, kernel_size=5, padding=2)
    var c2_w = g.param(TensorShape(32, x3.shape[1], 5, 5), init=conv2_weights)
    var c2_b = g.param(TensorShape(32), init=conv2_bias)
    var x4 = g.op(
        OP.CONV2D,
        x3,
        c2_w,
        c2_b,
        attributes=AttributeVector(
            Attribute("padding", StaticIntTuple[2](2, 2)),
            Attribute("stride", StaticIntTuple[2](1, 1)),
            Attribute("dilation", StaticIntTuple[2](1, 1)),
        ),
    )
    var x5 = ReLU(g, x4)
    var x6 = MaxPool2d(g, x5, kernel_size=2)

    # Flatten (N, C, H, W) -> (N, C*H*W) before the linear layer.
    var x6_shape = x6.shape
    var x7 = g.op(
        OP.RESHAPE,
        x6,
        attributes=AttributeVector(
            Attribute(
                "shape",
                TensorShape(x6_shape[0], x6_shape[1] * x6_shape[2] * x6_shape[3]),
            )
        ),
    )

    # linear1
    # var out = nn.Linear(g, x7, n_outputs=10)
    var l1_w = g.param(TensorShape(x7.shape[1], 10), init=linear1_weights)
    var l1_b = g.param(TensorShape(10), init=linear1_bias)
    var res = g.op(OP.DOT, x7, l1_w)
    var out = g.op(OP.ADD, res, l1_b)
    g.out(out)

    var y_true = g.input(TensorShape(batch_size, 10))
    var loss = CrossEntropyLoss(g, out, y_true)
    # var loss = nn.MSELoss(g, out, y_true)
    g.loss(loss)

    return g ^
fn run_mojo[
    batch_size: Int,
    conv1_weights: List[Scalar[dtype]],
    conv1_bias: List[Scalar[dtype]],
    conv2_weights: List[Scalar[dtype]],
    conv2_bias: List[Scalar[dtype]],
    linear1_weights: List[Scalar[dtype]],
    linear1_bias: List[Scalar[dtype]],
](
    epochs: Int,
    learning_rate: Float64,
    inputs: Tensor[dtype],
    labels: Tensor[dtype],
) -> List[Scalar[dtype]]:
    """Trains the basalt CNN (see `create_CNN`) with Adam for `epochs`
    steps on the single fixed batch and returns the per-epoch loss values."""
    alias graph = create_CNN(
        batch_size,
        conv1_weights,
        conv1_bias,
        conv2_weights,
        conv2_bias,
        linear1_weights,
        linear1_bias,
    )

    var model = Model[graph]()
    var optim = optim.Adam[graph](Reference(model.parameters), lr=learning_rate)

    var losses = List[Scalar[dtype]]()

    for i in range(epochs):
        var loss = model.forward(inputs, labels)

        # Backward pass
        optim.zero_grad()
        model.backward()
        optim.step()

        losses.append(loss[0])

    return losses
fn run_torch(
    epochs: Int,
    learning_rate: Float64,
    inputs: Tensor,
    labels: Tensor,
    owned conv1_weights: Tensor,
    owned conv1_bias: Tensor,
    owned conv2_weights: Tensor,
    owned conv2_bias: Tensor,
    owned linear1_weights: Tensor,
    owned linear1_bias: Tensor,
) -> List[Scalar[dtype]]:
    """Torch reference: trains the mirrored CNN (defined in the Python
    helper module `test_models_torch`) with the same initial weights, Adam
    optimizer and loss, and returns the per-epoch loss values. Returns an
    empty list on Python-interop failure."""
    var out: List[Scalar[dtype]] = List[Scalar[dtype]]()
    try:
        var torch = Python.import_module("torch")
        var F = Python.import_module("torch.nn.functional")
        var np = Python.import_module("numpy")

        # The reference model and loss live in tests/python/test_models_torch.py.
        Python.add_to_path("./tests/python")
        var torch_models = Python.import_module("test_models_torch")

        var inputs = torch.from_numpy(to_numpy(inputs)).requires_grad_(True)
        var labels = torch.from_numpy(to_numpy(labels)).requires_grad_(True)

        var conv1_weights = torch.from_numpy(to_numpy(conv1_weights)).requires_grad_(
            True
        )
        var conv1_bias = torch.from_numpy(to_numpy(conv1_bias)).requires_grad_(True)
        var conv2_weights = torch.from_numpy(to_numpy(conv2_weights)).requires_grad_(
            True
        )
        var conv2_bias = torch.from_numpy(to_numpy(conv2_bias)).requires_grad_(True)
        var linear1_weights = torch.from_numpy(
            to_numpy(linear1_weights)
        ).requires_grad_(True)
        var linear1_bias = torch.from_numpy(to_numpy(linear1_bias)).requires_grad_(True)

        var cnn = torch_models.CNN(
            conv1_weights,
            conv1_bias,
            conv2_weights,
            conv2_bias,
            linear1_weights,
            linear1_bias,
        )

        var loss_func = torch_models.CrossEntropyLoss2()
        # var loss_func = torch.nn.CrossEntropyLoss()
        var optimizer = torch.optim.Adam(cnn.parameters(), learning_rate)

        for i in range(epochs):
            var output = cnn.forward(inputs)
            var loss = loss_func(output, labels)

            _ = optimizer.zero_grad()
            _ = loss.backward()
            _ = optimizer.step()

            out.append(to_tensor(loss)[0])

        return out
    except e:
        print("Error importing torch")
        print(e)
        return out
fn create_weights(num_elements: Int, zero: Bool) -> List[Scalar[dtype]]:
    # Builds a constant-filled weight list: all zeros when `zero` is set,
    # otherwise every element is 0.02.
    var fill = Scalar[dtype](0.0) if zero else Scalar[dtype](0.02)
    var weights = List[Scalar[dtype]](capacity=num_elements)
    for _ in range(num_elements):
        weights.append(fill)
    return weights ^
fn dv_to_tensor(dv: List[Scalar[dtype]], shape: TensorShape) -> Tensor[dtype]:
    # Copies a flat list of scalars into a Tensor of the given shape.
    # Warns (but still copies) when the element counts do not match.
    var t = Tensor[dtype](shape)
    if t.num_elements() != len(dv):
        # Fixed typo: "shame" -> "same".
        print("[WARNING] tensor and dv not the same shape")
    for i in range(t.num_elements()):
        t[i] = dv[i]
    return t ^
fn main():
    # Trains the same CNN with basalt and with PyTorch on identical data and
    # identical deterministic initial weights, then compares per-epoch losses.
    alias learning_rate = 1e-3
    alias epochs = 100
    alias batch_size = 4
    var inputs = Tensor[dtype](batch_size, 1, 28, 28)
    rand[dtype](inputs.data(), inputs.num_elements())
    var labels = Tensor[dtype](batch_size, 10)  # one-hot encoded (probabilities)
    # Sample i gets class i (previously hard-coded range(4) == batch_size).
    for i in range(batch_size):
        labels[i * 10 + i] = 1.0
    alias cv1_w_shape = TensorShape(16, 1, 5, 5)
    alias conv1_weights = create_weights(cv1_w_shape.num_elements(), zero=False)
    alias cv1_b_shape = TensorShape(16)
    alias conv1_bias = create_weights(16, zero=True)
    alias cv2_w_shape = TensorShape(32, 16, 5, 5)
    alias conv2_weights = create_weights(cv2_w_shape.num_elements(), zero=False)
    alias cv2_b_shape = TensorShape(32)
    alias conv2_bias = create_weights(32, zero=True)
    alias l1_w_shape = TensorShape(32 * 7 * 7, 10)
    alias linear1_weights = create_weights(l1_w_shape.num_elements(), zero=False)
    alias l1_b_shape = TensorShape(10)
    alias linear1_bias = create_weights(10, zero=True)
    var losses_mojo = run_mojo[
        batch_size,
        conv1_weights,
        conv1_bias,
        conv2_weights,
        conv2_bias,
        linear1_weights,
        linear1_bias,
    ](
        epochs,
        learning_rate,
        inputs,
        labels,
    )
    var losses_torch = run_torch(
        epochs,
        learning_rate,
        inputs,
        labels,
        dv_to_tensor(conv1_weights, cv1_w_shape),
        dv_to_tensor(conv1_bias, cv1_b_shape),
        dv_to_tensor(conv2_weights, cv2_w_shape),
        dv_to_tensor(conv2_bias, cv2_b_shape),
        dv_to_tensor(linear1_weights, l1_w_shape),
        dv_to_tensor(linear1_bias, l1_b_shape),
    )
    # Single comparison loop; the losses were previously printed twice by two
    # back-to-back loops. A success flag matches the sibling model tests.
    var success = True
    for i in range(epochs):
        var loss_mojo = losses_mojo[i]
        var loss_torch = losses_torch[i]
        print("loss_mojo: ", loss_mojo, " loss_torch: ", loss_torch)
        try:
            assert_almost_equal(loss_mojo, loss_torch, rtol=1e-5)
        except e:
            print("Losses not equal")
            print(e)
            success = False
            break
    if success:
        print("SUCCESS: All losses in CNN model are equal.")
| basalt/tests/python/test_models_mnist.mojo | false |
from random import rand
from python import Python
from math.limit import max_finite
from testing import assert_almost_equal
from basalt import dtype
from basalt.autograd import Graph, OP
from basalt.nn import Tensor, TensorShape, Model, MSELoss, optim
from basalt.utils.rand_utils import MersenneTwister
from tests import to_numpy, to_tensor
fn create_linear_regression(
    batch_size: Int,
    n_outputs: Int,
    linear1_weights: List[Scalar[dtype]],
    linear1_bias: List[Scalar[dtype]],
) -> Graph:
    # Builds a single linear layer (13 -> n_outputs) followed by an MSE loss.
    # Node registration order is kept identical to the original builder.
    var graph = Graph()
    var inp = graph.input(TensorShape(batch_size, 13))
    # Fully connected layer: y = x @ W + b
    var weights = graph.param(TensorShape(13, n_outputs), init=linear1_weights)
    var bias = graph.param(TensorShape(n_outputs), init=linear1_bias)
    var matmul = graph.op(OP.DOT, inp, weights)
    var prediction = graph.op(OP.ADD, matmul, bias)
    graph.out(prediction)
    var target = graph.input(TensorShape(batch_size, n_outputs))
    var loss = MSELoss(graph, prediction, target)
    graph.loss(loss)
    return graph ^
fn run_mojo[
    batch_size: Int,
    n_outputs: Int,
    linear1_weights: List[Scalar[dtype]],
    linear1_bias: List[Scalar[dtype]],
](
    epochs: Int,
    learning_rate: Float64,
    inputs: Tensor[dtype],
    labels: Tensor[dtype],
) -> List[Scalar[dtype]]:
    # Trains the basalt linear-regression graph with Adam and returns the
    # loss recorded at every epoch.
    alias graph = create_linear_regression(
        batch_size,
        n_outputs,
        linear1_weights,
        linear1_bias,
    )
    var model = Model[graph]()
    # Renamed from `optim` to avoid shadowing the imported `optim` module.
    var optimizer = optim.Adam[graph](Reference(model.parameters), lr=learning_rate)
    var losses = List[Scalar[dtype]]()
    for i in range(epochs):
        var loss = model.forward(inputs, labels)
        # Backward pass
        optimizer.zero_grad()
        model.backward()
        optimizer.step()
        losses.append(loss[0])
    return losses
fn run_torch(
    epochs: Int,
    learning_rate: Float64,
    inputs: Tensor,
    labels: Tensor,
    owned linear1_weights: Tensor,
    owned linear1_bias: Tensor,
) -> List[Scalar[dtype]]:
    # PyTorch reference implementation of the linear-regression training loop;
    # returns the loss of every epoch for comparison with the basalt run.
    # Returns an empty (or partial) list if the torch interop fails.
    var out: List[Scalar[dtype]] = List[Scalar[dtype]]()
    try:
        var torch = Python.import_module("torch")
        # Removed unused imports of torch.nn.functional and numpy.
        Python.add_to_path("./tests/python")
        var torch_models = Python.import_module("test_models_torch")
        var inputs = torch.from_numpy(to_numpy(inputs)).requires_grad_(True)
        var labels = torch.from_numpy(to_numpy(labels)).requires_grad_(True)
        var linear1_weights = torch.from_numpy(
            to_numpy(linear1_weights)
        ).requires_grad_(True)
        var linear1_bias = torch.from_numpy(to_numpy(linear1_bias)).requires_grad_(True)
        var regression = torch_models.LinearRegression(
            linear1_weights,
            linear1_bias,
        )
        var loss_func = torch_models.MSELoss()
        var optimizer = torch.optim.Adam(regression.parameters(), learning_rate)
        for i in range(epochs):
            var output = regression.forward(inputs)
            var loss = loss_func(output, labels)
            _ = optimizer.zero_grad()
            _ = loss.backward()
            _ = optimizer.step()
            out.append(to_tensor(loss)[0].cast[dtype]())
        return out
    except e:
        print("Error importing torch")
        print(e)
        return out
fn create_weights(num_elements: Int, zero: Bool) -> List[Scalar[dtype]]:
    # Deterministic pseudo-random weights (fixed Mersenne-Twister seed) scaled
    # down by the int32 max and then by 10, or all zeros when `zero` is set.
    var prng = MersenneTwister(123456)
    # Loop-invariant normalization factor, hoisted out of the loop.
    var scale = max_finite[DType.int32]().cast[dtype]()
    var weights = List[Scalar[dtype]](capacity=num_elements)
    for i in range(num_elements):
        if zero:
            weights.append(Scalar[dtype](0.0))
        else:
            var rand_float = prng.next().cast[dtype]() / scale
            weights.append(Scalar[dtype](rand_float / 10))
    return weights ^
fn dv_to_tensor(dv: List[Scalar[dtype]], shape: TensorShape) -> Tensor[dtype]:
    # Copies a flat list of scalars into a Tensor of the given shape.
    # Warns (but still copies) when the element counts do not match.
    var t = Tensor[dtype](shape)
    if t.num_elements() != len(dv):
        # Fixed typo: "shame" -> "same".
        print("[WARNING] tensor and dv not the same shape")
    for i in range(t.num_elements()):
        t[i] = dv[i]
    return t ^
fn main():
    # Trains the linear-regression model with basalt and with PyTorch on the
    # same data and initial weights, then asserts per-epoch losses match.
    alias learning_rate = 1e-3
    alias epochs = 100
    alias batch_size = 64
    alias n_outputs = 10
    var inputs = Tensor[dtype](batch_size, 13)
    rand[dtype](inputs.data(), inputs.num_elements())
    # Every target entry is set to 1.0.
    var labels = Tensor[dtype](batch_size, n_outputs)
    for i in range(batch_size):
        for j in range(n_outputs):
            labels[i * n_outputs + j] = 1
    alias l1_w_shape = TensorShape(13, n_outputs)
    alias linear1_weights = create_weights(l1_w_shape.num_elements(), zero=False)
    alias l1_b_shape = TensorShape(n_outputs)
    alias linear1_bias = create_weights(l1_b_shape.num_elements(), zero=False)
    var losses_mojo = run_mojo[batch_size, n_outputs, linear1_weights, linear1_bias,](
        epochs,
        learning_rate,
        inputs,
        labels,
    )
    var losses_torch = run_torch(
        epochs,
        learning_rate,
        inputs,
        labels,
        dv_to_tensor(linear1_weights, l1_w_shape),
        dv_to_tensor(linear1_bias, l1_b_shape),
    )
    var success = True
    for i in range(epochs):
        var loss_mojo = losses_mojo[i]
        var loss_torch = losses_torch[i]
        # print("loss_mojo: ", loss_mojo, " loss_torch: ", loss_torch)
        try:
            assert_almost_equal(loss_mojo, loss_torch, rtol=1e-4)
        except e:
            print("Losses not equal")
            print(e)
            success = False
            break
    if success:
        # Fixed typo: "SUCCES" -> "SUCCESS".
        print("SUCCESS: All losses in Linear Regression model are equal.")
| basalt/tests/python/test_models_regression.mojo | false |
from random import rand
from python import Python
import math
from math.limit import max_finite
from testing import assert_almost_equal
from basalt import dtype
from basalt.autograd import Graph, OP
from basalt.nn import Tensor, TensorShape, Model, ReLU, MSELoss, optim
from basalt.utils.rand_utils import MersenneTwister
from tests import to_numpy, to_tensor
fn create_simple_nn(
    batch_size: Int,
    linear1_weights: List[Scalar[dtype]],
    linear1_bias: List[Scalar[dtype]],
    linear2_weights: List[Scalar[dtype]],
    linear2_bias: List[Scalar[dtype]],
    linear3_weights: List[Scalar[dtype]],
    linear3_bias: List[Scalar[dtype]],
) -> Graph:
    # 3-layer MLP (1 -> 32 -> 32 -> 1) with ReLU activations and an MSE loss.
    # Node registration order is kept identical to the original builder.
    var graph = Graph()
    var inp = graph.input(TensorShape(batch_size, 1))
    var target = graph.input(TensorShape(batch_size, 1))
    # Layer 1: 1 -> 32, then ReLU
    var w1 = graph.param(TensorShape(1, 32), init=linear1_weights)
    var b1 = graph.param(TensorShape(32), init=linear1_bias)
    var lin1 = graph.op(OP.DOT, inp, w1)
    var pre_act1 = graph.op(OP.ADD, lin1, b1)
    var act1 = ReLU(graph, pre_act1)
    # Layer 2: 32 -> 32, then ReLU
    var w2 = graph.param(TensorShape(32, 32), init=linear2_weights)
    var b2 = graph.param(TensorShape(32), init=linear2_bias)
    var lin2 = graph.op(OP.DOT, act1, w2)
    var pre_act2 = graph.op(OP.ADD, lin2, b2)
    var act2 = ReLU(graph, pre_act2)
    # Output layer: 32 -> 1
    var w3 = graph.param(TensorShape(32, 1), init=linear3_weights)
    var b3 = graph.param(TensorShape(1), init=linear3_bias)
    var lin3 = graph.op(OP.DOT, act2, w3)
    var prediction = graph.op(OP.ADD, lin3, b3)
    graph.out(prediction)
    var loss = MSELoss(graph, prediction, target)
    graph.loss(loss)
    return graph ^
fn run_mojo[
    batch_size: Int,
    linear1_weights: List[Scalar[dtype]],
    linear1_bias: List[Scalar[dtype]],
    linear2_weights: List[Scalar[dtype]],
    linear2_bias: List[Scalar[dtype]],
    linear3_weights: List[Scalar[dtype]],
    linear3_bias: List[Scalar[dtype]],
](
    epochs: Int,
    learning_rate: Float64,
    inputs: Tensor[dtype],
    labels: Tensor[dtype],
) -> List[Scalar[dtype]]:
    # Trains the basalt MLP with Adam and returns the loss of every epoch.
    alias graph = create_simple_nn(
        batch_size,
        linear1_weights,
        linear1_bias,
        linear2_weights,
        linear2_bias,
        linear3_weights,
        linear3_bias,
    )
    var model = Model[graph]()
    # Renamed from `optim` to avoid shadowing the imported `optim` module.
    var optimizer = optim.Adam[graph](Reference(model.parameters), lr=learning_rate)
    var losses = List[Scalar[dtype]]()
    for i in range(epochs):
        var loss = model.forward(inputs, labels)
        # Backward pass
        optimizer.zero_grad()
        model.backward()
        optimizer.step()
        losses.append(loss[0])
    return losses
fn run_torch(
    epochs: Int,
    learning_rate: Float64,
    inputs: Tensor,
    labels: Tensor,
    owned linear1_weights: Tensor,
    owned linear1_bias: Tensor,
    owned linear2_weights: Tensor,
    owned linear2_bias: Tensor,
    owned linear3_weights: Tensor,
    owned linear3_bias: Tensor,
) -> List[Scalar[dtype]]:
    # PyTorch reference implementation of the MLP training loop; returns the
    # loss of every epoch for comparison with the basalt run. Returns an
    # empty (or partial) list if the torch interop fails.
    var out: List[Scalar[dtype]] = List[Scalar[dtype]]()
    try:
        var torch = Python.import_module("torch")
        # Removed unused imports of torch.nn.functional and numpy.
        Python.add_to_path("./tests/python")
        var torch_models = Python.import_module("test_models_torch")
        var inputs = torch.from_numpy(to_numpy(inputs)).requires_grad_(True)
        var labels = torch.from_numpy(to_numpy(labels)).requires_grad_(True)
        var linear1_weights = torch.from_numpy(
            to_numpy(linear1_weights)
        ).requires_grad_(True)
        var linear1_bias = torch.from_numpy(to_numpy(linear1_bias)).requires_grad_(True)
        var linear2_weights = torch.from_numpy(
            to_numpy(linear2_weights)
        ).requires_grad_(True)
        var linear2_bias = torch.from_numpy(to_numpy(linear2_bias)).requires_grad_(True)
        var linear3_weights = torch.from_numpy(
            to_numpy(linear3_weights)
        ).requires_grad_(True)
        var linear3_bias = torch.from_numpy(to_numpy(linear3_bias)).requires_grad_(True)
        var regression = torch_models.SimpleNN(
            linear1_weights,
            linear1_bias,
            linear2_weights,
            linear2_bias,
            linear3_weights,
            linear3_bias,
        )
        var loss_func = torch_models.MSELoss()
        var optimizer = torch.optim.Adam(regression.parameters(), learning_rate)
        for i in range(epochs):
            var output = regression.forward(inputs)
            var loss = loss_func(output, labels)
            _ = optimizer.zero_grad()
            _ = loss.backward()
            _ = optimizer.step()
            out.append(to_tensor(loss)[0].cast[dtype]())
        return out
    except e:
        print("Error importing torch")
        print(e)
        return out
fn create_weights(num_elements: Int, zero: Bool) -> List[Scalar[dtype]]:
    # Deterministic pseudo-random weights (fixed Mersenne-Twister seed) scaled
    # down by the int32 max and then by 10, or all zeros when `zero` is set.
    var prng = MersenneTwister(123456)
    # Loop-invariant normalization factor, hoisted out of the loop.
    var scale = max_finite[DType.int32]().cast[dtype]()
    var weights = List[Scalar[dtype]](capacity=num_elements)
    for i in range(num_elements):
        if zero:
            weights.append(Scalar[dtype](0.0))
        else:
            var rand_float = prng.next().cast[dtype]() / scale
            weights.append(Scalar[dtype](rand_float / 10))
    return weights ^
fn dv_to_tensor(dv: List[Scalar[dtype]], shape: TensorShape) -> Tensor[dtype]:
    # Copies a flat list of scalars into a Tensor of the given shape.
    # Warns (but still copies) when the element counts do not match.
    var t = Tensor[dtype](shape)
    if t.num_elements() != len(dv):
        # Fixed typo: "shame" -> "same".
        print("[WARNING] tensor and dv not the same shape")
    for i in range(t.num_elements()):
        t[i] = dv[i]
    return t ^
fn main():
    # Trains the sin-estimator MLP with basalt and with PyTorch on the same
    # data and initial weights, then asserts per-epoch losses match.
    alias learning_rate = 1e-3
    alias epochs = 100
    alias batch_size = 64
    # Removed unused `alias n_outputs = 10` (the model output is scalar).
    # Training data: x uniform in [-1, 1), target y = sin(x).
    # `math.sin` requires the `import math` added to this file's imports.
    var x_data = Tensor[dtype](batch_size, 1)
    rand[dtype](x_data.data(), x_data.num_elements())
    var y_data = Tensor[dtype](batch_size, 1)
    for j in range(batch_size):
        x_data[j] = x_data[j] * 2 - 1
        y_data[j] = math.sin(x_data[j])
    alias l1_w_shape = TensorShape(1, 32)
    alias l1_b_shape = TensorShape(32)
    alias l2_w_shape = TensorShape(32, 32)
    alias l2_b_shape = TensorShape(32)
    alias l3_w_shape = TensorShape(32, 1)
    alias l3_b_shape = TensorShape(1)
    alias linear1_weights = create_weights(l1_w_shape.num_elements(), zero=False)
    alias linear1_bias = create_weights(l1_b_shape.num_elements(), zero=False)
    alias linear2_weights = create_weights(l2_w_shape.num_elements(), zero=False)
    alias linear2_bias = create_weights(l2_b_shape.num_elements(), zero=False)
    alias linear3_weights = create_weights(l3_w_shape.num_elements(), zero=False)
    alias linear3_bias = create_weights(l3_b_shape.num_elements(), zero=False)
    var losses_mojo = run_mojo[
        batch_size,
        linear1_weights,
        linear1_bias,
        linear2_weights,
        linear2_bias,
        linear3_weights,
        linear3_bias,
    ](epochs, learning_rate, x_data, y_data)
    var losses_torch = run_torch(
        epochs,
        learning_rate,
        x_data,
        y_data,
        dv_to_tensor(linear1_weights, l1_w_shape),
        dv_to_tensor(linear1_bias, l1_b_shape),
        dv_to_tensor(linear2_weights, l2_w_shape),
        dv_to_tensor(linear2_bias, l2_b_shape),
        dv_to_tensor(linear3_weights, l3_w_shape),
        dv_to_tensor(linear3_bias, l3_b_shape),
    )
    var success = True
    for i in range(epochs):
        var loss_mojo = losses_mojo[i]
        var loss_torch = losses_torch[i]
        # print("loss_mojo: ", loss_mojo, " loss_torch: ", loss_torch)
        try:
            assert_almost_equal(loss_mojo, loss_torch, rtol=1e-4)
        except e:
            print("Losses not equal")
            print(e)
            success = False
            break
    if success:
        # Fixed typo: "SUCCES" -> "SUCCESS".
        print("SUCCESS: All losses in Sin estimate model are equal.")
| basalt/tests/python/test_models_sin_estimate.mojo | false |
from random import rand
from math import exp, log
from python.python import Python
from collections.optional import Optional
from basalt import dtype, nelts
from basalt.autograd import OP
from basalt.autograd.attributes import Attribute, AttributeVector
from basalt.nn import Tensor, TensorShape
from tests import (
to_numpy,
to_tensor,
test_unary_op,
test_binary_op,
test_ternary_op,
test_unary_op_backward,
test_binary_op_backward,
test_ternary_op_backward,
)
# ------ Test Binary Ops ------
@value
struct torch_output_binary_op:
    # PyTorch reference results for a binary op and its backward pass.
    var expected: Tensor[dtype]  # forward result
    var grad_1: Tensor[dtype]  # gradient w.r.t. the first input
    var grad_2: Tensor[dtype]  # gradient w.r.t. the second input
fn torch_binary_op(
    op: OP, input_1: Tensor, input_2: Tensor, upper_grad: Tensor
) -> torch_output_binary_op:
    # Computes the reference forward result and both input gradients for a
    # binary op (ADD/SUB/MUL/DIV/DOT) with PyTorch, used as ground truth by
    # the basalt op tests. Falls back to dummy 1-element tensors on failure.
    try:
        var torch = Python.import_module("torch")
        # Removed unused numpy import.
        var input_1 = torch.from_numpy(to_numpy(input_1)).requires_grad_(True)
        var input_2 = torch.from_numpy(to_numpy(input_2)).requires_grad_(True)
        var expected: PythonObject
        if op == OP.ADD:
            expected = input_1 + input_2
        elif op == OP.SUB:
            expected = input_1 - input_2
        elif op == OP.MUL:
            expected = input_1 * input_2
        elif op == OP.DIV:
            expected = input_1 / input_2
        elif op == OP.DOT:
            expected = torch.matmul(input_1, input_2)
        else:
            print("Error: op not supported (returning the default add op result): ", op)
            expected = input_1 + input_2
        # uppergrad & backwards
        var upper_grad = torch.from_numpy(to_numpy(upper_grad))
        _ = expected.backward(upper_grad)
        return torch_output_binary_op(
            to_tensor(expected.detach().numpy()),
            to_tensor(input_1.grad.numpy()),
            to_tensor(input_2.grad.numpy()),
        )
    except e:
        print("Error importing torch: ", e)
        var d = Tensor[dtype](1)
        return torch_output_binary_op(d, d, d)
fn test_ADD() raises:
    # Elementwise ADD vs the torch reference: forward and backward, for
    # same-shape inputs and for a broadcast second operand.
    alias t1_shape = TensorShape(37, 63, 107)
    alias t2_shape = TensorShape(37, 63, 107)
    alias ug_shape = TensorShape(37, 63, 107)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())
    var expected_and_grad = torch_binary_op(OP.ADD, t1, t2, ug)
    test_binary_op[OP.ADD, t1_shape, t2_shape](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.ADD, t1_shape, t2_shape, ug_shape](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
    # broadcasting
    alias t1_shape_2 = TensorShape(37, 63, 107)
    alias t2_shape_2 = TensorShape(37, 63, 1)
    alias ug_shape_2 = TensorShape(37, 63, 107)
    t1 = Tensor[dtype](t1_shape_2)
    t2 = Tensor[dtype](t2_shape_2)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    ug = Tensor[dtype](ug_shape_2)
    rand(ug.data(), ug.num_elements())
    expected_and_grad = torch_binary_op(OP.ADD, t1, t2, ug)
    test_binary_op[OP.ADD, t1_shape_2, t2_shape_2](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.ADD, t1_shape_2, t2_shape_2, ug_shape_2](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
fn test_SUB() raises:
    # Elementwise SUB vs the torch reference: forward and backward, for
    # same-shape inputs and for a broadcast second operand.
    alias t1_shape = TensorShape(37, 63, 107)
    alias t2_shape = TensorShape(37, 63, 107)
    alias ug_shape = TensorShape(37, 63, 107)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())
    var expected_and_grad = torch_binary_op(OP.SUB, t1, t2, ug)
    test_binary_op[OP.SUB, t1_shape, t2_shape](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.SUB, t1_shape, t2_shape, ug_shape](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
    # broadcasting
    alias t1_shape_2 = TensorShape(37, 63, 107)
    alias t2_shape_2 = TensorShape(37, 63, 1)
    alias ug_shape_2 = TensorShape(37, 63, 107)
    t1 = Tensor[dtype](t1_shape_2)
    t2 = Tensor[dtype](t2_shape_2)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    ug = Tensor[dtype](ug_shape_2)
    rand(ug.data(), ug.num_elements())
    expected_and_grad = torch_binary_op(OP.SUB, t1, t2, ug)
    test_binary_op[OP.SUB, t1_shape_2, t2_shape_2](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.SUB, t1_shape_2, t2_shape_2, ug_shape_2](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
fn test_MUL() raises:
    # Elementwise MUL vs the torch reference: forward and backward, for
    # same-shape inputs and for a broadcast second operand.
    alias t1_shape = TensorShape(37, 63, 107)
    alias t2_shape = TensorShape(37, 63, 107)
    alias ug_shape = TensorShape(37, 63, 107)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())
    var expected_and_grad = torch_binary_op(OP.MUL, t1, t2, ug)
    test_binary_op[OP.MUL, t1_shape, t2_shape](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.MUL, t1_shape, t2_shape, ug_shape](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
    # broadcasting
    alias t1_shape_2 = TensorShape(37, 63, 107)
    alias t2_shape_2 = TensorShape(37, 63, 1)
    alias ug_shape_2 = TensorShape(37, 63, 107)
    t1 = Tensor[dtype](t1_shape_2)
    t2 = Tensor[dtype](t2_shape_2)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    ug = Tensor[dtype](ug_shape_2)
    rand(ug.data(), ug.num_elements())
    expected_and_grad = torch_binary_op(OP.MUL, t1, t2, ug)
    test_binary_op[OP.MUL, t1_shape_2, t2_shape_2](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.MUL, t1_shape_2, t2_shape_2, ug_shape_2](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
fn test_DIV() raises:
    # Elementwise DIV vs the torch reference: same-shape inputs, broadcast
    # divisor, and broadcast dividend.
    alias t1_shape = TensorShape(37, 63, 107)
    alias t2_shape = TensorShape(37, 63, 107)
    alias ug_shape = TensorShape(37, 63, 107)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())
    var expected_and_grad = torch_binary_op(OP.DIV, t1, t2, ug)
    test_binary_op[OP.DIV, t1_shape, t2_shape](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.DIV, t1_shape, t2_shape, ug_shape](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
    # broadcasting
    alias t1_shape_2 = TensorShape(37, 63, 107)
    alias t2_shape_2 = TensorShape(37, 63, 1)
    alias ug_shape_2 = TensorShape(37, 63, 107)
    t1 = Tensor[dtype](t1_shape_2)
    t2 = Tensor[dtype](t2_shape_2)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    ug = Tensor[dtype](ug_shape_2)
    rand(ug.data(), ug.num_elements())
    expected_and_grad = torch_binary_op(OP.DIV, t1, t2, ug)
    test_binary_op[OP.DIV, t1_shape_2, t2_shape_2](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.DIV, t1_shape_2, t2_shape_2, ug_shape_2](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
    # broadcasting of the first operand (singleton last dimension on t1)
    alias t1_shape_3 = TensorShape(37, 63, 1)
    alias t2_shape_3 = TensorShape(37, 63, 107)
    alias ug_shape_3 = TensorShape(37, 63, 107)
    t1 = Tensor[dtype](t1_shape_3)
    t2 = Tensor[dtype](t2_shape_3)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    ug = Tensor[dtype](ug_shape_3)
    rand(ug.data(), ug.num_elements())
    expected_and_grad = torch_binary_op(OP.DIV, t1, t2, ug)
    test_binary_op[OP.DIV, t1_shape_3, t2_shape_3](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.DIV, t1_shape_3, t2_shape_3, ug_shape_3](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
fn test_DOT() raises:
    # Matrix multiplication vs the torch reference over several shape classes:
    # rectangular, same M/N, square, and power-of-two dimensions.
    alias t1_shape = TensorShape(107, 203)
    alias t2_shape = TensorShape(203, 139)
    alias ug_shape = TensorShape(107, 139)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    var t2: Tensor[dtype] = Tensor[dtype](t2_shape)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())
    var expected_and_grad = torch_binary_op(OP.DOT, t1, t2, ug)
    test_binary_op[OP.DOT, t1_shape, t2_shape](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.DOT, t1_shape, t2_shape, ug_shape](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
    # Test same M and N values
    alias t1_shape_2 = TensorShape(107, 186)
    alias t2_shape_2 = TensorShape(186, 107)
    alias ug_shape_2 = TensorShape(107, 107)
    t1 = Tensor[dtype](t1_shape_2)
    t2 = Tensor[dtype](t2_shape_2)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    ug = Tensor[dtype](ug_shape_2)
    rand(ug.data(), ug.num_elements())
    expected_and_grad = torch_binary_op(OP.DOT, t1, t2, ug)
    test_binary_op[OP.DOT, t1_shape_2, t2_shape_2](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.DOT, t1_shape_2, t2_shape_2, ug_shape_2](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
    # Test square matrix
    alias t1_shape_3 = TensorShape(207, 207)
    alias t2_shape_3 = TensorShape(207, 207)
    alias ug_shape_3 = TensorShape(207, 207)
    t1 = Tensor[dtype](t1_shape_3)
    t2 = Tensor[dtype](t2_shape_3)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    ug = Tensor[dtype](ug_shape_3)
    rand(ug.data(), ug.num_elements())
    expected_and_grad = torch_binary_op(OP.DOT, t1, t2, ug)
    test_binary_op[OP.DOT, t1_shape_3, t2_shape_3](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.DOT, t1_shape_3, t2_shape_3, ug_shape_3](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
    # Test with power of 2 values
    alias t1_shape_4 = TensorShape(64, 128)
    alias t2_shape_4 = TensorShape(128, 256)
    alias ug_shape_4 = TensorShape(64, 256)
    t1 = Tensor[dtype](t1_shape_4)
    t2 = Tensor[dtype](t2_shape_4)
    rand(t1.data(), t1.num_elements())
    rand(t2.data(), t2.num_elements())
    ug = Tensor[dtype](ug_shape_4)
    rand(ug.data(), ug.num_elements())
    expected_and_grad = torch_binary_op(OP.DOT, t1, t2, ug)
    test_binary_op[OP.DOT, t1_shape_4, t2_shape_4](t1, t2, expected_and_grad.expected)
    test_binary_op_backward[OP.DOT, t1_shape_4, t2_shape_4, ug_shape_4](
        t1, t2, ug, expected_and_grad.grad_1, expected_and_grad.grad_2
    )
# ------ Test Unary Ops ------
@value
struct torch_output_unary_op:
    # PyTorch reference results for a unary op and its backward pass.
    var expected: Tensor[dtype]  # forward result
    var grad_1: Tensor[dtype]  # gradient w.r.t. the input
fn torch_unary_op(op: OP, input_1: Tensor, upper_grad: Tensor) -> torch_output_unary_op:
    # Computes the reference forward result and input gradient for a unary op
    # (EXP/LOG) with PyTorch. Falls back to dummy 1-element tensors on failure.
    try:
        var torch = Python.import_module("torch")
        # Removed unused numpy import.
        var input_1 = torch.from_numpy(to_numpy(input_1)).requires_grad_(True)
        var expected: PythonObject
        if op == OP.EXP:
            expected = torch.exp(input_1)
        elif op == OP.LOG:
            expected = torch.log(input_1)
        else:
            print("Error: op not supported (returning the value input_1): ", op)
            expected = input_1
        # uppergrad & backwards
        var upper_grad = torch.from_numpy(to_numpy(upper_grad))
        _ = expected.backward(upper_grad)
        return torch_output_unary_op(
            to_tensor(expected.detach().numpy()),
            to_tensor(input_1.grad.numpy()),
        )
    except e:
        # Report the actual error instead of swallowing it
        # (consistent with torch_binary_op).
        print("Error importing torch: ", e)
        var d = Tensor[dtype](1)
        return torch_output_unary_op(d, d)
fn test_EXP() raises:
    # Elementwise exp: checks forward output and input gradient against torch.
    alias x_shape = TensorShape(37, 63, 107)
    alias grad_shape = TensorShape(37, 63, 107)
    var x = Tensor[dtype](x_shape)
    var grad = Tensor[dtype](grad_shape)
    rand(x.data(), x.num_elements())
    rand(grad.data(), grad.num_elements())
    var reference = torch_unary_op(OP.EXP, x, grad)
    test_unary_op[OP.EXP, x_shape](x, reference.expected)
    test_unary_op_backward[OP.EXP, x_shape, grad_shape](x, grad, reference.grad_1)
fn test_LOG() raises:
    # Elementwise log: checks forward output and input gradient against torch.
    alias x_shape = TensorShape(37, 63, 107)
    alias grad_shape = TensorShape(37, 63, 107)
    var x = Tensor[dtype](x_shape)
    var grad = Tensor[dtype](grad_shape)
    rand(x.data(), x.num_elements())
    rand(grad.data(), grad.num_elements())
    var reference = torch_unary_op(OP.LOG, x, grad)
    test_unary_op[OP.LOG, x_shape](x, reference.expected)
    test_unary_op_backward[OP.LOG, x_shape, grad_shape](x, grad, reference.grad_1)
# ------ Test POW ------
@value
struct torch_output_pow_op:
    # PyTorch reference results for the POW op and its backward pass.
    var expected: Tensor[dtype]  # forward result
    var grad_1: Tensor[dtype]  # gradient w.r.t. the base
    var grad_2: Tensor[dtype]  # gradient w.r.t. the exponent
fn torch_pow_op(
    op: OP, input_1: Tensor, input_2: Tensor, upper_grad: Tensor
) -> torch_output_pow_op:
    # Computes the reference forward result and both input gradients for POW
    # with PyTorch. Falls back to dummy 1-element tensors on failure.
    try:
        var torch = Python.import_module("torch")
        # Removed unused numpy import.
        var input_1 = torch.from_numpy(to_numpy(input_1)).requires_grad_(True)
        var input_2 = torch.from_numpy(to_numpy(input_2)).requires_grad_(True)
        var expected: PythonObject
        if op == OP.POW:
            expected = torch.pow(input_1, input_2)
        else:
            print("Error: op not supported (returning input 1 value): ", op)
            expected = input_1
        # uppergrad & backwards
        var upper_grad = torch.from_numpy(to_numpy(upper_grad))
        _ = expected.backward(upper_grad)
        return torch_output_pow_op(
            to_tensor(expected.detach().numpy()),
            to_tensor(input_1.grad.numpy()),
            to_tensor(input_2.grad.numpy()),
        )
    except e:
        # Report the actual error instead of swallowing it
        # (consistent with torch_binary_op).
        print("Error importing torch: ", e)
        var d = Tensor[dtype](1)
        return torch_output_pow_op(d, d, d)
fn test_POW() raises:
    # x ** 3 with a one-element exponent tensor: forward and backward vs torch.
    alias base_shape = TensorShape(37, 63, 107)
    alias exp_shape = TensorShape(1)
    alias grad_shape = TensorShape(37, 63, 107)
    var base = Tensor[dtype](base_shape)
    rand(base.data(), base.num_elements())
    var grad = Tensor[dtype](grad_shape)
    rand(grad.data(), grad.num_elements())
    alias exponent = 3
    var exp_tensor = Tensor[dtype](1)
    exp_tensor[0] = exponent
    var reference = torch_pow_op(OP.POW, base, exp_tensor, grad)
    test_binary_op[OP.POW, base_shape, exp_shape](base, exp_tensor, reference.expected)
    test_binary_op_backward[OP.POW, base_shape, exp_shape, grad_shape](
        base, exp_tensor, grad, reference.grad_1, reference.grad_2
    )
# ------ Test Reduction Ops ------
@value
struct torch_output_reduction_op:
    # PyTorch reference results for a reduction op and its backward pass.
    var expected: Tensor[dtype]  # reduced forward result
    var grad_1: Tensor[dtype]  # gradient w.r.t. the input
fn torch_reduction_op(
    op: OP, input_1: Tensor, upper_grad: Tensor, axis: Optional[Int] = None
) -> torch_output_reduction_op:
    # Computes the reference SUM/MAX/MEAN reduction with PyTorch. With an
    # axis, the reduced dimension is kept (keepdim=True); without one the
    # whole tensor is reduced and the result is reshaped to one element.
    # Falls back to dummy 1-element tensors on failure.
    try:
        var torch = Python.import_module("torch")
        # Removed unused numpy import.
        var input_1 = torch.from_numpy(to_numpy(input_1)).requires_grad_(True)
        var expected: PythonObject
        if op == OP.SUM:
            if axis:
                expected = torch.sum(input_1, axis.value()[], True)
            else:
                expected = torch.sum(input_1)
        elif op == OP.MAX:
            if axis:
                expected = torch.amax(input_1, axis.value()[], True)
            else:
                expected = torch.amax(input_1)
        elif op == OP.MEAN:
            if axis:
                expected = torch.mean(input_1, axis.value()[], True)
            else:
                expected = torch.mean(input_1)
        else:
            print("Error: op not supported (returning input 1 value): ", op)
            expected = input_1
        # uppergrad & backwards
        var upper_grad = torch.from_numpy(to_numpy(upper_grad))
        # because torch when working with a tensor of size 1, it considers it as a tensor of size 0 in reality
        if not axis:
            upper_grad = upper_grad.squeeze()
        _ = expected.backward(upper_grad)
        var expected_res: PythonObject
        var grad_1_res = input_1.grad.numpy()
        if not axis:
            expected_res = expected.detach().numpy().reshape(1)
        else:
            expected_res = expected.detach().numpy()
        return torch_output_reduction_op(
            to_tensor(expected_res),
            to_tensor(grad_1_res),
        )
    except e:
        print("Error importing torch: ", e)
        var d = Tensor[dtype](1)
        return torch_output_reduction_op(d, d)
fn test_SUM() raises:
    # SUM reduction vs the torch reference: per-axis (keepdim) for axes 1, 2,
    # and 0, plus a full reduction over all dimensions.
    alias t1_shape = TensorShape(87, 73, 107)
    alias ug_shape = TensorShape(87, 1, 107)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    rand(t1.data(), t1.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())
    # 1 axis
    alias axis = 1
    alias attrs = AttributeVector(Attribute("axis", axis))
    var expected_and_grad = torch_reduction_op(OP.SUM, t1, ug, axis)
    test_unary_op[OP.SUM, t1_shape, attrs](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.SUM, t1_shape, ug_shape, attrs](
        t1, ug, expected_and_grad.grad_1
    )
    # 2 axis
    alias ug_shape_2 = TensorShape(87, 73, 1)
    ug = Tensor[dtype](ug_shape_2)
    rand(ug.data(), ug.num_elements())
    alias axis_2 = 2
    alias attrs_2 = AttributeVector(Attribute("axis", axis_2))
    expected_and_grad = torch_reduction_op(OP.SUM, t1, ug, axis_2)
    test_unary_op[OP.SUM, t1_shape, attrs_2](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.SUM, t1_shape, ug_shape_2, attrs_2](
        t1, ug, expected_and_grad.grad_1
    )
    # 0 axis
    alias ug_shape_3 = TensorShape(1, 73, 107)
    ug = Tensor[dtype](ug_shape_3)
    rand(ug.data(), ug.num_elements())
    alias axis_3 = 0
    alias attrs_3 = AttributeVector(Attribute("axis", axis_3))
    expected_and_grad = torch_reduction_op(OP.SUM, t1, ug, axis_3)
    test_unary_op[OP.SUM, t1_shape, attrs_3](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.SUM, t1_shape, ug_shape_3, attrs_3](
        t1, ug, expected_and_grad.grad_1
    )
    # all dims
    alias ug_shape_4 = TensorShape(1)
    ug = Tensor[dtype](ug_shape_4)
    rand(ug.data(), ug.num_elements())
    expected_and_grad = torch_reduction_op(OP.SUM, t1, ug)
    test_unary_op[OP.SUM, t1_shape](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.SUM, t1_shape, ug_shape_4](
        t1, ug, expected_and_grad.grad_1
    )
fn test_MAX() raises:
    # MAX reduction vs the torch reference (torch.amax): per-axis (keepdim)
    # for axes 1, 2, and 0, plus a full reduction over all dimensions.
    alias t1_shape = TensorShape(87, 73, 107)
    alias ug_shape = TensorShape(87, 1, 107)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    rand(t1.data(), t1.num_elements())
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())
    # 1 axis
    alias axis = 1
    alias attrs = AttributeVector(Attribute("axis", axis))
    var expected_and_grad = torch_reduction_op(OP.MAX, t1, ug, axis)
    test_unary_op[OP.MAX, t1_shape, attrs](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.MAX, t1_shape, ug_shape, attrs](
        t1, ug, expected_and_grad.grad_1
    )
    # 2 axis
    alias ug_shape_2 = TensorShape(87, 73, 1)
    ug = Tensor[dtype](ug_shape_2)
    rand(ug.data(), ug.num_elements())
    alias axis_2 = 2
    alias attrs_2 = AttributeVector(Attribute("axis", axis_2))
    expected_and_grad = torch_reduction_op(OP.MAX, t1, ug, axis_2)
    test_unary_op[OP.MAX, t1_shape, attrs_2](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.MAX, t1_shape, ug_shape_2, attrs_2](
        t1, ug, expected_and_grad.grad_1
    )
    # 0 axis
    alias ug_shape_3 = TensorShape(1, 73, 107)
    ug = Tensor[dtype](ug_shape_3)
    rand(ug.data(), ug.num_elements())
    alias axis_3 = 0
    alias attrs_3 = AttributeVector(Attribute("axis", axis_3))
    expected_and_grad = torch_reduction_op(OP.MAX, t1, ug, axis_3)
    test_unary_op[OP.MAX, t1_shape, attrs_3](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.MAX, t1_shape, ug_shape_3, attrs_3](
        t1, ug, expected_and_grad.grad_1
    )
    # all dims
    alias ug_shape_4 = TensorShape(1)
    ug = Tensor[dtype](ug_shape_4)
    rand(ug.data(), ug.num_elements())
    expected_and_grad = torch_reduction_op(OP.MAX, t1, ug)
    test_unary_op[OP.MAX, t1_shape](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.MAX, t1_shape, ug_shape_4](
        t1, ug, expected_and_grad.grad_1
    )
fn test_MEAN() raises:
    """Compare basalt OP.MEAN forward/backward against torch for axes 1, 2, 0 and all dims."""
    alias t1_shape = TensorShape(87, 73, 107)
    alias ug_shape = TensorShape(87, 1, 107)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    rand(t1.data(), t1.num_elements())

    # Upper gradient matches the reduced output shape (reduced axis collapses to 1).
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())

    # 1 axis
    alias axis = 1
    alias attrs = AttributeVector(Attribute("axis", axis))

    var expected_and_grad = torch_reduction_op(OP.MEAN, t1, ug, axis)
    test_unary_op[OP.MEAN, t1_shape, attrs](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.MEAN, t1_shape, ug_shape, attrs](
        t1, ug, expected_and_grad.grad_1
    )

    # 2 axis
    alias ug_shape_2 = TensorShape(87, 73, 1)
    ug = Tensor[dtype](ug_shape_2)
    rand(ug.data(), ug.num_elements())

    alias axis_2 = 2
    alias attrs_2 = AttributeVector(Attribute("axis", axis_2))

    expected_and_grad = torch_reduction_op(OP.MEAN, t1, ug, axis_2)
    test_unary_op[OP.MEAN, t1_shape, attrs_2](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.MEAN, t1_shape, ug_shape_2, attrs_2](
        t1, ug, expected_and_grad.grad_1
    )

    # 0 axis
    alias ug_shape_3 = TensorShape(1, 73, 107)
    ug = Tensor[dtype](ug_shape_3)
    rand(ug.data(), ug.num_elements())

    alias axis_3 = 0
    alias attrs_3 = AttributeVector(Attribute("axis", axis_3))

    expected_and_grad = torch_reduction_op(OP.MEAN, t1, ug, axis_3)
    test_unary_op[OP.MEAN, t1_shape, attrs_3](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.MEAN, t1_shape, ug_shape_3, attrs_3](
        t1, ug, expected_and_grad.grad_1
    )

    # all dims: no axis attribute means a full reduction to a single element.
    alias ug_shape_4 = TensorShape(1)
    ug = Tensor[dtype](ug_shape_4)
    rand(ug.data(), ug.num_elements())

    expected_and_grad = torch_reduction_op(OP.MEAN, t1, ug)
    test_unary_op[OP.MEAN, t1_shape](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.MEAN, t1_shape, ug_shape_4](
        t1, ug, expected_and_grad.grad_1
    )
# ------ Test transformation Ops ------
@value
struct torch_output_transform_op:
    # Torch reference results for a transformation op:
    # the forward output and the gradient w.r.t. the single input.
    var expected: Tensor[dtype]
    var grad_1: Tensor[dtype]
fn torch_transform_op(
    op: OP, input_1: Tensor, upper_grad: Tensor, new_shape: PythonObject = None
) -> torch_output_transform_op:
    """Run the torch reference for a transformation op (FLATTEN/RESHAPE/TRANSPOSE).

    `new_shape` carries the target shape for RESHAPE and the permutation axes
    for TRANSPOSE; it is ignored for FLATTEN. On any Python failure a dummy
    1-element result is returned after printing the error.
    """
    try:
        var torch = Python.import_module("torch")
        # NOTE(review): `np` is unused in this body; presumably kept so numpy is
        # loaded before to_numpy/to_tensor run — confirm before removing.
        var np = Python.import_module("numpy")

        var input_1 = torch.from_numpy(to_numpy(input_1)).requires_grad_(True)

        var expected: PythonObject

        if op == OP.FLATTEN:
            expected = input_1.flatten()
        elif op == OP.RESHAPE:
            expected = input_1.reshape(new_shape)
        elif op == OP.TRANSPOSE:
            expected = input_1.permute(new_shape)
        else:
            print("Error: op not supported (returning input 1 value): ", op)
            expected = input_1

        # uppergrad & backwards
        var upper_grad = torch.from_numpy(to_numpy(upper_grad))
        _ = expected.backward(upper_grad)

        return torch_output_transform_op(
            to_tensor(expected.detach().numpy()),
            to_tensor(input_1.grad.numpy()),
        )
    except e:
        print("Error importing torch: ", e)
        var d = Tensor[dtype](1)
        return torch_output_transform_op(d, d)
fn test_FLATTEN() raises:
    """Check OP.FLATTEN forward and backward against the torch reference."""
    alias input_shape = TensorShape(87, 73, 84)
    # A flattened output is 1-D with one element per input element.
    alias grad_shape = TensorShape(input_shape.num_elements())

    var input_t = Tensor[dtype](input_shape)
    rand(input_t.data(), input_t.num_elements())

    var upper_grad = Tensor[dtype](grad_shape)
    rand(upper_grad.data(), upper_grad.num_elements())

    var torch_ref = torch_transform_op(OP.FLATTEN, input_t, upper_grad, None)
    test_unary_op[OP.FLATTEN, input_shape](input_t, torch_ref.expected)
    test_unary_op_backward[OP.FLATTEN, input_shape, grad_shape](
        input_t, upper_grad, torch_ref.grad_1
    )
fn test_RESHAPE() raises:
    """Check OP.RESHAPE forward and backward against the torch reference."""
    alias input_shape = TensorShape(87, 73, 84)
    # Target: collapse the last two dims; upper gradient uses the same shape.
    alias target_shape = TensorShape(87, 73 * 84)
    alias target_tuple = (target_shape[0], target_shape[1])
    alias reshape_attrs = AttributeVector(Attribute("shape", target_shape))

    var input_t = Tensor[dtype](input_shape)
    rand(input_t.data(), input_t.num_elements())

    var upper_grad = Tensor[dtype](target_shape)
    rand(upper_grad.data(), upper_grad.num_elements())

    var torch_ref = torch_transform_op(OP.RESHAPE, input_t, upper_grad, target_tuple)
    test_unary_op[OP.RESHAPE, input_shape, reshape_attrs](input_t, torch_ref.expected)
    test_unary_op_backward[OP.RESHAPE, input_shape, target_shape, reshape_attrs](
        input_t, upper_grad, torch_ref.grad_1
    )
fn test_TRANSPOSE() raises:
    """Compare OP.TRANSPOSE forward/backward against torch.permute for several axis orders."""
    alias t1_shape = TensorShape(87, 73, 84)
    var t1: Tensor[dtype] = Tensor[dtype](t1_shape)
    rand(t1.data(), t1.num_elements())

    # Upper gradient takes the permuted output shape.
    alias ug_shape = TensorShape(73, 84, 87)
    var ug = Tensor[dtype](ug_shape)
    rand(ug.data(), ug.num_elements())

    # Axes are stored as a TensorShape attribute; the tuple form is what torch sees.
    alias axes = TensorShape(1, 2, 0)
    alias axes_tuple = (axes[0], axes[1], axes[2])
    alias attrs = AttributeVector(Attribute("axes", axes))

    var expected_and_grad = torch_transform_op(OP.TRANSPOSE, t1, ug, axes_tuple)
    test_unary_op[OP.TRANSPOSE, t1_shape, attrs](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.TRANSPOSE, t1_shape, ug_shape, attrs](
        t1, ug, expected_and_grad.grad_1
    )

    # Test reverse axis
    alias ug_shape_2 = TensorShape(84, 73, 87)
    ug = Tensor[dtype](ug_shape_2)
    rand(ug.data(), ug.num_elements())

    alias axes_2 = TensorShape(2, 1, 0)
    alias axes_tuple_2 = (axes_2[0], axes_2[1], axes_2[2])
    alias attrs_2 = AttributeVector(Attribute("axes", axes_2))

    expected_and_grad = torch_transform_op(OP.TRANSPOSE, t1, ug, axes_tuple_2)
    test_unary_op[OP.TRANSPOSE, t1_shape, attrs_2](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.TRANSPOSE, t1_shape, ug_shape_2, attrs_2](
        t1, ug, expected_and_grad.grad_1
    )

    # Test with rank 2 tensor
    alias t1_shape_3 = TensorShape(87, 73)
    t1 = Tensor[dtype](t1_shape_3)
    rand(t1.data(), t1.num_elements())

    alias ug_shape_3 = TensorShape(73, 87)
    ug = Tensor[dtype](ug_shape_3)
    rand(ug.data(), ug.num_elements())

    alias axes_3 = TensorShape(1, 0)
    alias axes_tuple_3 = (axes_3[0], axes_3[1])
    alias attrs_3 = AttributeVector(Attribute("axes", axes_3))

    expected_and_grad = torch_transform_op(OP.TRANSPOSE, t1, ug, axes_tuple_3)
    test_unary_op[OP.TRANSPOSE, t1_shape_3, attrs_3](t1, expected_and_grad.expected)
    test_unary_op_backward[OP.TRANSPOSE, t1_shape_3, ug_shape_3, attrs_3](
        t1, ug, expected_and_grad.grad_1
    )
# ------ Test ternary Ops ------
@value
struct torch_output_ternary_op:
    # Torch reference results for a ternary op: the forward output and
    # the gradients w.r.t. each of the three inputs.
    var expected: Tensor[dtype]
    var grad_1: Tensor[dtype]
    var grad_2: Tensor[dtype]
    var grad_3: Tensor[dtype]
fn torch_ternary_op(
    op: OP, input_1: Tensor, input_2: Tensor, input_3: Tensor, upper_grad: Tensor
) -> torch_output_ternary_op:
    """Run the torch reference for a ternary op (currently only FMA: i1 * i2 + i3).

    Returns the forward result plus gradients w.r.t. all three inputs.
    On any Python failure a dummy 1-element result is returned after printing.
    """
    try:
        var torch = Python.import_module("torch")
        # NOTE(review): `np` is unused in this body; presumably kept so numpy is
        # loaded before to_numpy/to_tensor run — confirm before removing.
        var np = Python.import_module("numpy")

        var input_1 = torch.from_numpy(to_numpy(input_1)).requires_grad_(True)
        var input_2 = torch.from_numpy(to_numpy(input_2)).requires_grad_(True)
        var input_3 = torch.from_numpy(to_numpy(input_3)).requires_grad_(True)

        var expected: PythonObject

        if op == OP.FMA:
            expected = input_1 * input_2 + input_3
        else:
            print("Error: op not supported (returning input 1 value): ", op)
            expected = input_1

        # uppergrad & backwards
        var upper_grad = torch.from_numpy(to_numpy(upper_grad))
        _ = expected.backward(upper_grad)

        return torch_output_ternary_op(
            to_tensor(expected.detach().numpy()),
            to_tensor(input_1.grad.numpy()),
            to_tensor(input_2.grad.numpy()),
            to_tensor(input_3.grad.numpy()),
        )
    except e:
        print("Error importing torch: ", e)
        var d = Tensor[dtype](1)
        return torch_output_ternary_op(d, d, d, d)
fn test_FMA() raises:
    """Check OP.FMA (t1 * t2 + t3) forward and backward against torch."""
    # All three operands and the output share a single shape.
    alias shape = TensorShape(87, 73, 84)

    var a = Tensor[dtype](shape)
    rand(a.data(), a.num_elements())
    var b = Tensor[dtype](shape)
    rand(b.data(), b.num_elements())
    var c = Tensor[dtype](shape)
    rand(c.data(), c.num_elements())

    # `a` doubles as the upper gradient since it already has the output shape.
    var torch_ref = torch_ternary_op(OP.FMA, a, b, c, a)
    test_ternary_op[OP.FMA, shape, shape, shape](a, b, c, torch_ref.expected)
    test_ternary_op_backward[OP.FMA, shape, shape, shape, shape](
        a,
        b,
        c,
        a,
        torch_ref.grad_1,
        torch_ref.grad_2,
        torch_ref.grad_3,
    )
fn main():
    # Entry point: run every op comparison against the torch reference.
    # A single failure aborts the whole run and reports the error.
    print("Running ops (compare with torch) tests")
    try:
        test_ADD()
        test_SUB()
        test_MUL()
        test_DIV()
        test_DOT()
        test_EXP()
        test_LOG()
        test_POW()
        test_SUM()
        test_MAX()
        test_MEAN()
        test_FLATTEN()
        test_RESHAPE()
        test_TRANSPOSE()
        test_FMA()
    except e:
        print("[ERROR] Error in ops (compare with torch)")
        print(e)
        return

    print("Finished ops (compare with torch) tests")
| basalt/tests/python/test_ops_torch.mojo | false |
from random import rand
from python.python import Python
from testing import assert_equal
from basalt import dtype, nelts
from basalt.autograd import Graph, OP
from basalt.autograd.ops.pool import MAXPOOL2D
from basalt.autograd.ops.conv import get_result_shape
from basalt.autograd.attributes import Attribute, AttributeVector
from basalt.nn import Tensor, TensorShape, Model
from tests import assert_tensors_equal, to_numpy, to_tensor
@value
struct torch_maxpool2d_output:
    # Torch reference results: pooled output and gradient w.r.t. the input.
    var expected: Tensor[dtype]
    var expected_grad: Tensor[dtype]
fn torch_maxpool2d(
    inputs: Tensor,
    kernel_size: StaticIntTuple[2],
    padding: StaticIntTuple[2],
    stride: StaticIntTuple[2],
    dilation: StaticIntTuple[2],
    upper_grad: Tensor,
) -> torch_maxpool2d_output:
    """Run torch's F.max_pool2d forward + backward as the reference.

    Returns the pooled output and the gradient w.r.t. `inputs`. On any
    Python failure, prints an error and returns dummy 1-element tensors.
    """
    var out: torch_maxpool2d_output
    try:
        var torch = Python.import_module("torch")
        var F = Python.import_module("torch.nn.functional")
        # NOTE(review): `np` is unused in this body; presumably kept so numpy is
        # loaded before to_numpy/to_tensor run — confirm before removing.
        var np = Python.import_module("numpy")

        var inputs = torch.from_numpy(to_numpy(inputs)).requires_grad_(True)

        # F.max_pool2d positional order is kernel_size, stride, padding, dilation.
        var expected = F.max_pool2d(
            inputs,
            (kernel_size[0], kernel_size[1]),
            (stride[0], stride[1]),
            (padding[0], padding[1]),
            (dilation[0], dilation[1]),
        )

        # uppergrad & backwards
        var upper_grad = torch.from_numpy(to_numpy(upper_grad))
        _ = expected.backward(upper_grad)

        # expected
        out = torch_maxpool2d_output(
            to_tensor(expected.detach().numpy()), to_tensor(inputs.grad.numpy())
        )
        return out
    except:
        print("Error in torch_maxpool2d")
        var d = Tensor[dtype](1)
        var out = torch_maxpool2d_output(d, d)
        return out
fn test_pool_forward[
    input_shape: TensorShape,
    kernel_size: StaticIntTuple[2],
    padding: StaticIntTuple[2],
    stride: StaticIntTuple[2],
    dilation: StaticIntTuple[2],
](inputs: Tensor[dtype]) raises:
    """Build a single-node MAXPOOL2D graph, run inference, and compare with torch."""

    fn create_graph() -> Graph:
        # One-op graph: input -> MAXPOOL2D(kernel/padding/stride/dilation) -> output.
        var g = Graph()
        var inp = g.input(input_shape)

        var res = g.op(
            OP.MAXPOOL2D,
            inp,
            attributes=AttributeVector(
                Attribute("kernel_size", kernel_size),
                Attribute("padding", padding),
                Attribute("stride", stride),
                Attribute("dilation", dilation),
            ),
        )
        g.out(res)

        return g ^

    alias graph = create_graph()
    assert_equal(len(graph.nodes), 1)

    var model = Model[graph](inference_only=True)
    var res = model.inference(inputs)[0]

    # Forward-only check: the upper gradient is a freshly-constructed tensor
    # of the output shape (presumably zero-initialized — confirm), unused here.
    var torch_out = torch_maxpool2d(
        inputs,
        kernel_size=kernel_size,
        padding=padding,
        stride=stride,
        dilation=dilation,
        upper_grad=Tensor[dtype](res.shape()),
    )
    assert_tensors_equal(res, torch_out.expected)
fn test_forward_1() raises:
    """MaxPool2D forward: input (4, 1, 28, 28), kernel 5x5, padding 2, stride 1, dilation 1."""
    # Explicit 2-tuples instead of relying on Int -> StaticIntTuple[2] splatting.
    alias kernel_size = StaticIntTuple[2](5, 5)
    alias padding = StaticIntTuple[2](2, 2)
    alias stride = StaticIntTuple[2](1, 1)
    alias dilation = StaticIntTuple[2](1, 1)
    alias input_shape = TensorShape(4, 1, 28, 28)

    var input_t = Tensor[dtype](input_shape)
    rand[dtype](input_t.data(), input_t.num_elements())

    test_pool_forward[input_shape, kernel_size, padding, stride, dilation](input_t)
fn test_forward_2() raises:
    """MaxPool2D forward: input (4, 1, 32, 17), kernel 2x2, no padding, stride 1, dilation 1."""
    alias kernel_size = StaticIntTuple[2](2, 2)
    alias padding = StaticIntTuple[2](0, 0)
    alias stride = StaticIntTuple[2](1, 1)
    alias dilation = StaticIntTuple[2](1, 1)
    alias input_shape = TensorShape(4, 1, 32, 17)

    var input_t = Tensor[dtype](input_shape)
    rand[dtype](input_t.data(), input_t.num_elements())

    test_pool_forward[input_shape, kernel_size, padding, stride, dilation](input_t)
fn test_forward_3() raises:
    """MaxPool2D forward: input (4, 3, 32, 17), kernel 6x6, asymmetric padding/stride/dilation."""
    alias kernel_size = StaticIntTuple[2](6, 6)
    alias padding = StaticIntTuple[2](3, 1)
    alias stride = StaticIntTuple[2](2, 3)
    alias dilation = StaticIntTuple[2](2, 3)
    alias input_shape = TensorShape(4, 3, 32, 17)

    var input_t = Tensor[dtype](input_shape)
    rand[dtype](input_t.data(), input_t.num_elements())

    test_pool_forward[input_shape, kernel_size, padding, stride, dilation](input_t)
fn test_pool_backward[
    ug_shape: TensorShape,
    input_shape: TensorShape,
    kernel_size: StaticIntTuple[2],
    padding: StaticIntTuple[2],
    stride: StaticIntTuple[2],
    dilation: StaticIntTuple[2],
](ug: Tensor[dtype], inputs: Tensor[dtype]) raises:
    """Run MAXPOOL2D.backward directly and compare the input gradient with torch."""
    alias attributes = AttributeVector(
        Attribute("kernel_size", kernel_size),
        Attribute("padding", padding),
        Attribute("stride", stride),
        Attribute("dilation", dilation),
    )

    var grad = MAXPOOL2D.backward[ug_shape, input_shape, attributes](ug, inputs)

    var torch_out = torch_maxpool2d(
        inputs,
        kernel_size=kernel_size,
        padding=padding,
        stride=stride,
        dilation=dilation,
        upper_grad=ug,
    )
    # "almost": tolerance-based comparison for floating point gradients.
    assert_tensors_equal["almost"](grad, torch_out.expected_grad)
fn test_backward_1() raises:
    """MaxPool2D backward: input (4, 1, 28, 28), kernel 5x5, padding 2, stride 1, dilation 1."""
    alias kernel_size = 5
    alias padding = 2
    alias stride = 1
    alias dilation = 1
    alias input_shape = TensorShape(4, 1, 28, 28)

    var input_t = Tensor[dtype](input_shape)
    rand[dtype](input_t.data(), input_t.num_elements())

    # The upper gradient takes the pooled output shape.
    alias out_hw = get_result_shape(
        input_shape, TensorShape(kernel_size, kernel_size), padding, stride, dilation
    )
    alias ug_shape = TensorShape(input_shape[0], input_shape[1], out_hw[0], out_hw[1])
    var upper_grad = Tensor[dtype](ug_shape)
    rand[dtype](upper_grad.data(), upper_grad.num_elements())

    test_pool_backward[ug_shape, input_shape, kernel_size, padding, stride, dilation](
        upper_grad, input_t
    )
fn test_backward_2() raises:
    """MaxPool2D backward: input (4, 1, 32, 17), kernel 2x2, no padding, stride 1, dilation 1."""
    alias kernel_size = 2
    alias padding = 0
    alias stride = 1
    alias dilation = 1
    alias input_shape = TensorShape(4, 1, 32, 17)

    var input_t = Tensor[dtype](input_shape)
    rand[dtype](input_t.data(), input_t.num_elements())

    # The upper gradient takes the pooled output shape.
    alias out_hw = get_result_shape(
        input_shape, TensorShape(kernel_size, kernel_size), padding, stride, dilation
    )
    alias ug_shape = TensorShape(input_shape[0], input_shape[1], out_hw[0], out_hw[1])
    var upper_grad = Tensor[dtype](ug_shape)
    rand[dtype](upper_grad.data(), upper_grad.num_elements())

    test_pool_backward[ug_shape, input_shape, kernel_size, padding, stride, dilation](
        upper_grad, input_t
    )
fn test_backward_3() raises:
    # padding=(3, 1), stride=(2, 3), dilation=(2, 3)
    # input shape: (4, 3, 32, 17) kernel size: (6, 6)
    alias kernel_size = StaticIntTuple[2](6, 6)
    alias padding = StaticIntTuple[2](3, 1)
    alias stride = StaticIntTuple[2](2, 3)
    alias dilation = StaticIntTuple[2](2, 3)
    alias input_shape = TensorShape(4, 3, 32, 17)
    var inputs = Tensor[dtype](input_shape)
    rand[dtype](inputs.data(), inputs.num_elements())

    # uppergrad
    # Explicitly typed rebind so TensorShape is built from a StaticIntTuple[2].
    alias kernel_size_static: StaticIntTuple[2] = kernel_size
    alias res = get_result_shape(
        input_shape, TensorShape(kernel_size_static), padding, stride, dilation
    )
    alias ug_shape = TensorShape(input_shape[0], input_shape[1], res[0], res[1])
    var ug = Tensor[dtype](ug_shape)
    rand[dtype](ug.data(), ug.num_elements())

    test_pool_backward[ug_shape, input_shape, kernel_size, padding, stride, dilation](
        ug, inputs
    )
fn main():
    # Entry point: run all MaxPool2D forward/backward comparisons against torch.
    try:
        test_forward_1()
        test_forward_2()
        test_forward_3()
        test_backward_1()
        test_backward_2()
        test_backward_3()
    except e:
        print("[Error] Error in MaxPool2D")
        print(e)
import benchmark
from generic_dict import Dict, Keyable, KeysBuilder
from collections.dict import KeyElement, Dict as StdDict
from pathlib import cwd
from testing import assert_equal
from corpora import *
@value
struct StringKey(KeyElement, Keyable):
    """String wrapper usable both as a stdlib Dict key and a generic_dict key."""

    var s: String

    fn __init__(inout self, owned s: String):
        self.s = s^

    fn __init__(inout self, s: StringLiteral):
        self.s = String(s)

    fn __hash__(self) -> Int:
        # Hash over the raw UTF-8 bytes of the wrapped string.
        var data = self.s.unsafe_ptr()
        return hash(data, len(self.s))

    fn __eq__(self, other: Self) -> Bool:
        return self.s == other.s

    fn __ne__(self, other: Self) -> Bool:
        return not (self.s == other.s)

    fn accept[T: KeysBuilder](self, inout keys_builder: T):
        # Feed the raw string bytes into the key builder.
        keys_builder.add_buffer(self.s.unsafe_ptr(), len(self.s))
fn corpus_stats(corpus: List[String]):
    """Print size statistics for a corpus: element count, min/avg/max key length, total bytes.

    Empty strings are skipped. Handles an empty or all-empty corpus without
    dividing by zero (previously `sum / count` with count == 0).
    """
    print("=======Corpus Stats=======")
    print("Number of elements:", len(corpus))
    var min_len = 100000000
    var max_len = 0
    var total_bytes = 0
    var count = 0
    for i in range(len(corpus)):
        var key = corpus[i]
        if len(key) == 0:
            continue
        count += 1
        total_bytes += len(key)
        if min_len > len(key):
            min_len = len(key)
        if max_len < len(key):
            max_len = len(key)
    # Guard the average against an empty/all-empty corpus.
    var avg: Float64 = 0.0
    if count > 0:
        avg = total_bytes / count
    else:
        min_len = 0  # no non-empty keys seen
    print("Min key length:", min_len)  # fixed typo: "lenght" -> "length"
    print("Avg key length:", avg)
    print("Max key length:", max_len)
    print("Total num of bytes:", total_bytes)
    print("\n")
fn main() raises:
    """Benchmark generic_dict.Dict against the stdlib Dict over a French-text corpus."""
    var d1 = Dict[Int]()
    var d2 = StdDict[StringKey, Int]()
    var corpus = french_text_to_keys()
    print("")
    corpus_stats(corpus)

    @parameter
    fn build_compact_dict():
        # Build the compact dict pre-sized to the corpus length.
        var d = Dict[Int](len(corpus))
        # var d = Dict[Int]()
        for i in range(len(corpus)):
            try:
                _ = d.put(StringKey(corpus[i]), i)
            except:
                print("!!!")
        d1 = d^

    @parameter
    fn build_std_dict():
        var d = StdDict[StringKey, Int]()
        for i in range(len(corpus)):
            d[corpus[i]] = i
        d2 = d^

    print("+++++++Create Dict Benchmark+++++++")
    var build_compact_stats = benchmark.run[build_compact_dict](max_runtime_secs=0.5)
    # build_compact_stats.print("ns")
    var build_std_stats = benchmark.run[build_std_dict](max_runtime_secs=0.5)
    # build_std_stats.print("ns")
    print("Compact build speedup:", build_std_stats.mean() / build_compact_stats.mean())

    # Checksums of all looked-up values; used below to cross-validate both dicts.
    var sum1 = 0

    @parameter
    fn read_compact_dict():
        sum1 = 0
        for i in range(len(corpus)):
            try:
                sum1 += d1.get(StringKey(corpus[i]), -1)
            except:
                print("!!!!!")

    # d1.keys.print_keys()
    print("+++++++Read Dict Benchmark+++++++")
    var read_compact_stats = benchmark.run[read_compact_dict](max_runtime_secs=0.5)
    print("Sum1:", sum1, len(d1))
    # read_compact_stats.print("ns")

    var sum2 = 0

    @parameter
    fn read_std_dict():
        sum2 = 0
        for i in range(len(corpus)):
            try:
                sum2 += d2[corpus[i]]
            except:
                sum2 += -1

    var raed_std_stats = benchmark.run[read_std_dict](max_runtime_secs=0.5)
    # raed_std_stats.print("ns")
    print("Sum2:", sum2, len(d2))
    print("Compact read speedup:", raed_std_stats.mean() / read_compact_stats.mean())

    # Both dicts must agree before entering the delete phase.
    assert_equal(sum1, sum2)
    assert_equal(len(d1), len(d2))

    var m = 9

    @parameter
    fn delete_compact_dict():
        # Delete every m-th key from the compact dict.
        for i in range(len(corpus)):
            if i % m == 0:
                try:
                    d1.delete(StringKey(corpus[i]))
                except:
                    print("!!!!!!!!!!!!!!")

    @parameter
    fn delete_std_dict():
        # Delete every m-th key from the stdlib dict (pop may raise if already gone).
        for i in range(len(corpus)):
            if i % m == 0:
                try:
                    _ = d2.pop(corpus[i])
                except:
                    pass

    print("+++++++Delete Dict Benchmark+++++++")
    var delete_compact_stats = benchmark.run[delete_compact_dict](max_runtime_secs=0.5)
    var delete_std_stats = benchmark.run[delete_std_dict](max_runtime_secs=0.5)
    print("Compact delete speedup:", delete_std_stats.mean() / delete_compact_stats.mean())

    print("+++++++Read After Delete Dict Benchmark+++++++")
    var read_after_delete_compact_stats = benchmark.run[read_compact_dict](max_runtime_secs=0.5)
    var read_after_delete_std_stats = benchmark.run[read_std_dict](max_runtime_secs=0.5)
    print("Compact read after delete speedup:", read_after_delete_std_stats.mean() / read_after_delete_compact_stats.mean())
    print("Sum1:", sum1, "length:", len(d1))
    print("Sum2:", sum2, "length:", len(d2))
    assert_equal(sum1, sum2)
    assert_equal(len(d1), len(d2))

    # Keep captured values alive until after the last benchmark closure ran.
    _ = corpus
    _ = d1^
    _ = d2^
<filename>compact-dict/benchmark_multi_dict.mojo
import benchmark
from generic_dict import MultiDict, Keyable, KeysBuilder
from collections.dict import KeyElement, Dict as StdDict
from pathlib import cwd
from testing import assert_equal
from corpora import *
@value
struct StringKey(KeyElement, Keyable):
    """String wrapper usable both as a stdlib Dict key and a MultiDict key."""

    var s: String

    fn __init__(inout self, owned s: String):
        self.s = s^

    fn __init__(inout self, s: StringLiteral):
        self.s = String(s)

    fn __hash__(self) -> Int:
        # Hash over the raw UTF-8 bytes of the wrapped string.
        var data = self.s.unsafe_ptr()
        return hash(data, len(self.s))

    fn __eq__(self, other: Self) -> Bool:
        return self.s == other.s

    fn __ne__(self, other: Self) -> Bool:
        return not (self.s == other.s)

    fn accept[T: KeysBuilder](self, inout keys_builder: T):
        # Feed the raw string bytes into the key builder.
        keys_builder.add_buffer(self.s.unsafe_ptr(), len(self.s))
fn corpus_stats(corpus: List[String]):
    """Print size statistics for a corpus: element count, min/avg/max key length, total bytes.

    Empty strings are skipped. Handles an empty or all-empty corpus without
    dividing by zero (previously `sum / count` with count == 0).
    """
    print("=======Corpus Stats=======")
    print("Number of elements:", len(corpus))
    var min_len = 100000000
    var max_len = 0
    var total_bytes = 0
    var count = 0
    for i in range(len(corpus)):
        var key = corpus[i]
        if len(key) == 0:
            continue
        count += 1
        total_bytes += len(key)
        if min_len > len(key):
            min_len = len(key)
        if max_len < len(key):
            max_len = len(key)
    # Guard the average against an empty/all-empty corpus.
    var avg: Float64 = 0.0
    if count > 0:
        avg = total_bytes / count
    else:
        min_len = 0  # no non-empty keys seen
    print("Min key length:", min_len)  # fixed typo: "lenght" -> "length"
    print("Avg key length:", avg)
    print("Max key length:", max_len)
    print("Total num of bytes:", total_bytes)
    print("\n")
fn main() raises:
    """Benchmark generic_dict.MultiDict against the stdlib Dict over a French-text corpus."""
    var d1 = MultiDict[Int]()
    var d2 = StdDict[StringKey, Int]()
    var corpus = french_text_to_keys()
    print("")
    corpus_stats(corpus)

    @parameter
    fn build_compact_dict():
        # Build the multi-dict pre-sized to the corpus length.
        var d = MultiDict[Int](len(corpus))
        # var d = MultiDict[Int]()
        for i in range(len(corpus)):
            try:
                d.put(StringKey(corpus[i]), i)
            except:
                print("!!!")
        d1 = d^

    @parameter
    fn build_std_dict():
        var d = StdDict[StringKey, Int]()
        for i in range(len(corpus)):
            d[corpus[i]] = i
        d2 = d^

    print("+++++++Create Dict Benchmark+++++++")
    var build_compact_stats = benchmark.run[build_compact_dict](max_runtime_secs=0.5)
    # build_compact_stats.print("ns")
    var build_std_stats = benchmark.run[build_std_dict](max_runtime_secs=0.5)
    # build_std_stats.print("ns")
    print("Compact build speedup:", build_std_stats.mean() / build_compact_stats.mean())

    # Checksums of looked-up values; used below to cross-validate both dicts.
    var sum1 = 0

    @parameter
    fn read_compact_dict():
        sum1 = 0
        for i in range(len(corpus)):
            try:
                # MultiDict.get returns every value for the key; a key inserted
                # multiple times keeps all entries, so take the latest one.
                var v = d1.get(StringKey(corpus[i]))
                sum1 += v[len(v) - 1]
            except:
                print("!!!!!")

    # d1.keys.print_keys()
    print("+++++++Read Dict Benchmark+++++++")
    var read_compact_stats = benchmark.run[read_compact_dict](max_runtime_secs=0.5)
    print("Sum1:", sum1, len(d1))
    # read_compact_stats.print("ns")

    var sum2 = 0

    @parameter
    fn read_std_dict():
        sum2 = 0
        for i in range(len(corpus)):
            try:
                sum2 += d2[corpus[i]]
            except:
                sum2 += -1

    var raed_std_stats = benchmark.run[read_std_dict](max_runtime_secs=0.5)
    # raed_std_stats.print("ns")
    print("Sum2:", sum2, len(d2))
    print("Compact read speedup:", raed_std_stats.mean() / read_compact_stats.mean())

    assert_equal(sum1, sum2)
    assert_equal(len(d1), len(d2))

    # Keep captured values alive until after the last benchmark closure ran.
    _ = corpus
    _ = d1^
    _ = d2^
<filename>compact-dict/benchmark_report_string_dict.mojo
import benchmark
from string_dict import Dict as CompactDict
from collections.dict import KeyElement, Dict as StdDict
from pathlib import cwd
from testing import assert_equal
from csv import CsvBuilder
from corpora import *
alias M = 9
@value
struct BenchmarkData:
    # Collected benchmark reports (one per phase: build/read/delete/...) plus
    # the checksums produced by the read phases, used to cross-validate dicts.
    var reports: List[benchmark.Report]
    var read_checksums: List[Int]

    fn __init__(inout self):
        self.reports = List[benchmark.Report]()
        self.read_checksums = List[Int]()
fn report_std_benchmarks(corpus: List[String], inout csv_builder: CsvBuilder) -> BenchmarkData:
    """Benchmark the stdlib Dict over `corpus` and push mean phase times (ns) into the CSV.

    Phases in order: build, read, delete (every M-th key), read after delete.
    Returns the raw reports plus the read checksums for cross-validation.
    """
    var benchmark_data = BenchmarkData()
    var std_dict = StdDict[String, Int]()

    @parameter
    fn build_dict():
        var d = StdDict[String, Int]()
        for i in range(len(corpus)):
            d[corpus[i]] = i
        std_dict = d^

    var build_stats = benchmark.run[build_dict](max_runtime_secs=0.5)
    csv_builder.push(str(build_stats.mean("ns")), False)
    benchmark_data.reports.append(build_stats)

    var sum = 0

    @parameter
    fn read_dict():
        # Missing keys contribute -1, matching the compact dict's default.
        sum = 0
        for i in range(len(corpus)):
            try:
                sum += std_dict[corpus[i]]
            except:
                sum += -1

    var read_stats = benchmark.run[read_dict](max_runtime_secs=0.5)
    csv_builder.push(str(read_stats.mean("ns")), False)
    benchmark_data.reports.append(read_stats)
    benchmark_data.read_checksums.append(sum)

    @parameter
    fn delete_dict():
        # Delete every M-th key; pop may raise if a repeated run removed it already.
        for i in range(len(corpus)):
            if i % M == 0:
                try:
                    _ = std_dict.pop(corpus[i])
                except:
                    pass

    var delete_stats = benchmark.run[delete_dict](max_runtime_secs=0.5)
    csv_builder.push(str(delete_stats.mean("ns")), False)
    benchmark_data.reports.append(delete_stats)

    var read_after_delete_stats = benchmark.run[read_dict](max_runtime_secs=0.5)
    csv_builder.push(str(read_after_delete_stats.mean("ns")), False)
    benchmark_data.reports.append(read_after_delete_stats)
    benchmark_data.read_checksums.append(sum)

    _ = std_dict
    return benchmark_data
fn report_compact_benchmarks(corpus: List[String], inout csv_builder: CsvBuilder) -> BenchmarkData:
    """Benchmark the compact string Dict over `corpus` and push mean phase times (ns) into the CSV.

    Phases in order: build without capacity hint, build with capacity hint,
    read, delete (every M-th key), read after delete. Returns the raw reports
    plus the read checksums for cross-validation against the stdlib run.

    Fix: dropped the unused locals `read_checksum` and
    `read_after_delete_checksum` (the checksums are already recorded in
    `benchmark_data.read_checksums`).
    """
    var benchmark_data = BenchmarkData()
    var dict = CompactDict[Int]()

    @parameter
    fn build_dict_nc():
        # Build without pre-sizing: capacity grows on demand.
        var d = CompactDict[Int]()
        for i in range(len(corpus)):
            d.put(corpus[i], i)
        dict = d^

    var build_stats_nc = benchmark.run[build_dict_nc](max_runtime_secs=0.5)
    csv_builder.push(str(build_stats_nc.mean("ns")), False)
    benchmark_data.reports.append(build_stats_nc)

    @parameter
    fn build_dict():
        # Build with a capacity hint equal to the corpus size.
        var d = CompactDict[Int](len(corpus))
        for i in range(len(corpus)):
            d.put(corpus[i], i)
        dict = d^

    var build_stats = benchmark.run[build_dict](max_runtime_secs=0.5)
    csv_builder.push(str(build_stats.mean("ns")), False)
    benchmark_data.reports.append(build_stats)

    var sum = 0

    @parameter
    fn read_dict():
        # Missing keys contribute -1, matching the stdlib run's handling.
        sum = 0
        for i in range(len(corpus)):
            sum += dict.get(corpus[i], -1)

    var read_stats = benchmark.run[read_dict](max_runtime_secs=0.5)
    csv_builder.push(str(read_stats.mean("ns")), False)
    benchmark_data.reports.append(read_stats)
    benchmark_data.read_checksums.append(sum)

    @parameter
    fn delete_dict():
        # Delete every M-th key.
        for i in range(len(corpus)):
            if i % M == 0:
                dict.delete(corpus[i])

    var delete_stats = benchmark.run[delete_dict](max_runtime_secs=0.5)
    csv_builder.push(str(delete_stats.mean("ns")), False)
    benchmark_data.reports.append(delete_stats)

    var read_after_delete_stats = benchmark.run[read_dict](max_runtime_secs=0.5)
    csv_builder.push(str(read_after_delete_stats.mean("ns")), False)
    benchmark_data.reports.append(read_after_delete_stats)
    benchmark_data.read_checksums.append(sum)

    _ = dict
    return benchmark_data
fn corpus_stats(corpus: List[String], inout csv_builder: CsvBuilder):
    """Push corpus statistics into the CSV: count, total bytes, min/avg/max key length.

    Empty strings are skipped. Handles an empty or all-empty corpus without
    dividing by zero (previously `sum / count` with count == 0).
    """
    csv_builder.push(str(len(corpus)), False)
    var min_len = 100000000
    var max_len = 0
    var total_bytes = 0
    var count = 0
    for i in range(len(corpus)):
        var key = corpus[i]
        if len(key) == 0:
            continue
        count += 1
        total_bytes += len(key)
        if min_len > len(key):
            min_len = len(key)
        if max_len < len(key):
            max_len = len(key)
    # Guard the average against an empty/all-empty corpus.
    var avg: Float64 = 0.0
    if count > 0:
        avg = total_bytes / count
    else:
        min_len = 0  # no non-empty keys seen
    csv_builder.push(str(total_bytes), False)
    csv_builder.push(str(min_len), False)
    csv_builder.push(str(avg), False)
    csv_builder.push(str(max_len), False)
fn report_speedup(std: BenchmarkData, compact: BenchmarkData, inout csv_builder: CsvBuilder):
    # Push speedup ratios (stdlib time / compact time) into the CSV.
    # std reports:     [0] build, [1] read, [2] delete, [3] read after delete
    # compact reports: [0] build nc, [1] build, [2] read, [3] delete, [4] read after delete
    # Both compact build variants are compared against the single stdlib build.
    csv_builder.push(str(std.reports[0].mean() / compact.reports[0].mean()), False)
    csv_builder.push(str(std.reports[0].mean() / compact.reports[1].mean()), False)
    csv_builder.push(str(std.reports[1].mean() / compact.reports[2].mean()), False)
    csv_builder.push(str(std.reports[2].mean() / compact.reports[3].mean()), False)
    csv_builder.push(str(std.reports[3].mean() / compact.reports[4].mean()), False)
fn report_checksums_alignment(std: BenchmarkData, compact: BenchmarkData, inout csv_builder: CsvBuilder):
    """Push whether stdlib and compact read checksums agree (before and after deletes)."""
    for i in range(2):
        csv_builder.push(str(std.read_checksums[i] == compact.read_checksums[i]), False)
fn report(name: StringLiteral, corpus: List[String], inout csv_builder: CsvBuilder):
    """Run the full benchmark suite for one corpus and append its CSV row."""
    csv_builder.push(name, False)
    corpus_stats(corpus, csv_builder)
    var std_data = report_std_benchmarks(corpus, csv_builder)
    var compact_data = report_compact_benchmarks(corpus, csv_builder)
    report_speedup(std_data, compact_data, csv_builder)
    report_checksums_alignment(std_data, compact_data, csv_builder)
fn main() raises:
    """Build a CSV report comparing stdlib and compact dicts across many corpora.

    Fixes: CSV header typo "Speadup delete" -> "Speedup delete" (matches the
    other speedup columns) and corpus label "Georgien" -> "Georgian" (matches
    `georgian_text_to_keys`).
    """
    # Crashes because of this bug https://github.com/modularml/mojo/issues/2829
    var csv_builder = CsvBuilder(
        "Corpus", "Number of keys", "Total bytes", "Min key", "Avg key", "Max key",
        "Build stdlib", "Read stdlib", "Delete stdlib", "Read after delete stdlib",
        "Build compact nc", "Build compact", "Read compact", "Delete compact", "Read after delete compact",
        "Speedup build nc", "Speedup build", "Speedup read", "Speedup delete", "Speedup read after delete",
        "Read Checksum", "Read Checksum after delete"
    )
    report("Arabic", arabic_text_to_keys(), csv_builder)
    report("Chinese", chinese_text_to_keys(), csv_builder)
    report("English", english_text_to_keys(), csv_builder)
    report("French", french_text_to_keys(), csv_builder)
    report("Georgian", georgian_text_to_keys(), csv_builder)
    report("German", german_text_to_keys(), csv_builder)
    report("Greek", greek_text_to_keys(), csv_builder)
    report("Hebrew", hebrew_text_to_keys(), csv_builder)
    report("Hindi", hindi_text_to_keys(), csv_builder)
    report("Japanese", japanese_long_keys(), csv_builder)
    report("l33t", l33t_text_to_keys(), csv_builder)
    report("Russian", russian_text_to_keys(), csv_builder)
    report("S3", s3_action_names(), csv_builder)
    report("Words", system_words_collection(), csv_builder)
    print(csv_builder^.finish())
| compact-dict/benchmark_report_string_dict.mojo | false |
<filename>compact-dict/benchmark_string_dict.mojo
import benchmark
from string_dict import Dict as CompactDict
from collections.dict import KeyElement, Dict as StdDict
from pathlib import cwd
from testing import assert_equal
from corpora import *
fn corpus_stats(corpus: List[String]):
    """Print size statistics for a corpus: element count, min/avg/max key length, total bytes.

    Empty strings are skipped. Handles an empty or all-empty corpus without
    dividing by zero (previously `sum / count` with count == 0).
    """
    print("=======Corpus Stats=======")
    print("Number of elements:", len(corpus))
    var min_len = 100000000
    var max_len = 0
    var total_bytes = 0
    var count = 0
    for i in range(len(corpus)):
        var key = corpus[i]
        if len(key) == 0:
            continue
        count += 1
        total_bytes += len(key)
        if min_len > len(key):
            min_len = len(key)
        if max_len < len(key):
            max_len = len(key)
    # Guard the average against an empty/all-empty corpus.
    var avg: Float64 = 0.0
    if count > 0:
        avg = total_bytes / count
    else:
        min_len = 0  # no non-empty keys seen
    print("Min key length:", min_len)  # fixed typo: "lenght" -> "length"
    print("Avg key length:", avg)
    print("Max key length:", max_len)
    print("Total num of bytes:", total_bytes)
    print("\n")
fn main() raises:
    """Benchmark string_dict.Dict against the stdlib Dict over a French-text corpus."""
    var d1 = CompactDict[Int]()
    var d2 = StdDict[String, Int]()
    var corpus = french_text_to_keys()
    print("")
    corpus_stats(corpus)

    @parameter
    fn build_compact_dict():
        # Build the compact dict pre-sized to the corpus length.
        var d = CompactDict[Int](len(corpus))
        # var d = CompactDict[Int]()
        for i in range(len(corpus)):
            d.put(corpus[i], i)
        d1 = d^

    @parameter
    fn build_std_dict():
        var d = StdDict[String, Int]()
        for i in range(len(corpus)):
            d[corpus[i]] = i
        d2 = d^

    print("+++++++Create Dict Benchmark+++++++")
    var build_compact_stats = benchmark.run[build_compact_dict](max_runtime_secs=0.5)
    # build_compact_stats.print("ns")
    var build_std_stats = benchmark.run[build_std_dict](max_runtime_secs=0.5)
    # build_std_stats.print("ns")
    print("Compact build speedup:", build_std_stats.mean() / build_compact_stats.mean())

    # Checksums of all looked-up values; used below to cross-validate both dicts.
    var sum1 = 0

    @parameter
    fn read_compact_dict():
        sum1 = 0
        for i in range(len(corpus)):
            sum1 += d1.get(corpus[i], -1)

    # d1.keys.print_keys()
    print("+++++++Read Dict Benchmark+++++++")
    var read_compact_stats = benchmark.run[read_compact_dict](max_runtime_secs=0.5)
    print("Sum1:", sum1, len(d1))
    # read_compact_stats.print("ns")

    var sum2 = 0

    @parameter
    fn read_std_dict():
        # Missing keys contribute -1, matching the compact dict's default.
        sum2 = 0
        for i in range(len(corpus)):
            try:
                sum2 += d2[corpus[i]]
            except:
                sum2 += -1

    var raed_std_stats = benchmark.run[read_std_dict](max_runtime_secs=0.5)
    # raed_std_stats.print("ns")
    print("Sum2:", sum2, len(d2))
    print("Compact read speedup:", raed_std_stats.mean() / read_compact_stats.mean())

    # Both dicts must agree before entering the delete phase.
    assert_equal(sum1, sum2)
    assert_equal(len(d1), len(d2))

    var m = 9

    @parameter
    fn delete_compact_dict():
        # Delete every m-th key from the compact dict.
        for i in range(len(corpus)):
            if i % m == 0:
                d1.delete(corpus[i])

    @parameter
    fn delete_std_dict():
        # Delete every m-th key from the stdlib dict (pop may raise if already gone).
        for i in range(len(corpus)):
            if i % m == 0:
                try:
                    _ = d2.pop(corpus[i])
                except:
                    pass

    print("+++++++Delete Dict Benchmark+++++++")
    var delete_compact_stats = benchmark.run[delete_compact_dict](max_runtime_secs=0.5)
    var delete_std_stats = benchmark.run[delete_std_dict](max_runtime_secs=0.5)
    print("Compact delete speedup:", delete_std_stats.mean() / delete_compact_stats.mean())

    print("+++++++Read After Delete Dict Benchmark+++++++")
    var read_after_delete_compact_stats = benchmark.run[read_compact_dict](max_runtime_secs=0.5)
    var read_after_delete_std_stats = benchmark.run[read_std_dict](max_runtime_secs=0.5)
    print("Compact read after delete speedup:", read_after_delete_std_stats.mean() / read_after_delete_compact_stats.mean())
    print("Sum1:", sum1, "length:", len(d1))
    print("Sum2:", sum2, "length:", len(d2))
    assert_equal(sum1, sum2)
    assert_equal(len(d1), len(d2))

    # Keep captured values alive until after the last benchmark closure ran.
    _ = corpus
    _ = d1^
    _ = d2^
from string_dict import Dict
from corpora import system_words_collection, hindi_text_to_keys
fn main() raises:
    """Memory stress: insert and read the system-words corpus 100 times with the compact Dict."""
    var corpus = system_words_collection()
    var dict = Dict[Int](len(corpus))

    # Repeated inserts of the same keys exercise overwrite behavior.
    for _ in range(100):
        for idx in range(len(corpus)):
            dict.put(corpus[idx], idx)

    # Repeated reads; the checksum keeps the loop from being optimized away.
    var checksum = 0
    for _ in range(100):
        checksum = 0
        for idx in range(len(corpus)):
            checksum += dict.get(corpus[idx], -1)

    print(checksum)
| compact-dict/memory_consumption_compact_dict.mojo | false |
<filename>compact-dict/memory_consumption_std_lib_dict.mojo
from collections import Dict
from corpora import system_words_collection, hindi_text_to_keys
fn main() raises:
    # Stdlib-Dict counterpart of the compact-dict memory benchmark:
    # identical workload, used for side-by-side memory comparison.
    var keys = system_words_collection()
    var table = Dict[String, Int]()
    for _ in range(100):
        for idx in range(len(keys)):
            table[keys[idx]] = idx
    var total = 0
    for _ in range(100):
        total = 0
        for idx in range(len(keys)):
            total += table[keys[idx]]
    print(total)
| compact-dict/memory_consumption_std_lib_dict.mojo | false |
<filename>compact-dict/test_generic_dict.mojo
from generic_dict import Dict, Keyable, KeysBuilder
from testing import assert_equal
from corpora import *
@value
struct Person(Keyable):
    # Example composite key: raw name bytes plus the age are both folded
    # into the dictionary key via accept().
    var name: String
    var age: Int

    fn accept[T: KeysBuilder](self, inout keys_builder: T):
        # Feed the name bytes first, then the age as a 64-bit integer.
        keys_builder.add_buffer[DType.uint8](self.name.unsafe_ptr(), len(self.name))
        keys_builder.add(Int64(self.age))
fn test_person_dict() raises:
    # Composite (name, age) keys; several names share prefixes to stress
    # key disambiguation.
    var p1 = Person("Maxim", 42)
    var p2 = Person("Maximilian", 62)
    var p3 = Person("Alex", 25)
    var p4 = Person("Maria", 28)
    var p5 = Person("Daria", 13)
    var p6 = Person("Max", 31)
    var d = Dict[Int]()
    _= d.put(p1, 1)
    _= d.put(p2, 11)
    _= d.put(p3, 111)
    _= d.put(p4, 1111)
    _= d.put(p5, 11111)
    _= d.put(p6, 111111)
    assert_equal(d.get(p1, 0), 1)
    # NOTE(review): the checks below are disabled — presumably a known
    # issue with struct keys; confirm and re-enable.
    # assert_equal(d.get(p2, 0), 11)
    # assert_equal(d.get(p3, 0), 111)
    # assert_equal(d.get(p4, 0), 1111)
    # assert_equal(d.get(p5, 0), 11111)
    # assert_equal(d.get(p6, 0), 111111)
@value
struct StringKey(Keyable):
    # String wrapper implementing Keyable; key bytes are prefixed with
    # "String:" so they cannot collide with IntKey bytes.
    var s: String

    fn __init__(inout self, owned s: String):
        self.s = s^

    fn __init__(inout self, s: StringLiteral):
        self.s = String(s)

    fn accept[T: KeysBuilder](self, inout keys_builder: T):
        alias type_prefix = "String:"
        keys_builder.add_buffer(type_prefix.unsafe_ptr(), len(type_prefix))
        keys_builder.add_buffer(self.s.unsafe_ptr(), len(self.s))
@value
struct IntKey(Keyable):
    # Int wrapper implementing Keyable; key bytes are prefixed with
    # "Int:" so they cannot collide with StringKey bytes.
    var i: Int

    fn __init__(inout self, i: Int):
        self.i = i

    fn accept[T: KeysBuilder](self, inout keys_builder: T):
        alias type_prefix = "Int:"
        keys_builder.add_buffer(type_prefix.unsafe_ptr(), len(type_prefix))
        keys_builder.add(Int64(self.i))
fn test_add_vs_update() raises:
    # put returns True only when the key is newly inserted; updating an
    # existing key returns False. Deleting makes the key "new" again.
    var table = Dict[Int]()
    assert_equal(table.put(StringKey("a"), 1), True)
    assert_equal(table.put(StringKey("a"), 2), False)
    table.delete(StringKey("a"))
    assert_equal(table.put(StringKey("a"), 3), True)
    assert_equal(table.put(StringKey("a"), 4), False)
    assert_equal(table.get(StringKey("a"), 0), 4)
fn test_clear() raises:
    # After clear(), previously stored keys are gone and re-insertable.
    var table = Dict[Int]()
    assert_equal(table.put(StringKey("a"), 1), True)
    assert_equal(table.put(StringKey("b"), 1), True)
    assert_equal(table.put(StringKey("a"), 2), False)
    assert_equal(table.get(StringKey("a"), 0), 2)
    table.clear()
    assert_equal(table.put(StringKey("a"), 3), True)
    assert_equal(table.get(StringKey("a"), 0), 3)
    assert_equal(table.get(StringKey("b"), 0), 0)
fn test_no_key_collision() raises:
    # "a" is byte 97 — without the per-type prefixes in accept() these two
    # keys could produce identical key bytes. They must stay distinct.
    var table = Dict[Int]()
    assert_equal(table.put(StringKey("a"), 1), True)
    assert_equal(table.put(IntKey(97), 2), True)
    assert_equal(table.get(StringKey("a"), 0), 1)
    assert_equal(table.get(IntKey(97), 0), 2)
fn main() raises:
    # Entry point: run every generic-dict test case.
    test_person_dict()
    test_add_vs_update()
    test_clear()
    test_no_key_collision()
| compact-dict/test_generic_dict.mojo | false |
<filename>compact-dict/test_multi_dict.mojo
from generic_dict import MultiDict, Keyable, KeysBuilder
from testing import assert_equal
from corpora import *
@value
struct StringKey(Keyable):
    # Minimal string wrapper satisfying the Keyable trait for these tests;
    # the key is just the raw string bytes, no type prefix.
    var s: String

    fn __init__(inout self, owned s: String):
        self.s = s^

    fn __init__(inout self, s: StringLiteral):
        self.s = String(s)

    fn accept[T: KeysBuilder](self, inout keys_builder: T):
        keys_builder.add_buffer(self.s.unsafe_ptr(), len(self.s))
fn test_add() raises:
    # MultiDict keeps every value put under a key, in insertion order.
    var d = MultiDict[Int]()
    d.put(StringKey("a"), 1)
    d.put(StringKey("b"), 2)
    d.put(StringKey("c"), 3)
    d.put(StringKey("a"), 4)
    d.put(StringKey("a"), 5)
    d.put(StringKey("a"), 6)
    d.put(StringKey("c"), 7)
    # get returns all values for a key, ordered by insertion.
    assert_equal(len(d.get(StringKey("a"))), 4)
    assert_equal(d.get(StringKey("a"))[0], 1)
    assert_equal(d.get(StringKey("a"))[1], 4)
    assert_equal(d.get(StringKey("a"))[2], 5)
    assert_equal(d.get(StringKey("a"))[3], 6)
    assert_equal(len(d.get(StringKey("b"))), 1)
    assert_equal(d.get(StringKey("b"))[0], 2)
    assert_equal(len(d.get(StringKey("c"))), 2)
    assert_equal(d.get(StringKey("c"))[0], 3)
    assert_equal(d.get(StringKey("c"))[1], 7)
fn test_s3_corpus() raises:
    # MultiDict configured with the smallest count/offset types; the S3
    # corpus has 143 unique action names plus duplicates.
    var d = MultiDict[
        Int,
        KeyCountType=DType.uint8,
        KeyOffsetType=DType.uint16,
        NextKeyCountType=DType.uint8
    ]()
    var corpus = s3_action_names()
    for i in range(len(corpus)):
        d.put(StringKey(corpus[i]), i)
    assert_equal(len(d), 143)  # length counts unique keys only
    var all_values = 0
    for i in range(len(corpus)):
        var v = d.get(StringKey(corpus[i]))
        var c = len(v)
        all_values += c
    # NOTE(review): this expectation encodes an assumption about how often
    # duplicated keys repeat in the corpus — confirm against s3_action_names().
    assert_equal(all_values, 143 + (len(corpus) - 143) * 3)
    _ = d
fn test_system_corpus() raises:
    # Unique-key corpus: every key maps to exactly one value, so the dict
    # length and the total value count both equal the corpus size.
    var d = MultiDict[Int]()
    var corpus = system_words_collection()
    for i in range(len(corpus)):
        d.put(StringKey(corpus[i]), i)
    assert_equal(len(d), len(corpus))
    var all_values = 0
    for i in range(len(corpus)):
        var v = d.get(StringKey(corpus[i]))
        var c = len(v)
        all_values += c
    assert_equal(all_values, len(corpus))
    _ = d
fn test_english_corpus() raises:
    # Natural-language corpus with heavy key duplication, exercising the
    # 16-bit count/offset type configuration.
    var d = MultiDict[
        Int,
        KeyCountType=DType.uint16,
        KeyOffsetType=DType.uint16,
        NextKeyCountType=DType.uint16
    ]()
    var corpus = english_text_to_keys()
    for i in range(len(corpus)):
        d.put(StringKey(corpus[i]), i)
    assert_equal(len(d), 192)
    # Summing per-occurrence value counts over the whole corpus.
    var all_values = 0
    for i in range(len(corpus)):
        all_values += len(d.get(StringKey(corpus[i])))
    assert_equal(all_values, 18631)
    # The dict must hold one value per occurrence of the word "the".
    var the_occurrences = 0
    for i in range(len(corpus)):
        if corpus[i] == "the":
            the_occurrences += 1
    assert_equal(len(d.get(StringKey("the"))), the_occurrences)
    _ = d
fn test_get_itter() raises:
    # Verify the value iterator yields insertion-ordered values per key and
    # yields nothing at all for an absent key.
    var d = MultiDict[Int]()
    d.put(StringKey("a"), 1)
    d.put(StringKey("b"), 2)
    d.put(StringKey("c"), 3)
    d.put(StringKey("a"), 4)
    d.put(StringKey("a"), 5)
    d.put(StringKey("a"), 6)
    d.put(StringKey("c"), 7)
    var index_a = 0
    var expected_a = List[Int](1, 4, 5, 6)
    for v in d.get_itter(StringKey("a")):
        assert_equal(expected_a[index_a], v[])
        index_a += 1
    assert_equal(index_a, 4)
    var index_b = 0
    var expected_b = List[Int](2)
    for v in d.get_itter(StringKey("b")):
        assert_equal(expected_b[index_b], v[])
        index_b += 1
    assert_equal(index_b, 1)
    var index_c = 0
    var expected_c = List[Int](3, 7)
    for v in d.get_itter(StringKey("c")):
        assert_equal(expected_c[index_c], v[])
        index_c += 1
    assert_equal(index_c, 2)
    # Absent key: the loop body must never execute.
    # (Removed a stray debug print(v[]) here — the assert below already
    # reports any unexpected value.)
    var index_d = 0
    var expected_d = List[Int](2)
    for v in d.get_itter(StringKey("d")):
        assert_equal(expected_d[index_d], v[])
        index_d += 1
    assert_equal(index_d, 0)
fn main() raises:
    # Entry point: run every MultiDict test case.
    test_add()
    test_s3_corpus()
    test_system_corpus()
    test_english_corpus()
    test_get_itter()
| compact-dict/test_multi_dict.mojo | false |
<filename>compact-dict/test_sparse_array.mojo
from generic_dict import SparseArray
from tensor import Tensor, TensorShape
from testing import assert_equal, assert_true
fn assert_equal_list[T: DType](lhs: List[Scalar[T]], rhs: List[Scalar[T]]) raises:
    # Two scalar lists are equal iff their lengths match and every pair of
    # elements compares equal.
    assert_equal(len(lhs), len(rhs))
    var idx = 0
    while idx < len(lhs):
        assert_true(lhs[idx] == rhs[idx])
        idx += 1
fn main() raises:
    # Exercise SparseArray: set indices out of order and verify both random
    # access (get) and the index-ordered dense view (dense_values_list).
    var a = SparseArray[DType.int64](25)
    assert_equal(len(a.dense_values_list()), 0)
    a[23] = 15
    assert_equal(a.get(23).or_else(0), 15)
    assert_equal_list[DType.int64](a.dense_values_list(), List[Int64](15))
    a[1] = 45
    assert_equal(a.get(1).or_else(0), 45)
    assert_equal_list[DType.int64](a.dense_values_list(), List[Int64](45, 15))
    a[13] = 1
    assert_equal(a.get(13).or_else(0), 1)
    assert_equal_list[DType.int64](a.dense_values_list(), List[Int64](45, 1, 15))
    a[24] = 11
    assert_equal(a.get(24).or_else(0), 11)
    assert_equal_list[DType.int64](a.dense_values_list(), List[Int64](45, 1, 15, 11))
    # A stored zero must be distinguishable from "absent".
    a[2] = 0
    assert_equal(a.get(2).or_else(0), 0)
    assert_equal_list[DType.int64](a.dense_values_list(), List[Int64](45, 0, 1, 15, 11))
    # Index 53 exceeds the initial capacity of 25 — the array must grow.
    a[53] = 5
    assert_equal(a.get(53).or_else(0), 5)
    assert_equal_list[DType.int64](a.dense_values_list(), List[Int64](45, 0, 1, 15, 11, 5))
    a[0] = 33
    assert_equal(a.get(0).or_else(0), 33)
    assert_equal_list[DType.int64](a.dense_values_list(), List[Int64](33, 45, 0, 1, 15, 11, 5))
    # Overwriting an existing index must update in place, not append.
    a[53] = 49
    assert_equal(a.get(53).or_else(0), 49)
    assert_equal_list[DType.int64](a.dense_values_list(), List[Int64](33, 45, 0, 1, 15, 11, 49))
| compact-dict/test_sparse_array.mojo | false |
<filename>compact-dict/test_string_dict.mojo
from string_dict import Dict
from testing import assert_equal
from corpora import *
fn test_simple_manipulations() raises:
    # Default (destructive) dict: delete really removes the key.
    var d = Dict[Int, KeyCountType=DType.uint8, KeyOffsetType=DType.uint16]()
    var corpus = s3_action_names()
    for i in range(len(corpus)):
        d.put(corpus[i], i)
    assert_equal(len(d), 143)  # corpus contains 143 unique names
    assert_equal(d.get("CopyObject", -1), 2)
    d.delete("CopyObject")
    assert_equal(d.get("CopyObject", -1), -1)
    assert_equal(len(d), 142)
    # A similar key must not resurrect the deleted one.
    d.put("CopyObjects", 256)
    assert_equal(d.get("CopyObjects", -1), 256)
    assert_equal(d.get("CopyObject", -1), -1)
    assert_equal(len(d), 143)
    # Re-inserting the deleted key counts as a new entry.
    d.put("CopyObject", 257)
    assert_equal(d.get("CopyObject", -1), 257)
    assert_equal(len(d), 144)
    _ = d
fn test_simple_manipulations_on_non_destructive() raises:
    # destructive=False: delete is a no-op and values stay readable.
    var d = Dict[Int, KeyCountType=DType.uint8, KeyOffsetType=DType.uint16, destructive=False]()
    var corpus = s3_action_names()
    for i in range(len(corpus)):
        d.put(corpus[i], i)
    assert_equal(len(d), 143)
    assert_equal(d.get("CopyObject", -1), 2)
    d.delete("CopyObject")
    # Still present: non-destructive dicts ignore delete.
    assert_equal(d.get("CopyObject", -1), 2)
    assert_equal(len(d), 143)
    d.put("CopyObjects", 256)
    assert_equal(d.get("CopyObjects", -1), 256)
    assert_equal(d.get("CopyObject", -1), 2)
    assert_equal(len(d), 144)
    # Putting an existing key updates in place — length is unchanged.
    d.put("CopyObject", 257)
    assert_equal(d.get("CopyObject", -1), 257)
    assert_equal(len(d), 144)
fn test_simple_manipulations_non_caching() raises:
    # caching_hashes=False must behave identically to the destructive
    # default-configuration test.
    var d = Dict[
        Int,
        KeyCountType=DType.uint8,
        KeyOffsetType=DType.uint16,
        caching_hashes=False
    ]()
    var corpus = s3_action_names()
    for i in range(len(corpus)):
        d.put(corpus[i], i)
    assert_equal(len(d), 143)
    assert_equal(d.get("CopyObject", -1), 2)
    d.delete("CopyObject")
    assert_equal(d.get("CopyObject", -1), -1)
    assert_equal(len(d), 142)
    d.put("CopyObjects", 256)
    assert_equal(d.get("CopyObjects", -1), 256)
    assert_equal(d.get("CopyObject", -1), -1)
    assert_equal(len(d), 143)
    d.put("CopyObject", 257)
    assert_equal(d.get("CopyObject", -1), 257)
    assert_equal(len(d), 144)
    _ = d
@value
struct MyInt:
    # Memory-only wrapper around Int; used in test_upsert because a plain
    # Int reportedly does not work there (see the comment in test_upsert —
    # presumably related to Int being register-passable; confirm).
    var value: Int
fn test_upsert() raises:
    # upsert applies a function to the existing value (or None when the
    # key is absent) — here a per-key occurrence counter.
    var d1 = Dict[MyInt, KeyCountType=DType.uint8, KeyOffsetType=DType.uint16]()
    var corpus = s3_action_names()

    fn inc(value: Optional[MyInt]) -> MyInt:
        # A missing key counts as 0, so the first occurrence yields 1.
        return MyInt(value.or_else(MyInt(0)).value + 1)

    for i in range(len(corpus)):
        d1.upsert(corpus[i], inc)
    # Does not work probably because of Int is a register passable type
    # var d2 = Dict[Int, KeyCountType=DType.uint8, KeyOffsetType=DType.uint16]()
    # fn inc2(value: Optional[Int]) -> Int:
    #     return value.or_else(0) + 1
    # for i in range(len(corpus)):
    #     d2.upsert(corpus[i], inc2)
fn test_clear() raises:
    # clear() must drop all entries while leaving the dict reusable.
    var table = Dict[Int]()
    table.put("a", 1)
    table.put("b", 1)
    assert_equal(table.get("a", 0), 1)
    assert_equal(table.get("b", 0), 1)
    table.clear()
    table.put("a", 2)
    assert_equal(table.get("a", 0), 2)
    assert_equal(table.get("b", 0), 0)
fn main() raises:
    # Entry point: run the whole string-dict test suite.
    test_simple_manipulations()
    test_simple_manipulations_on_non_destructive()
    test_simple_manipulations_non_caching()
    test_upsert()
    test_clear()
| compact-dict/test_string_dict.mojo | false |
<filename>compact-dict/corpora/__init__.mojo
from pathlib import cwd, Path
fn english_text_to_keys() raises -> List[String]:
return String('A wonderful serenity has taken possession of my entire soul, like these sweet mornings of spring which I enjoy with my whole heart. I am alone, and feel the charm of existence in this spot, which was created for the bliss of souls like mine. I am so happy, my dear friend, so absorbed in the exquisite sense of mere tranquil existence, that I neglect my talents. I should be incapable of drawing a single stroke at the present moment; and yet I feel that I never was a greater artist than now. When, while the lovely valley teems with vapour around me, and the meridian sun strikes the upper surface of the impenetrable foliage of my trees, and but a few stray gleams steal into the inner sanctuary, I throw myself down among the tall grass by the trickling stream; and, as I lie close to the earth, a thousand unknown plants are noticed by me: when I hear the buzz of the little world among the stalks, and grow familiar with the countless indescribable forms of the insects and flies, then I feel the presence of the Almighty, who formed us in his own image, and the breath of that universal love which bears and sustains us, as it floats around us in an eternity of bliss; and then, my friend, when darkness overspreads my eyes, and heaven and earth seem to dwell in my soul and absorb its power, like the form of a beloved mistress, then I often think with longing, Oh, would I could describe these conceptions, could impress upon paper all that is living so full and warm within me, that it might be the mirror of my soul, as my soul is the mirror of the infinite God! O my friend -- but it is too much for my strength -- I sink under the weight of the splendour of these visions! A wonderful serenity has taken possession of my entire soul, like these sweet mornings of spring which I enjoy with my whole heart. I am alone, and feel the charm of existence in this spot, which was created for the bliss of souls like mine. 
I am so happy, my dear friend, so absorbed in the exquisite sense of mere tranquil existence, that I neglect my talents. I should be incapable of drawing a single stroke at the present moment; and yet I feel that I never was a greater artist than now. When, while the lovely valley teems with vapour around me, and the meridian sun strikes the upper surface of the impenetrable foliage of my trees, and but a few stray gleams steal into the inner sanctuary, I throw myself down among the tall grass by the trickling stream; and, as I lie close to the earth, a thousand unknown plants are noticed by me: when I hear the buzz of the little world among the stalks, and grow familiar with the countless indescribable forms of the insects and flies, then I feel the presence of the Almighty, who formed us in his own image, and the breath of that universal love which bears and sustains us, as it floats around us in an eternity of bliss; and then, my friend, when darkness overspreads my eyes, and heaven and earth seem to dwell in my soul and absorb its power, like the form of a beloved mistress, then I often think with longing, Oh, would I could describe these conceptions, could impress upon paper all that is living so full and warm within me, that it might be the mirror of my soul, as my soul is the mirror of the infinite God! O my friend -- but it is too much for my strength -- I sink under the weight of the splendour of these visions! A wonderful serenity has taken possession of my entire soul, like these sweet mornings of spring which I enjoy with my whole heart. I am alone, and feel the charm of existence in this spot, which was created for the bliss of souls like mine. I am so happy, my dear friend, so absorbed in the exquisite sense of mere tranquil existence, that I neglect my talents. I should be incapable of drawing a single stroke at the present moment; and yet I feel that I never was a greater artist than now. 
When, while the lovely valley teems with vapour around me, and the meridian sun strikes the upper surface of the impenetrable foliage of my trees, and but a few stray gleams steal into the inner sanctuary, I throw myself down among the tall grass by the trickling stream; and, as I lie close to the earth, a thousand unknown plants are noticed by me: when I hear the buzz of the little world among the stalks, and grow familiar with the countless indescribable forms of the insects and flies, then I feel the presence of the Almighty, who formed us in his own image, and the breath of that universal love which bears and sustains us, as it floats around us in an eternity of bliss; and then, my friend, when darkness overspreads my eyes, and heaven and earth seem to dwell in my soul and absorb its power, like the form of a beloved mistress, then I often think with longing, Oh, would I could describe these conceptions, could impress upon paper all that is living so full and warm within me, that it might be the mirror of my soul, as my soul is the mirror of the infinite God! O my friend -- but it is too much for my strength -- I sink under the weight of the splendour of these visions!A wonderful serenity has taken possession of my entire soul, like these sweet mornings of spring which I enjoy with my whole heart. I am alone, and feel the charm of existence in this spot, which was created for the bliss of souls').split(" ")
# Each loader below reads its corpus file, flattens newlines to spaces,
# and splits on single spaces to produce the key list.
# NOTE(review): the cwd()-relative paths assume the process is launched
# from the repository root — confirm for CI/scripts.
fn greek_text_to_keys() raises -> List[String]:
    return (cwd() / "corpora" / "greek.txt").read_text().replace("\n", " ").split(" ")

fn hebrew_text_to_keys() raises -> List[String]:
    return (cwd() / "corpora" / "hebrew.txt").read_text().replace("\n", " ").split(" ")

fn arabic_text_to_keys() raises -> List[String]:
    return (cwd() / "corpora" / "arabic.txt").read_text().replace("\n", " ").split(" ")

fn l33t_text_to_keys() raises -> List[String]:
    return (cwd() / "corpora" / "l33t.txt").read_text().replace("\n", " ").split(" ")

fn georgian_text_to_keys() raises -> List[String]:
    return (cwd() / "corpora" / "georgian.txt").read_text().replace("\n", " ").split(" ")

fn chinese_text_to_keys() raises -> List[String]:
    return (cwd() / "corpora" / "chinese.txt").read_text().replace("\n", " ").split(" ")

fn french_text_to_keys() raises -> List[String]:
    return (cwd() / "corpora" / "french.txt").read_text().replace("\n", " ").split(" ")

fn hindi_text_to_keys() raises -> List[String]:
    return (cwd() / "corpora" / "hindi.txt").read_text().replace("\n", " ").split(" ")
fn russian_text_to_keys() raises -> List[String]:
return String('Проснувшись однажды утром после беспокойного сна, Грегор Замза обнаружил, что он у себя в постели превратился в страшное насекомое. Лежа на панцирнотвердой спине, он видел, стоило ему приподнять голову, свой коричневый, выпуклый, разделенный дугообразными чешуйками живот, на верхушке которого еле держалось готовое вот-вот окончательно сползти одеяло. Его многочисленные, убого тонкие по сравнению с остальным телом ножки беспомощно копошились у него перед глазами. «Что со мной случилось?» – подумал он. Это не было сном. Его комната, настоящая, разве что слишком маленькая, но обычная комната, мирно покоилась в своих четырех хорошо знакомых стенах. Над столом, где были разложены распакованные образцы сукон – Замза был коммивояжером, – висел портрет, который он недавно вырезал из иллюстрированного журнала и вставил в красивую золоченую рамку. На портрете была изображена дама в меховой шляпе и боа, она сидела очень прямо и протягивала зрителю тяжелую меховую муфту, в которой целиком исчезала ее рука. Затем взгляд Грегора устремился в окно, и пасмурная погода – слышно было, как по жести подоконника стучат капли дождя – привела его и вовсе в грустное настроение. «Хорошо бы еще немного поспать и забыть всю эту чепуху», – подумал он, но это было совершенно неосуществимо, он привык спать на правом боку, а в теперешнем своем состоянии он никак не мог принять этого положения. С какой бы силой ни поворачивался он на правый бок, он неизменно сваливался опять на спину. Закрыв глаза, чтобы не видеть своих барахтающихся ног, он проделал это добрую сотню раз и отказался от этих попыток только тогда, когда почувствовал какую-то неведомую дотоле, тупую и слабую боль в боку. «Ах ты, господи, – подумал он, – какую я выбрал хлопотную профессию! Изо дня в день в разъездах. 
Деловых волнений куда больше, чем на месте, в торговом доме, а кроме того, изволь терпеть тяготы дороги, думай о расписании поездов, мирись с плохим, нерегулярным питанием, завязывай со все новыми и новыми людьми недолгие, никогда не бывающие сердечными отношения. Черт бы побрал все это!» Он почувствовал вверху живота легкий зуд; медленно подвинулся на спине к прутьям кровати, чтобы удобнее было поднять голову; нашел зудевшее место, сплошь покрытое, как оказалось, белыми непонятными точечками; хотел было ощупать это место одной из ножек, но сразу отдернул ее, ибо даже простое прикосновение вызвало у него, Грегора, озноб. Он соскользнул в прежнее свое положение. «От этого раннего вставания, – подумал он, – можно совсем обезуметь. Человек должен высыпаться. Другие коммивояжеры живут, как одалиски. Когда я, например, среди дня возвращаюсь в гостиницу, чтобы переписать полученные заказы, эти господа только завтракают. А осмелься я вести себя так, мои хозяин выгнал бы меня сразу. Кто знает, впрочем, может быть, это было бы даже очень хорошо для меня. Если бы я не сдерживался ради родителей, я бы давно заявил об уходе, я бы подошел к своему хозяину и выложил ему все, что о нем думаю. Он бы так и свалился с конторки! Странная у него манера – садиться на конторку и с ее высоты разговаривать со служащим, который вдобавок вынужден подойти вплотную к конторке из-за того, что хозяин туг на ухо. Однако надежда еще не совсем потеряна: как только я накоплю денег, чтобы выплатить долг моих родителей – на это уйдет еще лет пять-шесть, – я так и поступлю. Тут-то мы и распрощаемся раз и навсегда. А пока что надо подниматься, мой поезд отходит в пять». И он взглянул на будильник, который тикал на сундуке. «Боже правый!» – подумал он. Было половина седьмого, и стрелки спокойно двигались дальше, было даже больше половины, без малого уже три четверти. Неужели будильник не звонил? С кровати было видно, что он поставлен правильно, на четыре часа; и он, несомненно, звонил. 
Но как можно было спокойно спать под этот сотрясающий мебель трезвон? Ну, спал-то он неспокойно, но, видимо, крепко. Однако что делать теперь? Следующий поезд уходит в семь часов; чтобы поспеть на него, он должен отчаянно торопиться, а набор образцов еще не упакован, да и сам он отнюдь не чувствует себя свежим и легким на подъем. И даже поспей он на поезд, хозяйского разноса ему все равно не избежать – ведь рассыльный торгового дома дежурил у пятичасового поезда и давно доложил о его, Грегора, опоздании. Рассыльный, человек бесхарактерный и неумный, был ставленником хозяина. А что, если сказаться больным? Но это было бы крайне неприятно и показалось бы подозрительным, ибо за пятилетнюю свою службу Грегор ни разу еще не болел. Хозяин, конечно, привел бы врача больничной кассы и стал попрекать родителей сыном-лентяем, отводя любые возражения ссылкой на этого врача, по мнению которого все люди на свете совершенно здоровы и только не любят работать. И разве в данном случае он был бы так уж неправ? Если не считать сонливости, действительно странной после такого долгого сна, Грегор и в самом деле чувствовал себя превосходно и был даже чертовски голоден.Проснувшись однажды утром после беспокойного сна, Грегор Замза обнаружил, что он у себя в постели превратился в страшное насекомое. Лежа на панцирнотвердой спине, он видел, стоило ему приподнять голову, свой коричневый, выпуклый, разделенный дугообразными чешуйками живот, на верхушке которого еле держалось готовое вот-вот окончательно сползти одеяло. Его многочисленные, убого тонкие по сравнению с остальным телом ножки беспомощно копошились у него перед глазами. «Что со мной случилось?» – подумал он. Это не было сном. Его комната, настоящая, разве что слишком маленькая, но обычная комната, мирно покоилась в своих четырех хорошо знакомых стенах. 
Над столом, где были разложены распакованные образцы сукон – Замза был коммивояжером, – висел портрет, который он недавно вырезал из иллюстрированного журнала и вставил в красивую золоченую рамку. На портрете была изображена дама в меховой шляпе и боа, она сидела очень прямо и протягивала зрителю тяжелую меховую муфту, в которой целиком исчезала ее рука. Затем взгляд Грегора устремился в окно, и пасмурная погода – слышно было, как по жести подоконника стучат капли дождя – привела его и вовсе в грустное настроение. «Хорошо бы еще немного поспать и забыть всю эту чепуху», – подумал он, но это было совершенно неосуществимо, он привык спать на правом боку, а в теперешнем своем состоянии он никак не мог принять этого положения. С какой бы силой ни поворачивался он на правый бок, он неизменно сваливался опять на спину.').split(" ")
fn german_text_to_keys() raises -> List[String]:
return String('Weit hinten, hinter den Wortbergen, fern der Länder Vokalien und Konsonantien leben die Blindtexte. Abgeschieden wohnen sie in Buchstabhausen an der Küste des Semantik, eines großen Sprachozeans. Ein kleines Bächlein namens Duden fließt durch ihren Ort und versorgt sie mit den nötigen Regelialien. Es ist ein paradiesmatisches Land, in dem einem gebratene Satzteile in den Mund fliegen. Nicht einmal von der allmächtigen Interpunktion werden die Blindtexte beherrscht – ein geradezu unorthographisches Leben. Eines Tages aber beschloß eine kleine Zeile Blindtext, ihr Name war Lorem Ipsum, hinaus zu gehen in die weite Grammatik. Der große Oxmox riet ihr davon ab, da es dort wimmele von bösen Kommata, wilden Fragezeichen und hinterhältigen Semikoli, doch das Blindtextchen ließ sich nicht beirren. Es packte seine sieben Versalien, schob sich sein Initial in den Gürtel und machte sich auf den Weg. Als es die ersten Hügel des Kursivgebirges erklommen hatte, warf es einen letzten Blick zurück auf die Skyline seiner Heimatstadt Buchstabhausen, die Headline von Alphabetdorf und die Subline seiner eigenen Straße, der Zeilengasse. Wehmütig lief ihm eine rhetorische Frage über die Wange, dann setzte es seinen Weg fort. Unterwegs traf es eine Copy. Die Copy warnte das Blindtextchen, da, wo sie herkäme wäre sie zigmal umgeschrieben worden und alles, was von ihrem Ursprung noch übrig wäre, sei das Wort "und" und das Blindtextchen solle umkehren und wieder in sein eigenes, sicheres Land zurückkehren. Doch alles Gutzureden konnte es nicht überzeugen und so dauerte es nicht lange, bis ihm ein paar heimtückische Werbetexter auflauerten, es mit Longe und Parole betrunken machten und es dann in ihre Agentur schleppten, wo sie es für ihre Projekte wieder und wieder mißbrauchten. Und wenn es nicht umgeschrieben wurde, dann benutzen Sie es immernoch. Weit hinten, hinter den Wortbergen, fern der Länder Vokalien und Konsonantien leben die Blindtexte. 
Abgeschieden wohnen sie in Buchstabhausen an der Küste des Semantik, eines großen Sprachozeans. Ein kleines Bächlein namens Duden fließt durch ihren Ort und versorgt sie mit den nötigen Regelialien. Es ist ein paradiesmatisches Land, in dem einem gebratene Satzteile in den Mund fliegen. Nicht einmal von der allmächtigen Interpunktion werden die Blindtexte beherrscht – ein geradezu unorthographisches Leben. Eines Tages aber beschloß eine kleine Zeile Blindtext, ihr Name war Lorem Ipsum, hinaus zu gehen in die weite Grammatik. Der große Oxmox riet ihr davon ab, da es dort wimmele von bösen Kommata, wilden Fragezeichen und hinterhältigen Semikoli, doch das Blindtextchen ließ sich nicht beirren. Es packte seine sieben Versalien, schob sich sein Initial in den Gürtel und machte sich auf den Weg. Als es die ersten Hügel des Kursivgebirges erklommen hatte, warf es einen letzten Blick zurück auf die Skyline seiner Heimatstadt Buchstabhausen, die Headline von Alphabetdorf und die Subline seiner eigenen Straße, der Zeilengasse. Wehmütig lief ihm eine rhetorische Frage über die Wange, dann setzte es seinen Weg fort. Unterwegs traf es eine Copy. Die Copy warnte das Blindtextchen, da, wo sie herkäme wäre sie zigmal umgeschrieben worden und alles, was von ihrem Ursprung noch übrig wäre, sei das Wort "und" und das Blindtextchen solle umkehren und wieder in sein eigenes, sicheres Land zurückkehren. Doch alles Gutzureden konnte es nicht überzeugen und so dauerte es nicht lange, bis ihm ein paar heimtückische Werbetexter auflauerten, es mit Longe und Parole betrunken machten und es dann in ihre Agentur schleppten, wo sie es für ihre Projekte wieder und wieder mißbrauchten. Und wenn es nicht umgeschrieben wurde, dann benutzen Sie es immernoch. Weit hinten, hinter den Wortbergen, fern der Länder Vokalien und Konsonantien leben die Blindtexte. Abgeschieden wohnen sie in Buchstabhausen an der Küste des Semantik, eines großen Sprachozeans. 
Ein kleines Bächlein namens Duden fließt durch ihren Ort und versorgt sie mit den nötigen Regelialien. Es ist ein paradiesmatisches Land, in dem einem gebratene Satzteile in den Mund fliegen. Nicht einmal von der allmächtigen Interpunktion werden die Blindtexte beherrscht – ein geradezu unorthographisches Leben. Eines Tages aber beschloß eine kleine Zeile Blindtext, ihr Name war Lorem Ipsum, hinaus zu gehen in die weite Grammatik. Der große Oxmox riet ihr davon ab, da es dort wimmele von bösen Kommata, wilden Fragezeichen und hinterhältigen Semikoli, doch das Blindtextchen ließ sich nicht beirren. Es packte seine sieben Versalien, schob sich sein Initial in den Gürtel und machte sich auf den Weg. Als es die ersten Hügel des Kursivgebirges erklommen hatte, warf es einen letzten Blick zurück auf die Skyline seiner Heimatstadt Buchstabhausen, die Headline von Alphabetdorf und die Subline seiner eigenen Straße, der Zeilengasse. Wehmütig lief ihm eine rhetorische Frage über die Wange, dann setzte es seinen Weg fort. Unterwegs traf es eine Copy. Die Copy warnte das Blindtextchen, da, wo sie herkäme wäre sie zigmal umgeschrieben worden und alles, was von ihrem Ursprung noch übrig wäre, sei das Wort "und" und das Blindtextchen solle umkehren und wieder in sein eigenes, sicheres Land zurückkehren. Doch alles Gutzureden konnte es nicht überzeugen und so dauerte es nicht lange, bis ihm ein paar heimtückische Werbetexter auflauerten, es mit Longe und Parole betrunken machten und es dann in ihre Agentur schleppten, wo sie es für ihre Projekte wieder und wieder mißbrauchten. Und wenn es nicht umgeschrieben wurde, dann benutzen Sie es immernoch.Weit hinten, hinter den Wortbergen, fern der Länder Vokalien und Konsonantien leben die Blindtexte. Abgeschieden wohnen sie in Buchstabhausen an der Küste des Semantik, eines großen Sprachozeans. Ein kleines Bächlein namens Duden fließt durch ihren Ort und versorgt sie mit den nötigen Regelialien. 
Es ist ein paradiesmatisches Land, in dem einem gebratene Satzteile in den Mund fliegen. Nicht einmal von der allmächtigen Interpunktion werden die Blindtexte beherrscht – ein geradezu unorthographisches Leben. Eines Tages aber beschloß eine kleine Zeile Blindtext, ihr Name war Lorem Ipsum, hinaus zu gehen in die weite Grammatik. Der große Oxmox riet ihr davon ab, da es dort wimmele von bösen Kommata, wilden Fragezeichen und hinterhältigen Semikoli, doch das Blindtextchen ließ sich nicht beirren. Es packte seine sieben Versalien, schob sich sein Initial in den Gürtel und machte sich auf den Weg. Als es die ersten Hügel des Kursivgebirges erklommen hatte, warf es einen').split(" ")
fn japanese_long_keys() raises -> List[String]:
return String('米くを舵4物委らご氏松ハナテフ月関ソ時平ふいの博情れじフ牟万い元56園フメヤオ試図ロツヤ未備王こと傷喫羅踊んゆし。栃ユヱオ書著作ユソツロ英祉業ア大課ご権質フべ空8午キ切軟づン著郎そゃす格町採ヱオマコ処8付国ムハチア究表でなだ際無ロミヱ地兵ぴげ庭体すク発抜爆位や。楽富むゆず盛航カナセ携代ハ本高きた員59今骸ンラえぜ城解イケ穴訴ぽぎ属住ヤケトヌ抱点ト広注厚でて。 国リ出難セユメ軍手ヘカウ画形サヲシ猛85用ヲキミ心死よしと身処ケヨミオ教主ーぽ事業んく字国たさょ図能シミスヤ社8板ル岡世58次戒知院んれり。市メ誘根カ数問禁竹ゃれえみ給辺のでみき今二ぎさ裕止過こクすと無32郎所ラた生展ヌヘス成度慣葬勇厘ばてか。室ゃ下携疲ム色権がぽりっ銃週ノオ姫千テム健蔵い研手ッ放容ル告属め旅側26企サノヨ宅都福ぞ通待ちぴね種脳イど労希望義通むン。 罰しい続負せ著低たル異師ユハワ東添質コチ転集ルヤ雇聴約ヒ前統らた情厳ゆさでや真胸や有披暑棚豆ゆぼたけ。盛ワセロナ情競クるっわ講3音ずをせ少地めしぜょ手63明視れに判企ヒヌエソ求総58特本ね井比ユラキ禁頭馬るゅリす能率率かがさわ。葉サソ医郡ヱヘソ労帰ナケスミ救写ワヘ株審ネヒニミ安逮イ人画ラ涯車はラ極騒りなド件5級ンかふー劇41著ぱぐ凱討だ文世ぶづどま界善魅マ渓経競融れがや。 連ーぜらご模分ッ視外ばフく運発群ほぼづ育越一ほごクけ案募ヲイソ治会イせフ製君ぜた漢村1変リヒ構5際ツ御文ヲ臭入さドぼ代書ハケ引技ろみれ回観注倉徹ぱ。論ラづ海要サ情座ゃり齢宣ラモエ芸化エマホ覧催回ら戦69本外ト葬岳な政画か連針ぴリフず。約ル闘辺ぽ経2応掲ホサアラ塾小コラ画決クノオ上室レヌヱ勝逮ぜるえむ責豊チノ明意ひけ訟6碁草メタチエ財午召喝塊む。 決めでわ名金つけレわ続人県約ぽぼす尾腹ユサ戦載リシ護賀レモフツ重涯ニ治者むんっみ職更カタチレ提話2何ワ責東まけげふ能政ヌ供禁がびてわ提改倶れめ。読み担後ぽ安加ぎ論鹿ツ統最お気麻月つじもあ竜思いろめ判必満理トコ文連ムイウハ寄串ざほびー。文ゆこっ向27年メイ便能ノセヲ待1王スねたゆ伝派んね点過カト治読よにきべ使人スシ都言え阻8割べづえみ注引敷的岳犠眠どそ。 学用イだ医客開ロ供界もぞだ実隆モイヌ務坂ナコヲ権野ろづ初場ぱ低会づぱじ新倒コ化政レ止奮浸猪ッわえづ。形いやリ要帰ほまむだ業領スル必打さ島14巻リ集日ネヘホタ面幅ち写上そぴ円図ムタコモ報使イわざと会催ヤヲ康証をドぶレ盤岡ホハツ作29管しをめ公問懐蓄っさ。来ゆぼあぱ投秋シ語右ぐ身靖かば辛握捕家記ヘワ神岐囲づ毘観メテクツ政73夕罪57需93誌飲査仁さ。 変レめ束球よんま会特ヱコ聞重だ史純ーどる件32浦レぴよゃ上強ネラリロ査従セユヤ専棋光レ作表ひぶ予正ぜーな誉確フス函6報円ス進治ね能営済否雄でわょ。42生型ば着続ア短実ぎおめび前環闘ラヤヲル診均っとにの声公トヱテマ整試椅情久妊舌頃ざとっく。品キチトテ阿国ラら受87世ヲフセリ川86個ーょぼげ危子ヘレカメ無会ぱかへ事通んかて電条ロツ徴商ぶぞそを居暑メ害広せもがり禁応レミヲ応響割壮憶はぱ。 千れンが織財メニ況界ネトレミ学豊フオホシ近月レたやご的罪ょな菱技ちる警栗エセ提89林危氷48参ア説森クキヒヱ薬社ホコエリ負和ルび紀下ケミイ掲歳特ごず扱底ク護木連ちクを各形ばすか。変ぱなれ町7融ヌ街準以タユヘム質裕ぶで遺語俊ぎずょ事金文キ写多山ーゆに歩帯すで会世クぜよ論写ヲ達71林危氷5間続ぎぜび高怠す。 係8青け応著ミ戦条ナヘネカ思79未ぎ算伊をゃ泉人ーづ需説っ畑鹿27軽ラソツ権2促千護ルロナカ開国ケ暴嶋ご池表だ。佐フナ訪麻はてせば勝効をあ医戦画とさわぴ者両すいあ並来んば載食ぴ件友頂業へえぞ魚祝ネラ聞率スコリケ始全ンこび夫出ドふ今布うぎふゅ実克即哉循やしんな。 暮す備54依紀てッん末刊と柔称むてス無府ケイ変壌をぱ汁連フマス海世ヌ中負知問ナヘケ純推ひ読着ヒ言若私軽れ。掲けフむ王本オコ線人をっさ必和断セソヲハ図芸ちかな防長りぶは投新意相ツ並5余セ職岳ぞ端古空援そ。森ヨエチ題5東っ自兄ち暴5近鹿横ト的京ハ安氷ナキ深際ぎ並節くスむの権工ほルせ京49効タムチ処三ぞぴラ済国ずっ文経ヘトミ水分準そが。').split(" ")
fn s3_action_names() raises -> List[String]:
    """Corpus of AWS S3 / S3 Control API action names, split on spaces.

    NOTE(review): the literal contains "WriteGetObjectResponse," (trailing
    comma) and a literal newline before "PutObjectLockConfiguration", so two
    tokens carry punctuation/whitespace; also some names repeat.  Presumably
    harmless for a key corpus — confirm before using tokens as exact names.
    """
    return String('AbortMultipartUpload CompleteMultipartUpload CopyObject CreateBucket CreateMultipartUpload DeleteBucket DeleteBucketAnalyticsConfiguration DeleteBucketCors DeleteBucketEncryption DeleteBucketIntelligentTieringConfiguration DeleteBucketInventoryConfiguration DeleteBucketLifecycle DeleteBucketMetricsConfiguration DeleteBucketOwnershipControls DeleteBucketPolicy DeleteBucketReplication DeleteBucketTagging DeleteBucketWebsite DeleteObject DeleteObjects DeleteObjectTagging DeletePublicAccessBlock GetBucketAccelerateConfiguration GetBucketAcl GetBucketAnalyticsConfiguration GetBucketCors GetBucketEncryption GetBucketIntelligentTieringConfiguration GetBucketInventoryConfiguration GetBucketLifecycle GetBucketLifecycleConfiguration GetBucketLocation GetBucketLogging GetBucketMetricsConfiguration GetBucketNotification GetBucketNotificationConfiguration GetBucketOwnershipControls GetBucketPolicy GetBucketPolicyStatus GetBucketReplication GetBucketRequestPayment GetBucketTagging GetBucketVersioning GetBucketWebsite GetObject GetObjectAcl GetObjectAttributes GetObjectLegalHold GetObjectLockConfiguration GetObjectRetention GetObjectTagging GetObjectTorrent GetPublicAccessBlock HeadBucket HeadObject ListBucketAnalyticsConfigurations ListBucketIntelligentTieringConfigurations ListBucketInventoryConfigurations ListBucketMetricsConfigurations ListBuckets ListMultipartUploads ListObjects ListObjectsV2 ListObjectVersions ListParts PutBucketAccelerateConfiguration PutBucketAcl PutBucketAnalyticsConfiguration PutBucketCors PutBucketEncryption PutBucketIntelligentTieringConfiguration PutBucketInventoryConfiguration PutBucketLifecycle PutBucketLifecycleConfiguration PutBucketLogging PutBucketMetricsConfiguration PutBucketNotification PutBucketNotificationConfiguration PutBucketOwnershipControls PutBucketPolicy PutBucketReplication PutBucketRequestPayment PutBucketTagging PutBucketVersioning PutBucketWebsite PutObject PutObjectAcl PutObjectLegalHold 
PutObjectLockConfiguration PutObjectRetention PutObjectTagging PutPublicAccessBlock RestoreObject SelectObjectContent UploadPart UploadPartCopy WriteGetObjectResponse, CreateAccessPoint CreateAccessPointForObjectLambda CreateBucket CreateJob CreateMultiRegionAccessPoint DeleteAccessPoint DeleteAccessPointForObjectLambda DeleteAccessPointPolicy DeleteAccessPointPolicyForObjectLambda DeleteBucket DeleteBucketLifecycleConfiguration DeleteBucketPolicy DeleteBucketReplication DeleteBucketTagging DeleteJobTagging DeleteMultiRegionAccessPoint DeletePublicAccessBlock DeleteStorageLensConfiguration DeleteStorageLensConfigurationTagging DescribeJob DescribeMultiRegionAccessPointOperation GetAccessPoint GetAccessPointConfigurationForObjectLambda GetAccessPointForObjectLambda GetAccessPointPolicy GetAccessPointPolicyForObjectLambda GetAccessPointPolicyStatus GetAccessPointPolicyStatusForObjectLambda GetBucket GetBucketLifecycleConfiguration GetBucketPolicy GetBucketReplication GetBucketTagging GetBucketVersioning GetJobTagging GetMultiRegionAccessPoint GetMultiRegionAccessPointPolicy GetMultiRegionAccessPointPolicyStatus GetMultiRegionAccessPointRoutes GetPublicAccessBlock GetStorageLensConfiguration GetStorageLensConfigurationTagging ListAccessPoints ListAccessPointsForObjectLambda ListJobs ListMultiRegionAccessPoints ListRegionalBuckets ListStorageLensConfigurations PutAccessPointConfigurationForObjectLambda PutAccessPointPolicy PutAccessPointPolicyForObjectLambda PutBucketLifecycleConfiguration PutBucketPolicy PutBucketReplication PutBucketTagging PutBucketVersioning PutJobTagging PutMultiRegionAccessPointPolicy PutPublicAccessBlock PutStorageLensConfiguration PutStorageLensConfigurationTagging SubmitMultiRegionAccessPointRoutes UpdateJobPriority UpdateJobStatus').split(" ")
fn system_words_collection() raises -> List[String]:
    """Words from the system dictionary, one per line.

    Raises if /usr/share/dict/words does not exist (not installed on all
    systems); the last element may be empty when the file ends with a newline.
    """
    return Path("/usr/share/dict/words").read_text().split("\n")
<filename>compact-dict/csv/csv_builder.mojo
from memory.memory import memcpy
from buffer import Buffer, Dim
from .string_utils import find_indices, contains_any_of, string_from_pointer
alias BufferType = Buffer[DType.int8]

# Byte values of the CSV control characters (RFC 4180): CR LF row
# terminator, comma field separator, and double quote for escaping.
alias CR_CHAR = "\r"
alias CR = ord(CR_CHAR)
alias LF_CHAR = "\n"
alias LF = ord(LF_CHAR)
alias COMMA_CHAR = ","
alias COMMA = ord(COMMA_CHAR)
alias QUOTE_CHAR = '"'
alias QUOTE = UInt8(ord(QUOTE_CHAR))
struct CsvBuilder:
    """Incrementally builds an RFC-4180 style CSV string.

    Values are pushed row-major; a CR LF row separator is emitted
    automatically every `_column_count` values and a comma between values
    within a row.  `finish` pads the last row, appends a trailing CR LF and
    returns the accumulated bytes as a `String` (ownership of the buffer is
    transferred to the returned String).
    """
    var _buffer: DTypePointer[DType.uint8]
    var _capacity: Int
    var num_bytes: Int
    var _column_count: Int
    var _elements_count: Int
    var _finished: Bool

    fn __init__(inout self, column_count: Int):
        """Create a builder for rows of `column_count` columns."""
        self._capacity = 1024
        self._buffer = DTypePointer[DType.uint8].alloc(self._capacity)
        self._column_count = column_count
        self._elements_count = 0
        self._finished = False
        self.num_bytes = 0

    fn __init__(inout self, *coulmn_names: StringLiteral):
        """Create a builder from a header row; the column count is inferred.

        NOTE: the misspelled `coulmn_names` parameter name is kept for
        backward compatibility with existing call sites.
        """
        self._capacity = 1024
        self._buffer = DTypePointer[DType.uint8].alloc(self._capacity)
        self._elements_count = 0
        self._finished = False
        self.num_bytes = 0
        var column_name_list: VariadicList[StringLiteral] = coulmn_names
        self._column_count = len(column_name_list)
        for i in range(len(column_name_list)):
            self.push(coulmn_names[i])

    fn __del__(owned self):
        # `finish` hands the buffer over to the produced String; only free
        # it when the builder was abandoned before `finish` was called.
        if not self._finished:
            self._buffer.free()

    fn push[S: Stringable](inout self, value: S, consider_escaping: Bool = False):
        """Stringify `value` and append it as the next CSV element."""
        self.push(str(value), consider_escaping)

    fn push_empty(inout self):
        """Append an empty CSV element."""
        self.push("", False)

    fn fill_up_row(inout self):
        """Pad the current row with empty cells up to `_column_count`."""
        var num_empty = self._column_count - (self._elements_count % self._column_count)
        # num_empty == _column_count means the row is already complete.
        if num_empty < self._column_count:
            for _ in range(num_empty):
                self.push_empty()

    fn push(inout self, s: String, consider_escaping: Bool = True):
        """Append `s` as the next element, quoting it if it contains CSV
        control characters and `consider_escaping` is set."""
        if consider_escaping and contains_any_of(
            s, CR_CHAR, LF_CHAR, COMMA_CHAR, QUOTE_CHAR
        ):
            return self.push(QUOTE_CHAR + escape_quotes_in(s) + QUOTE_CHAR, False)

        var size = len(s)
        # +2 covers the worst-case separator (CR LF) written before the value.
        self._extend_buffer_if_needed(size + 2)
        if self._elements_count > 0:
            if self._elements_count % self._column_count == 0:
                # Row boundary: terminate the previous row.
                self._buffer.offset(self.num_bytes).store(CR)
                self._buffer.offset(self.num_bytes + 1).store(LF)
                self.num_bytes += 2
            else:
                self._buffer.offset(self.num_bytes).store(COMMA)
                self.num_bytes += 1

        memcpy(self._buffer.offset(self.num_bytes), s.unsafe_ptr(), size)
        s._strref_keepalive()
        self.num_bytes += size
        self._elements_count += 1

    @always_inline
    fn _extend_buffer_if_needed(inout self, size: Int):
        # Grow geometrically until `size` more bytes fit.
        if self.num_bytes + size < self._capacity:
            return
        var new_size = self._capacity
        while new_size < self.num_bytes + size:
            new_size *= 2
        var p = DTypePointer[DType.uint8].alloc(new_size)
        memcpy(p, self._buffer, self.num_bytes)
        self._buffer.free()
        self._capacity = new_size
        self._buffer = p

    fn finish(owned self) -> String:
        """Terminate the CSV and return it as a String (consumes the builder)."""
        self._finished = True
        self.fill_up_row()
        # BUGFIX: ensure room for trailing CR, LF and the NUL terminator;
        # the original wrote 3 bytes past num_bytes with no capacity check,
        # overflowing the heap buffer when the data ended near capacity.
        self._extend_buffer_if_needed(3)
        self._buffer.offset(self.num_bytes).store(CR)
        self._buffer.offset(self.num_bytes + 1).store(LF)
        # +3 = CR + LF + the 0 byte that string_from_pointer writes at the end.
        self.num_bytes += 3
        return string_from_pointer(self._buffer, self.num_bytes)
fn escape_quotes_in(s: String) -> String:
    """Return `s` with every double quote doubled (CSV escaping).

    Returns `s` unchanged when it contains no quotes.  Works directly on the
    underlying byte buffers: each quote position gets an extra QUOTE byte
    inserted before it, so the result is len(s) + quote-count bytes long.
    """
    var indices = find_indices(s, QUOTE_CHAR)
    var i_size = len(indices)
    if i_size == 0:
        return s
    # `size` includes the trailing NUL byte of the String buffer.
    var size = len(s._buffer)
    var p_current = DTypePointer(s.unsafe_ptr())
    var p_result = DTypePointer[DType.uint8].alloc(size + i_size)
    # Copy everything before the first quote, then write the inserted quote;
    # the original quote itself is copied by the next segment copy below.
    var first_index = int(indices[0])
    memcpy(p_result, p_current, first_index)
    p_result.offset(first_index).store(QUOTE)
    var offset = first_index + 1
    for i in range(1, len(indices)):
        # Segment starts AT the previous quote (so it is duplicated) and
        # runs up to (not including) the next quote.
        var c_offset = int(indices[i - 1])
        var length = int(indices[i]) - c_offset
        memcpy(p_result.offset(offset), p_current.offset(c_offset), length)
        offset += length
        p_result.offset(offset).store(QUOTE)
        offset += 1
    # Tail: from the last quote through the end (incl. the NUL terminator).
    var last_index = int(indices[i_size - 1])
    memcpy(p_result.offset(offset), p_current.offset(last_index), size - last_index)
    return string_from_pointer(p_result, size + i_size)
| compact-dict/csv/csv_builder.mojo | false |
<filename>compact-dict/csv/csv_table.mojo
from .string_utils import find_indices, string_from_pointer
from algorithm.functional import vectorize
from sys.info import simdwidthof
from sys.intrinsics import compressed_store
from math import iota
from memory import stack_allocation
alias QUOTE = ord('"')
alias COMMA = ord(",")
alias LF = ord("\n")
alias CR = ord("\r")
alias simd_width_u8 = simdwidthof[DType.uint8]()
struct CsvTable[sep: Int = COMMA]:
    """Zero-copy index over a CSV string.

    `_starts[i]` / `_ends[i]` delimit element i (row-major) inside
    `_inner_string`; `column_count` is fixed by the first row.  Parsing is
    done either with a scalar state machine (`_parse`) or a SIMD scanner
    (`_simd_parse`).  `sep` is the field separator byte (comma by default).
    """
    var _inner_string: String
    var _starts: List[Int]
    var _ends: List[Int]
    var column_count: Int

    fn __init__(inout self, owned s: String, with_simd: Bool = True):
        """Index the CSV text `s`; `with_simd` selects the SIMD parser."""
        self._inner_string = s
        self._starts = List[Int](capacity=10)
        self._ends = List[Int](capacity=10)
        self.column_count = -1
        if with_simd:
            self._simd_parse()
        else:
            self._parse()

    @always_inline
    fn _parse(inout self):
        # Scalar parser: walk byte by byte, tracking quoted regions so that
        # separators/newlines inside quotes are not treated as boundaries.
        var length = len(self._inner_string)
        var offset = 0
        var in_double_quotes = False
        self._starts.append(offset)
        while offset < length:
            var c = self._inner_string._buffer[offset]
            if c == QUOTE:
                in_double_quotes = not in_double_quotes
                offset += 1
            elif not in_double_quotes and c == sep:
                self._ends.append(offset)
                offset += 1
                self._starts.append(offset)
            elif not in_double_quotes and c == LF:
                self._ends.append(offset)
                # First LF fixes the number of columns.
                if self.column_count == -1:
                    self.column_count = len(self._ends)
                offset += 1
                self._starts.append(offset)
            elif (
                not in_double_quotes
                and c == CR
                and length > offset + 1
                and self._inner_string._buffer[offset + 1] == LF
            ):
                # CR LF line ending: element ends before the CR.
                self._ends.append(offset)
                if self.column_count == -1:
                    self.column_count = len(self._ends)
                offset += 2
                self._starts.append(offset)
            else:
                offset += 1
        # A trailing newline opened one start too many; otherwise close the
        # final element at end of string.
        if self._inner_string[length - 1] == "\n":
            _ = self._starts.pop()
        else:
            self._ends.append(length)

    @always_inline
    fn _simd_parse(inout self):
        # SIMD parser: per chunk, build a mask of quote/sep/LF bytes, then
        # compress the matching lane offsets into a small buffer and process
        # them in order (quote state must still be handled sequentially).
        var p = DTypePointer(self._inner_string.unsafe_ptr())
        var string_byte_length = len(self._inner_string)
        var in_quotes = False
        var last_chunk__ends_on_cr = False
        self._starts.append(0)

        @always_inline
        @parameter
        fn find_indicies[simd_width: Int](offset: Int):
            var chars = p.load[width=simd_width](offset)
            var quotes = chars == QUOTE
            var separators = chars == sep
            var lfs = chars == LF
            var all_bits = quotes | separators | lfs
            var crs = chars == CR
            var offsets = iota[DType.uint8, simd_width]()
            var sp: DTypePointer[DType.uint8] = stack_allocation[
                simd_width, UInt8, simd_width
            ]()
            compressed_store(offsets, sp, all_bits)
            var all_len = all_bits.reduce_bit_count()
            for i in range(all_len):
                var index = int(sp.load(i))
                if quotes[index]:
                    in_quotes = not in_quotes
                    continue
                if in_quotes:
                    continue
                var current_offset = index + offset
                # For CR LF endings the element must end before the CR; the
                # CR may sit in the previous chunk (last_chunk__ends_on_cr).
                var rs_compensation: Int
                if index > 0:
                    rs_compensation = int(lfs[index] & crs[index - 1])
                else:
                    rs_compensation = int(lfs[index] & last_chunk__ends_on_cr)
                self._ends.append(current_offset - rs_compensation)
                self._starts.append(current_offset + 1)
                if self.column_count == -1 and lfs[index]:
                    self.column_count = len(self._ends)
            last_chunk__ends_on_cr = crs[simd_width - 1]

        vectorize[find_indicies, simd_width_u8](string_byte_length)
        if self._inner_string[string_byte_length - 1] == "\n":
            _ = self._starts.pop()
        else:
            self._ends.append(string_byte_length)

    fn get(self, row: Int, column: Int) -> String:
        """Return element (row, column), unescaping quoted values.

        Returns "" for out-of-range coordinates.  Quoted elements are copied
        and doubled quotes ("" -> ") collapsed; unquoted elements are sliced
        straight out of the inner string.
        """
        if column >= self.column_count:
            return ""
        var index = self.column_count * row + column
        if index >= len(self._ends):
            return ""
        if (
            self._inner_string[self._starts[index]] == '"'
            and self._inner_string[self._ends[index] - 1] == '"'
        ):
            # Strip the surrounding quotes into a fresh buffer.
            var start = self._starts[index] + 1
            var length = (self._ends[index] - 1) - start
            var p1 = Pointer[UInt8].alloc(length + 1)
            memcpy(p1, DTypePointer(self._inner_string.unsafe_ptr()).offset(start), length)
            var _inner_string = string_from_pointer(p1, length + 1)
            var quote_indices = find_indices(_inner_string, '"')
            var quotes_count = len(quote_indices)
            # No quotes, or an odd count (malformed escaping): return as-is.
            if quotes_count == 0 or quotes_count & 1 == 1:
                return _inner_string
            # Collapse each "" pair by copying segments that skip the first
            # quote of every pair.
            var p = DTypePointer(_inner_string.unsafe_ptr())
            var length2 = length - (quotes_count >> 1)
            var p2 = Pointer[UInt8].alloc(length2 + 1)
            var offset2 = 0
            memcpy(p2, p, int(quote_indices[0]))
            offset2 += int(quote_indices[0])
            for i in range(2, quotes_count, 2):
                var start = int(quote_indices[i - 1])
                var size = int(quote_indices[i]) - start
                memcpy(p2.offset(offset2), p.offset(start), size)
                offset2 += size
            var last = int(quote_indices[quotes_count - 1])
            memcpy(p2.offset(offset2), p.offset(last), length - last)
            _inner_string._strref_keepalive()
            return string_from_pointer(p2, length - (quotes_count >> 1) + 1)
        return self._inner_string[self._starts[index] : self._ends[index]]

    fn row_count(self) -> Int:
        """Number of rows (including a possibly partial last row)."""
        return len(self._starts) // self.column_count
| compact-dict/csv/csv_table.mojo | false |
<filename>compact-dict/csv/string_utils.mojo
from algorithm.functional import vectorize
from sys.info import simdwidthof
from sys.intrinsics import compressed_store
# from math import iota, reduce_bit_count, any_true
from math import iota
from memory import stack_allocation
from time import now
from collections.vector import InlinedFixedVector
alias simd_width_i8 = simdwidthof[DType.int8]()
fn vectorize_and_exit[simd_width: Int, workgroup_function: fn[i: Int](Int) capturing -> Bool](size: Int):
    """Like `vectorize`, but stops as soon as `workgroup_function` returns True.

    Full `simd_width` chunks are processed first; the remainder is handled
    by a cascade of progressively smaller power-of-two widths so every byte
    is visited exactly once.
    """
    var loops = size // simd_width
    for i in range(loops):
        if workgroup_function[simd_width](i * simd_width):
            return
    # Remainder: size is not necessarily a multiple of simd_width.
    var rest = size & (simd_width - 1)
    @parameter
    if simd_width >= 64:
        if rest >= 32:
            if workgroup_function[32](size - rest):
                return
            rest -= 32
    @parameter
    if simd_width >= 32:
        if rest >= 16:
            if workgroup_function[16](size - rest):
                return
            rest -= 16
    @parameter
    if simd_width >= 16:
        if rest >= 8:
            if workgroup_function[8](size - rest):
                return
            rest -= 8
    @parameter
    if simd_width >= 8:
        if rest >= 4:
            if workgroup_function[4](size - rest):
                return
            rest -= 4
    @parameter
    if simd_width >= 4:
        if rest >= 2:
            if workgroup_function[2](size - rest):
                return
            rest -= 2
    # At most one final byte remains.
    if rest == 1:
        _= workgroup_function[1](size - rest)
fn find_indices(s: String, c: String) -> List[UInt64]:
    """Return the byte offsets of every occurrence of `c` (a single ASCII
    character) in `s`, in ascending order.

    SIMD scan: per chunk, matching lane offsets are compressed-stored
    directly into the result list's backing memory.
    """
    var size = len(s)
    var result = List[UInt64]()
    var char = UInt8(ord(c))
    var p = DTypePointer(s.unsafe_ptr())

    @parameter
    fn find[simd_width: Int](offset: Int):
        @parameter
        if simd_width == 1:
            # Scalar tail.
            if p.offset(offset).load() == char:
                return result.append(offset)
        else:
            var chunk = p.load[width=simd_width](offset)
            var occurrence = chunk == char
            var offsets = iota[DType.uint64, simd_width]() + offset
            var occurrence_count = occurrence.reduce_bit_count()
            var current_len = len(result)
            # Grow the list, then compress the matching offsets into the
            # newly reserved tail.
            result.reserve(current_len + occurrence_count)
            result.resize(current_len + occurrence_count, 0)
            compressed_store(offsets, DTypePointer[DType.uint64](result.data).offset(current_len), occurrence)

    vectorize[find, simd_width_i8](size)
    return result
fn occurrence_count(s: String, *c: String) -> Int:
    """Count how many bytes of `s` equal any of the given single-character
    strings (SIMD scan with a scalar tail)."""
    var size = len(s)
    var result = 0
    var chars = InlinedFixedVector[UInt8](len(c))
    for i in range(len(c)):
        chars.append(UInt8(ord(c[i])))
    var p = DTypePointer(s.unsafe_ptr())

    @parameter
    fn find[simd_width: Int](offset: Int):
        @parameter
        if simd_width == 1:
            # Scalar tail: at most one match per byte, so return after a hit.
            for i in range(len(chars)):
                var char = chars[i]
                if p.offset(offset).load() == char:
                    result += 1
                    return
        else:
            # OR together the match masks of all characters, then popcount.
            var chunk = p.load[width=simd_width](offset)
            var occurrence = SIMD[DType.bool, simd_width](False)
            for i in range(len(chars)):
                occurrence |= chunk == chars[i]
            var occurrence_count = occurrence.reduce_bit_count()
            result += occurrence_count

    vectorize[find, simd_width_i8](size)
    return result
fn contains_any_of(s: String, *c: String) -> Bool:
    """True if any byte of `s` equals one of the given single-character
    strings; scans with SIMD and stops at the first hit."""
    var size = len(s)
    # var c_list: VariadicListMem[String] = c
    var chars = InlinedFixedVector[UInt8](len(c))
    for i in range(len(c)):
        chars.append(UInt8(ord(c[i])))
    var p = DTypePointer(s.unsafe_ptr())
    var flag = False

    @parameter
    fn find[simd_width: Int](i: Int) -> Bool:
        # NOTE(review): the chunk offset argument `i` is ignored; `p` is
        # advanced manually instead.  This relies on vectorize_and_exit
        # visiting chunks strictly in order — confirm if that helper changes.
        var chunk = p.load[width=simd_width]()
        p = p.offset(simd_width)
        for i in range(len(chars)):
            var occurrence = chunk == chars[i]
            if occurrence.reduce_or():
                flag = True
                return flag
        return False

    vectorize_and_exit[simd_width_i8, find](size)
    return flag
@always_inline
fn string_from_pointer(p: DTypePointer[DType.uint8], length: Int) -> String:
    """Wrap `p` (length bytes incl. terminator slot) in a String, taking
    ownership of the allocation."""
    # Since Mojo 0.5.0 the pointer needs to provide a 0 terminated byte string
    p.store(length - 1, 0)
    return String(p, length)
fn print_v(v: List[UInt64]):
    """Debug-print a list as "(len)[a, b, ..., z]"."""
    var size = len(v)
    # BUGFIX: an empty list used to print "(0)[" and never close the bracket.
    if size == 0:
        print("(0)[]")
        return
    print("(" + str(size) + ")[")
    for i in range(size):
        # ", " between elements; "]" + newline after the last one.
        var end = ", " if i < size - 1 else "]\n"
        print(v[i], end=end)
| compact-dict/csv/string_utils.mojo | false |
from .csv_builder import CsvBuilder
from .csv_table import CsvTable
| compact-dict/csv/__init__.mojo | false |
<filename>compact-dict/generic_dict/ahasher.mojo
# This code is based on https://github.com/tkaitchuck/aHash
from bit import rotate_bits_left, byte_swap
from .keys_container import KeyRef
# 4- and 2-lane u64 vectors used as the aHash key/state types.
alias U256 = SIMD[DType.uint64, 4]
alias U128 = SIMD[DType.uint64, 2]
# Multiplier and rotation constants from the reference aHash implementation.
alias MULTIPLE = 6364136223846793005
alias ROT = 23
@always_inline
fn folded_multiply(s: UInt64, by: UInt64) -> UInt64:
    """aHash's folded multiply: two byte-swapped cross products XOR-ed
    together, mixing both halves of the 64x64 product into 64 bits."""
    var lo = s * byte_swap(by)
    var hi = byte_swap(byte_swap(s) * (~by))
    return lo ^ hi
@always_inline
fn read_small(data: DTypePointer[DType.uint8], length: Int) -> U128:
    """Load up to 8 bytes into two u64 lanes, aHash style.

    The two loads are anchored at the start and the end of the buffer and
    may overlap for odd lengths — this is intentional and matches the
    reference implementation.
    """
    if length >= 2:
        if length >= 4:
            # len 4-8: two (possibly overlapping) 32-bit loads.
            var a = data.bitcast[DType.uint32]().load().cast[DType.uint64]()
            var b = data.offset(length - 4).bitcast[DType.uint32]().load().cast[DType.uint64]()
            return U128(a, b)
        else:
            # len 2-3: a 16-bit load plus the final byte.
            var a = data.bitcast[DType.uint16]().load().cast[DType.uint64]()
            var b = data.offset(length - 1).load().cast[DType.uint64]()
            return U128(a, b)
    else:
        if length > 0:
            # len 1: duplicate the single byte into both lanes.
            var a = data.load().cast[DType.uint64]()
            return U128(a, a)
        else:
            return U128(0, 0)
struct AHasher:
    """Incremental aHash state (port of tkaitchuck/aHash's fallback hasher)."""
    var buffer: UInt64
    var pad: UInt64
    var extra_keys: U128

    fn __init__(inout self, key: U256):
        # Mix the user key with fractional digits of pi, as reference aHash does.
        var pi_key = key ^ U256(0x243f_6a88_85a3_08d3, 0x1319_8a2e_0370_7344, 0xa409_3822_299f_31d0, 0x082e_fa98_ec4e_6c89,)
        self.buffer = pi_key[0]
        self.pad = pi_key[1]
        self.extra_keys = U128(pi_key[2], pi_key[3])

    @always_inline
    fn update(inout self, new_data: UInt64):
        """Fold 8 bytes into the state."""
        self.buffer = folded_multiply(new_data ^ self.buffer, MULTIPLE)

    @always_inline
    fn large_update(inout self, new_data: U128):
        """Fold 16 bytes into the state."""
        var combined = folded_multiply(
            new_data[0] ^ self.extra_keys[0], new_data[1] ^ self.extra_keys[1]
        )
        self.buffer = rotate_bits_left[ROT]((self.buffer + self.pad) ^ combined)

    @always_inline
    fn short_finish(self) -> UInt64:
        return self.buffer + self.pad

    @always_inline
    fn finish(self) -> UInt64:
        # Final avalanche: data-dependent rotate of a folded multiply.
        var rot = self.buffer & 63
        var folded = folded_multiply(self.buffer, self.pad)
        return (folded << rot) | (folded >> (64 - rot))

    @always_inline
    fn write(inout self, data: DTypePointer[DType.uint8], length: Int):
        """Hash `length` bytes starting at `data`."""
        self.buffer = (self.buffer + length) * MULTIPLE
        if length > 8:
            if length > 16:
                # Consume the (possibly overlapping) final 16 bytes first,
                # then walk the rest in 16-byte blocks.
                var tail = data.offset(length - 16).bitcast[DType.uint64]().load[width=2]()
                self.large_update(tail)
                var offset = 0
                while length - offset > 16:
                    var block = data.offset(offset).bitcast[DType.uint64]().load[width=2]()
                    self.large_update(block)
                    offset += 16
            else:
                # len 9-16: two overlapping 8-byte loads.
                var a = data.bitcast[DType.uint64]().load()
                var b = data.offset(length - 8).bitcast[DType.uint64]().load()
                self.large_update(U128(a, b))
        else:
            var value = read_small(data, length)
            self.large_update(value)
@always_inline
fn ahash(s: KeyRef) -> UInt64:
    """Hash a key's bytes with a zero-keyed AHasher.

    Inputs of 8 bytes or fewer take a short fast path that mirrors the
    reference aHash specialization.
    """
    var length = s.size
    var b = s.pointer
    var hasher = AHasher(U256(0, 0, 0, 0))
    if length > 8:
        hasher.write(b, length)
    else:
        var value = read_small(b, length)
        hasher.buffer = folded_multiply(value[0] ^ hasher.buffer, value[1] ^ hasher.extra_keys[1])
        hasher.pad = hasher.pad + length
    return hasher.finish()
| compact-dict/generic_dict/ahasher.mojo | false |
from bit import pop_count, bit_width
from memory import memset_zero, memcpy
from .key_eq import eq
from .keys_container import KeysContainer, KeyRef, Keyable
from .ahasher import ahash
from .single_key_builder import SingleKeyBuilder
struct Dict[
    V: CollectionElement,
    hash: fn(KeyRef) -> UInt64 = ahash,
    KeyCountType: DType = DType.uint32,
    KeyOffsetType: DType = DType.uint32,
    destructive: Bool = True,
    caching_hashes: Bool = True,
](Sized):
    """Compact open-addressing (linear probing) hash map with serialized keys.

    - `slot_to_index` maps a slot to key-index + 1; 0 marks an empty slot.
    - `destructive` enables deletion via a tombstone bitmask (`deleted_mask`).
    - `caching_hashes` stores each slot's hash so probing can skip most
      byte-wise key comparisons.
    - Capacity is always a power of two so `hash & (capacity - 1)` selects
      a slot.
    """
    var keys: KeysContainer[KeyOffsetType]
    var key_hashes: DTypePointer[KeyCountType]
    var values: List[V]
    var slot_to_index: DTypePointer[KeyCountType]
    var deleted_mask: DTypePointer[DType.uint8]
    var count: Int
    var capacity: Int
    var key_builder: SingleKeyBuilder

    fn __init__(inout self, capacity: Int = 16):
        constrained[
            KeyCountType == DType.uint8 or
            KeyCountType == DType.uint16 or
            KeyCountType == DType.uint32 or
            KeyCountType == DType.uint64,
            "KeyCountType needs to be an unsigned integer"
        ]()
        self.count = 0
        if capacity <= 8:
            self.capacity = 8
        else:
            # Round the requested capacity up to the next power of two.
            var icapacity = Int64(capacity)
            self.capacity = capacity if pop_count(icapacity) == 1 else (
                1 << int(bit_width(icapacity))
            )
        self.keys = KeysContainer[KeyOffsetType](capacity)
        self.key_builder = SingleKeyBuilder()
        @parameter
        if caching_hashes:
            self.key_hashes = DTypePointer[KeyCountType].alloc(self.capacity)
        else:
            self.key_hashes = DTypePointer[KeyCountType].alloc(0)
        self.values = List[V](capacity=capacity)
        self.slot_to_index = DTypePointer[KeyCountType].alloc(self.capacity)
        memset_zero(self.slot_to_index, self.capacity)
        @parameter
        if destructive:
            # One tombstone bit per key slot.
            self.deleted_mask = DTypePointer[DType.uint8].alloc(self.capacity >> 3)
            memset_zero(self.deleted_mask, self.capacity >> 3)
        else:
            self.deleted_mask = DTypePointer[DType.uint8].alloc(0)

    fn __copyinit__(inout self, existing: Self):
        self.count = existing.count
        self.capacity = existing.capacity
        self.keys = existing.keys
        # BUGFIX: copy from `existing` — the original read the still
        # uninitialized `self.key_builder`.
        self.key_builder = existing.key_builder
        @parameter
        if caching_hashes:
            self.key_hashes = DTypePointer[KeyCountType].alloc(self.capacity)
            memcpy(self.key_hashes, existing.key_hashes, self.capacity)
        else:
            self.key_hashes = DTypePointer[KeyCountType].alloc(0)
        self.values = existing.values
        self.slot_to_index = DTypePointer[KeyCountType].alloc(self.capacity)
        memcpy(self.slot_to_index, existing.slot_to_index, self.capacity)
        @parameter
        if destructive:
            self.deleted_mask = DTypePointer[DType.uint8].alloc(self.capacity >> 3)
            memcpy(self.deleted_mask, existing.deleted_mask, self.capacity >> 3)
        else:
            self.deleted_mask = DTypePointer[DType.uint8].alloc(0)

    fn __moveinit__(inout self, owned existing: Self):
        self.count = existing.count
        self.capacity = existing.capacity
        self.keys = existing.keys^
        self.key_builder = existing.key_builder^
        self.key_hashes = existing.key_hashes
        self.values = existing.values^
        self.slot_to_index = existing.slot_to_index
        self.deleted_mask = existing.deleted_mask

    fn __del__(owned self):
        self.slot_to_index.free()
        self.deleted_mask.free()
        self.key_hashes.free()

    fn __len__(self) -> Int:
        return self.count

    @always_inline
    fn __contains__[T: Keyable](inout self, key: T) -> Bool:
        try:
            self.key_builder.reset()
            key.accept(self.key_builder)
            var key_ref = self.key_builder.get_key()
            return self._find_key_index(key_ref) != 0
        except:
            return False

    fn put[T: Keyable](inout self, key: T, value: V) raises -> Bool:
        """Return True when value is inserted and not updated."""
        # Keep the load factor below 87%.
        if self.count / self.capacity >= 0.87:
            self._rehash()
        # Serialize the key into the arena; dropped again if it already exists.
        key.accept(self.keys)
        self.keys.end_key()
        var key_ref = self.keys.get_last()
        var key_hash = hash(key_ref).cast[KeyCountType]()
        var modulo_mask = self.capacity - 1
        var slot = int(key_hash & modulo_mask)
        while True:
            var key_index = int(self.slot_to_index.load(slot))
            if key_index == 0:
                # Empty slot: insert the new key.
                @parameter
                if caching_hashes:
                    self.key_hashes.store(slot, key_hash)
                self.values.append(value)
                self.count += 1
                self.slot_to_index.store(slot, SIMD[KeyCountType, 1](self.keys.count))
                return True
            @parameter
            if caching_hashes:
                var other_key_hash = self.key_hashes[slot]
                if other_key_hash == key_hash:
                    var other_key = self.keys[key_index - 1]
                    if eq(other_key, key_ref):
                        self.values[key_index - 1] = value  # replace value
                        self.keys.drop_last()
                        @parameter
                        if destructive:
                            # Re-inserting a deleted key counts as an insert.
                            if self._is_deleted(key_index - 1):
                                self.count += 1
                                self._not_deleted(key_index - 1)
                                return True
                        return False
            else:
                var other_key = self.keys[key_index - 1]
                if eq(other_key, key_ref):
                    self.values[key_index - 1] = value  # replace value
                    self.keys.drop_last()
                    @parameter
                    if destructive:
                        if self._is_deleted(key_index - 1):
                            self.count += 1
                            self._not_deleted(key_index - 1)
                            return True
                    return False
            # Collision: probe the next slot.
            slot = (slot + 1) & modulo_mask

    @always_inline
    fn _is_deleted(self, index: Int) -> Bool:
        var offset = index >> 3
        var bit_index = index & 7
        return self.deleted_mask.offset(offset).load() & (1 << bit_index) != 0

    @always_inline
    fn _deleted(self, index: Int):
        var offset = index >> 3
        var bit_index = index & 7
        var p = self.deleted_mask.offset(offset)
        var mask = p.load()
        p.store(mask | (1 << bit_index))

    @always_inline
    fn _not_deleted(self, index: Int):
        var offset = index >> 3
        var bit_index = index & 7
        var p = self.deleted_mask.offset(offset)
        var mask = p.load()
        p.store(mask & ~(1 << bit_index))

    @always_inline
    fn _rehash(inout self) raises:
        """Double the capacity and redistribute all slots."""
        var old_slot_to_index = self.slot_to_index
        var old_capacity = self.capacity
        self.capacity <<= 1
        var mask_capacity = self.capacity >> 3
        self.slot_to_index = DTypePointer[KeyCountType].alloc(self.capacity)
        memset_zero(self.slot_to_index, self.capacity)

        var key_hashes = self.key_hashes
        @parameter
        if caching_hashes:
            key_hashes = DTypePointer[KeyCountType].alloc(self.capacity)

        @parameter
        if destructive:
            # Tombstones are indexed by key index, so they survive rehashing.
            var deleted_mask = DTypePointer[DType.uint8].alloc(mask_capacity)
            memset_zero(deleted_mask, mask_capacity)
            memcpy(deleted_mask, self.deleted_mask, old_capacity >> 3)
            self.deleted_mask.free()
            self.deleted_mask = deleted_mask

        var modulo_mask = self.capacity - 1
        for i in range(old_capacity):
            if old_slot_to_index[i] == 0:
                continue
            var key_hash = SIMD[KeyCountType, 1](0)
            @parameter
            if caching_hashes:
                key_hash = self.key_hashes[i]
            else:
                key_hash = hash(self.keys[int(old_slot_to_index[i] - 1)]).cast[KeyCountType]()

            var slot = int(key_hash & modulo_mask)
            while True:
                var key_index = int(self.slot_to_index.load(slot))
                if key_index == 0:
                    self.slot_to_index.store(slot, old_slot_to_index[i])
                    break
                else:
                    slot = (slot + 1) & modulo_mask
            @parameter
            if caching_hashes:
                key_hashes[slot] = key_hash

        @parameter
        if caching_hashes:
            self.key_hashes.free()
            self.key_hashes = key_hashes
        old_slot_to_index.free()

    @always_inline
    fn get[T: Keyable](inout self, key: T, default: V) raises -> V:
        """Return the value stored for `key`, or `default` when absent."""
        self.key_builder.reset()
        key.accept(self.key_builder)
        var key_ref = self.key_builder.get_key()
        var key_index = self._find_key_index(key_ref)
        if key_index == 0:
            return default
        @parameter
        if destructive:
            if self._is_deleted(key_index - 1):
                return default
        return self.values[key_index - 1]

    fn delete[T: Keyable](inout self, key: T) raises:
        """Mark `key` as deleted (no-op unless `destructive`)."""
        @parameter
        if not destructive:
            return
        self.key_builder.reset()
        key.accept(self.key_builder)
        var key_ref = self.key_builder.get_key()
        var key_index = self._find_key_index(key_ref)
        if key_index == 0:
            return
        if not self._is_deleted(key_index - 1):
            self.count -= 1
        self._deleted(key_index - 1)

    fn clear(inout self):
        """Remove all entries, keeping the allocated capacity."""
        self.values.clear()
        self.keys.clear()
        memset_zero(self.slot_to_index, self.capacity)
        @parameter
        if destructive:
            memset_zero(self.deleted_mask, self.capacity >> 3)
        self.count = 0

    fn _find_key_index(self, key_ref: KeyRef) raises -> Int:
        # Returns key index + 1, or 0 when the key is not present.
        var key_hash = hash(key_ref).cast[KeyCountType]()
        var modulo_mask = self.capacity - 1
        var slot = int(key_hash & modulo_mask)
        while True:
            var key_index = int(self.slot_to_index.load(slot))
            if key_index == 0:
                return key_index
            @parameter
            if caching_hashes:
                var other_key_hash = self.key_hashes[slot]
                if key_hash == other_key_hash:
                    var other_key = self.keys[key_index - 1]
                    if eq(other_key, key_ref):
                        return key_index
            else:
                var other_key = self.keys[key_index - 1]
                if eq(other_key, key_ref):
                    return key_index
            slot = (slot + 1) & modulo_mask

    fn debug(self) raises:
        """Print internal state for debugging."""
        print("Dict count:", self.count, "and capacity:", self.capacity)
        print("KeyMap:")
        for i in range(self.capacity):
            var end = ", " if i < self.capacity - 1 else "\n"
            print(self.slot_to_index.load(i), end=end)
        print("Keys:")
        self.keys.print_keys()
        @parameter
        if caching_hashes:
            print("KeyHashes:")
            for i in range(self.capacity):
                var end = ", " if i < self.capacity - 1 else "\n"
                if self.slot_to_index.load(i) > 0:
                    print(self.key_hashes.load(i), end=end)
                else:
                    print(0, end=end)
| compact-dict/generic_dict/dict.mojo | false |
from collections.vector import InlinedFixedVector
trait Keyable:
    """A type that can serialize itself into key bytes via a KeysBuilder."""
    fn accept[T: KeysBuilder](self, inout keys_builder: T): ...

# Hex digits used by KeyRef.__str__ for the debug representation.
alias lookup = String("0123456789abcdef")

@value
struct KeyRef(Stringable):
    """Non-owning view of a serialized key: raw byte pointer plus length."""
    var pointer: DTypePointer[DType.uint8]
    var size: Int

    fn __str__(self) -> String:
        # "(size)" followed by the key bytes hex-encoded.
        var result = String("(") + str(self.size) + (")")
        for i in range(self.size):
            result += lookup[int(self.pointer.load(i) >> 4)]
            result += lookup[int(self.pointer.load(i) & 0xf)]
        return result

trait KeysBuilder:
    """Sink for key serialization: accepts SIMD values and raw buffers."""
    fn add[T: DType, size: Int](inout self, value: SIMD[T, size]): ...
    fn add_buffer[T: DType](inout self, pointer: DTypePointer[T], size: Int): ...
struct KeysContainer[KeyEndType: DType = DType.uint32](Sized, KeysBuilder):
var keys: DTypePointer[DType.uint8]
var allocated_bytes: Int
var keys_end: DTypePointer[KeyEndType]
var count: Int
var capacity: Int
var key_size: Int
fn __init__(inout self, capacity: Int):
constrained[
KeyEndType == DType.uint8 or
KeyEndType == DType.uint16 or
KeyEndType == DType.uint32 or
KeyEndType == DType.uint64,
"KeyEndType needs to be an unsigned integer"
]()
self.allocated_bytes = capacity << 3
self.keys = DTypePointer[DType.uint8].alloc(self.allocated_bytes)
self.keys_end = DTypePointer[KeyEndType].alloc(capacity)
self.count = 0
self.capacity = capacity
self.key_size = 0
fn __copyinit__(inout self, existing: Self):
self.allocated_bytes = existing.allocated_bytes
self.count = existing.count
self.capacity = existing.capacity
self.key_size = existing.key_size
self.keys = DTypePointer[DType.uint8].alloc(self.allocated_bytes)
memcpy(self.keys, existing.keys, self.allocated_bytes)
self.keys_end = DTypePointer[KeyEndType].alloc(self.allocated_bytes)
memcpy(self.keys_end, existing.keys_end, self.capacity)
fn __moveinit__(inout self, owned existing: Self):
self.allocated_bytes = existing.allocated_bytes
self.count = existing.count
self.capacity = existing.capacity
self.key_size = existing.key_size
self.keys = existing.keys
self.keys_end = existing.keys_end
fn __del__(owned self):
self.keys.free()
self.keys_end.free()
@always_inline
fn add[T: DType, size: Int](inout self, value: SIMD[T, size]):
var prev_end = 0 if self.count == 0 else self.keys_end[self.count - 1]
var key_length = size * T.sizeof()
var old_key_size = self.key_size
self.key_size += key_length
var new_end = prev_end + self.key_size
var needs_realocation = False
while new_end > self.allocated_bytes:
self.allocated_bytes += self.allocated_bytes >> 1
needs_realocation = True
if needs_realocation:
var keys = DTypePointer[DType.uint8].alloc(self.allocated_bytes)
memcpy(keys, self.keys, int(prev_end) + old_key_size)
self.keys.free()
self.keys = keys
self.keys.store(prev_end + old_key_size, bitcast[DType.uint8, size * T.sizeof()](value))
@always_inline
fn add_buffer[T: DType](inout self, pointer: DTypePointer[T], size: Int):
var prev_end = 0 if self.count == 0 else self.keys_end[self.count - 1]
var key_length = size * T.sizeof()
var old_key_size = self.key_size
self.key_size += key_length
var new_end = prev_end + self.key_size
var needs_realocation = False
while new_end > self.allocated_bytes:
self.allocated_bytes += self.allocated_bytes >> 1
needs_realocation = True
if needs_realocation:
var keys = DTypePointer[DType.uint8].alloc(self.allocated_bytes)
memcpy(keys, self.keys, int(prev_end) + old_key_size)
self.keys.free()
self.keys = keys
memcpy(self.keys.offset(prev_end + old_key_size), pointer.bitcast[DType.uint8](), key_length)
@always_inline
fn end_key(inout self):
var prev_end = 0 if self.count == 0 else self.keys_end[self.count - 1]
var count = self.count + 1
if count >= self.capacity:
var new_capacity = self.capacity + (self.capacity >> 1)
var keys_end = DTypePointer[KeyEndType].alloc(self.allocated_bytes)
memcpy(keys_end, self.keys_end, self.capacity)
self.keys_end.free()
self.keys_end = keys_end
self.capacity = new_capacity
self.keys_end.store(self.count, prev_end + self.key_size)
self.count = count
self.key_size = 0
@always_inline
fn drop_last(inout self):
self.count -= 1
@always_inline
fn get_last(self) raises -> KeyRef:
return self.get(self.count - 1)
@always_inline
fn get(self, index: Int) raises -> KeyRef:
if index < 0 or index >= self.count:
raise "Invalid index"
var start = 0 if index == 0 else int(self.keys_end[index - 1])
var length = int(self.keys_end[index]) - start
return KeyRef(self.keys.offset(start), length)
    @always_inline
    fn clear(inout self):
        """Logically remove all keys; buffers stay allocated for reuse."""
        self.count = 0
    @always_inline
    fn __getitem__(self, index: Int) raises -> KeyRef:
        """Subscript access; delegates to `get` (same bounds checks)."""
        return self.get(index)
    @always_inline
    fn __len__(self) -> Int:
        """Number of finished keys stored in the container."""
        return self.count
fn print_keys(self) raises:
print("(" + str(self.count) + ")[")
for i in range(self.count):
var end = ", " if i < self.capacity - 1 else "]\n"
print(self[i], end=end)
| compact-dict/generic_dict/keys_container.mojo | false |
<filename>compact-dict/generic_dict/key_eq.mojo
from .keys_container import KeyRef
@always_inline
fn eq(a: KeyRef, b: KeyRef) -> Bool:
    """Byte-wise equality of two key references.

    Keys are equal iff they have the same length and identical bytes.
    Compares 16 bytes at a time with SIMD loads, then finishes the tail
    byte by byte.
    """
    var total = a.size
    if total != b.size:
        return False
    var lhs = a.pointer
    var rhs = b.pointer
    alias chunk = 16
    var pos = 0
    # Bulk comparison: any differing lane short-circuits to False.
    while total - pos >= chunk:
        var diff = lhs.load[width=chunk](pos) != rhs.load[width=chunk](pos)
        if diff.reduce_or():
            return False
        pos += chunk
    # Remaining tail (fewer than `chunk` bytes).
    while pos < total:
        if lhs.load(pos) != rhs.load(pos):
            return False
        pos += 1
    return True
| compact-dict/generic_dict/key_eq.mojo | false |
from .ahasher import ahash
from .key_eq import eq
from .keys_container import KeyRef, KeysContainer
from .single_key_builder import SingleKeyBuilder
from .sparse_array import SparseArray
from bit import pop_count, bit_width
@value
struct _ValuesIter[
    list_mutability: Bool, //,
    T: CollectionElement,
    NextKeyCountType: DType,
    list_lifetime: AnyLifetime[list_mutability].type,
]:
    """Iterator over all values stored under one key of a `MultiDict`.

    The first value for a key lives in the dict's `values` list; every
    subsequent value lives in `next_values`, linked through the
    `next_next_values_index` sparse array (index i -> index of the next
    chained value, absent when the chain ends).
    """
    alias list_type = List[T]
    # Index of the element to yield on the next `__next__` call; None when exhausted.
    var current_index: Optional[Int]
    # Index of the element after that (pre-fetched one step ahead); None when
    # `current_index` is the last element.
    var next_index: Optional[Int]
    var values: Reference[Self.list_type, list_lifetime]
    var next_values: Reference[Self.list_type, list_lifetime]
    var next_next_values_index: Reference[SparseArray[NextKeyCountType], list_lifetime]
    # True until the first element has been yielded; the first element comes
    # from `values`, all later ones from `next_values`.
    var first: Bool
    fn __iter__(self) -> Self:
        return self
    fn __next__(
        inout self,
    ) -> Reference[T, list_lifetime]:
        # Yield from `values` on the first step, from `next_values` afterwards,
        # then advance (current, next) one link along the chain.
        var element = self.values[].__get_ref(self.current_index.or_else(0)) if self.first else self.next_values[].__get_ref(self.current_index.or_else(0))
        self.first = False
        self.current_index = self.next_index
        # Look up the link after the (new) current element; -1 probes a
        # guaranteed-absent sparse-array slot when current is already None.
        var next = self.next_next_values_index[].get(self.current_index.or_else(-1))
        self.next_index = Optional(int(next.or_else(-1))) if next else None
        return element[]
    fn __len__(self) -> Int:
        # Remaining length capped at 2: 0 = exhausted, 1 = exactly one left,
        # 2 = at least two left. Callers only test this for non-zero to decide
        # whether to keep iterating.
        if not self.current_index:
            return 0
        if not self.next_index:
            return 1
        return 2
struct MultiDict[
    V: CollectionElement,
    hash: fn(KeyRef) -> UInt64 = ahash,
    KeyCountType: DType = DType.uint32,
    NextKeyCountType: DType = DType.uint16,
    KeyOffsetType: DType = DType.uint32,
    caching_hashes: Bool = True,
](Sized):
    """Open-addressing (linear probing) hash table that can store multiple
    values per key.

    Layout:
    - `slot_to_index` maps a probe slot to a 1-based key index (0 = empty).
    - The first value of a key sits in `values` at `key_index - 1`.
    - Additional values for the same key live in `next_values`, chained via
      the `next_values_index` / `next_next_values_index` sparse arrays.
    - When `caching_hashes` is True, `key_hashes` memoizes each occupied
      slot's hash so probing can skip most byte-wise key comparisons.
    """
    var keys: KeysContainer[KeyOffsetType]
    var key_hashes: DTypePointer[KeyCountType]
    var values: List[V]
    var next_values_index: SparseArray[NextKeyCountType]
    var next_values: List[V]
    var next_next_values_index: SparseArray[NextKeyCountType]
    var slot_to_index: DTypePointer[KeyCountType]
    var count: Int
    var capacity: Int
    var key_builder: SingleKeyBuilder

    fn __init__(inout self, capacity: Int = 16):
        constrained[
            KeyCountType == DType.uint8 or
            KeyCountType == DType.uint16 or
            KeyCountType == DType.uint32 or
            KeyCountType == DType.uint64,
            "KeyCountType needs to be an unsigned integer"
        ]()
        constrained[
            NextKeyCountType == DType.uint8 or
            NextKeyCountType == DType.uint16 or
            NextKeyCountType == DType.uint32 or
            NextKeyCountType == DType.uint64,
            "NextKeyCountType needs to be an unsigned integer"
        ]()
        self.count = 0
        if capacity <= 8:
            self.capacity = 8
        else:
            # Round the requested capacity up to a power of two so the modulo
            # in probing can be computed with a bit mask.
            var icapacity = Int64(capacity)
            self.capacity = capacity if pop_count(icapacity) == 1 else (
                1 << int(bit_width(icapacity))
            )
        self.keys = KeysContainer[KeyOffsetType](capacity)
        self.key_builder = SingleKeyBuilder()
        @parameter
        if caching_hashes:
            self.key_hashes = DTypePointer[KeyCountType].alloc(self.capacity)
        else:
            self.key_hashes = DTypePointer[KeyCountType].alloc(0)
        self.values = List[V](capacity=capacity)
        self.slot_to_index = DTypePointer[KeyCountType].alloc(self.capacity)
        memset_zero(self.slot_to_index, self.capacity)
        #TODO: Think about having an optional here or an empty List
        self.next_values = List[V]()
        self.next_values_index = SparseArray[NextKeyCountType]()
        self.next_next_values_index = SparseArray[NextKeyCountType]()

    fn __copyinit__(inout self, existing: Self):
        self.count = existing.count
        self.capacity = existing.capacity
        self.keys = existing.keys
        # BUGFIX: copy the builder from `existing`. The original read the
        # *uninitialized* destination field (`self.key_builder = self.key_builder`).
        self.key_builder = existing.key_builder
        @parameter
        if caching_hashes:
            self.key_hashes = DTypePointer[KeyCountType].alloc(self.capacity)
            memcpy(self.key_hashes, existing.key_hashes, self.capacity)
        else:
            self.key_hashes = DTypePointer[KeyCountType].alloc(0)
        self.values = existing.values
        self.slot_to_index = DTypePointer[KeyCountType].alloc(self.capacity)
        memcpy(self.slot_to_index, existing.slot_to_index, self.capacity)
        self.next_values = existing.next_values
        self.next_values_index = existing.next_values_index
        self.next_next_values_index = existing.next_next_values_index

    fn __moveinit__(inout self, owned existing: Self):
        self.count = existing.count
        self.capacity = existing.capacity
        self.keys = existing.keys^
        self.key_builder = existing.key_builder^
        self.key_hashes = existing.key_hashes
        self.values = existing.values^
        self.slot_to_index = existing.slot_to_index
        self.next_values = existing.next_values^
        self.next_values_index = existing.next_values_index^
        self.next_next_values_index = existing.next_next_values_index^

    fn __del__(owned self):
        # Only the raw pointers need explicit frees; owned containers clean
        # themselves up.
        self.slot_to_index.free()
        self.key_hashes.free()

    fn __len__(self) -> Int:
        """Number of distinct keys (not values)."""
        return self.count

    fn put[T: Keyable](inout self, key: T, value: V) raises:
        """Insert `value` under `key`. Repeated puts with the same key
        accumulate values rather than overwrite."""
        # Keep the load factor below ~0.87 to bound probe lengths.
        if self.count / self.capacity >= 0.87:
            self._rehash()
        # Serialize the key into the container; it is dropped again in
        # `_add_next` if the key turns out to exist already.
        key.accept(self.keys)
        self.keys.end_key()
        var key_ref = self.keys.get_last()
        var key_hash = hash(key_ref).cast[KeyCountType]()
        var modulo_mask = self.capacity - 1
        var slot = int(key_hash & modulo_mask)
        while True:
            var key_index = int(self.slot_to_index.load(slot))
            if key_index == 0:
                # Empty slot: claim it for the new key.
                @parameter
                if caching_hashes:
                    self.key_hashes.store(slot, key_hash)
                self.values.append(value)
                self.count += 1
                self.slot_to_index.store(slot, SIMD[KeyCountType, 1](self.keys.count))
                return
            @parameter
            if caching_hashes:
                # Compare cached hashes first; only do the byte-wise key
                # comparison on a hash match.
                var other_key_hash = self.key_hashes[slot]
                if other_key_hash == key_hash:
                    var other_key = self.keys[key_index - 1]
                    if eq(other_key, key_ref):
                        self._add_next(value, key_index)
                        return
            else:
                var other_key = self.keys[key_index - 1]
                if eq(other_key, key_ref):
                    self._add_next(value, key_index)
                    return
            # Collision: linear probing to the next slot.
            slot = (slot + 1) & modulo_mask

    @always_inline
    fn _add_next(inout self, value: V, key_index: Int):
        """Chain `value` onto an existing key's value list and drop the
        duplicate key that `put` just serialized."""
        self.next_values.append(value)
        var next_index = self.next_values_index.get(key_index - 1)
        if not next_index:
            # Second value for this key: start the chain.
            self.next_values_index[key_index - 1] = len(self.next_values) - 1
        else:
            # Walk to the end of the chain and append there.
            var index = int(next_index.value())
            var next_next_index = self.next_next_values_index.get(index)
            while next_next_index:
                index = int(next_next_index.value())
                next_next_index = self.next_next_values_index.get(index)
            self.next_next_values_index[index] = len(self.next_values) - 1
        self.keys.drop_last()

    @always_inline
    fn _rehash(inout self) raises:
        """Double the capacity and redistribute all occupied slots."""
        var old_slot_to_index = self.slot_to_index
        var old_capacity = self.capacity
        self.capacity <<= 1
        self.slot_to_index = DTypePointer[KeyCountType].alloc(self.capacity)
        memset_zero(self.slot_to_index, self.capacity)
        var key_hashes = self.key_hashes
        @parameter
        if caching_hashes:
            key_hashes = DTypePointer[KeyCountType].alloc(self.capacity)
        var modulo_mask = self.capacity - 1
        for i in range(old_capacity):
            if old_slot_to_index[i] == 0:
                continue
            # Reuse the cached hash when available; otherwise recompute it
            # from the stored key bytes.
            var key_hash = SIMD[KeyCountType, 1](0)
            @parameter
            if caching_hashes:
                key_hash = self.key_hashes[i]
            else:
                key_hash = hash(self.keys[int(old_slot_to_index[i] - 1)]).cast[KeyCountType]()
            var slot = int(key_hash & modulo_mask)
            while True:
                var key_index = int(self.slot_to_index.load(slot))
                if key_index == 0:
                    self.slot_to_index.store(slot, old_slot_to_index[i])
                    break
                else:
                    slot = (slot + 1) & modulo_mask
            @parameter
            if caching_hashes:
                key_hashes[slot] = key_hash
        @parameter
        if caching_hashes:
            self.key_hashes.free()
            self.key_hashes = key_hashes
        old_slot_to_index.free()

    @always_inline
    fn get[T: Keyable](inout self, key: T) raises -> List[V]:
        """Return all values stored under `key` (empty list when absent)."""
        var result = List[V]()
        self.key_builder.reset()
        key.accept(self.key_builder)
        var key_ref = self.key_builder.get_key()
        var key_index = self._find_key_index(key_ref)
        if key_index == 0:
            return result
        result.append(self.values[key_index - 1])
        # Follow the chain of extra values, if any.
        var next_index = self.next_values_index.get(key_index - 1)
        if not next_index:
            return result
        var index = int(next_index.value())
        result.append(self.next_values[index])
        var next_next_index = self.next_next_values_index.get(index)
        while next_next_index:
            index = int(next_next_index.value())
            result.append(self.next_values[index])
            next_next_index = self.next_next_values_index.get(index)
        return result

    fn get_itter[T: Keyable](inout self, key: T) raises -> _ValuesIter[V, NextKeyCountType, __lifetime_of(self)]:
        """Return a lazy iterator over the values stored under `key`.

        (Name kept as `get_itter` — public API; note the spelling.)
        """
        self.key_builder.reset()
        key.accept(self.key_builder)
        var key_ref = self.key_builder.get_key()
        var key_index = self._find_key_index(key_ref)
        if key_index == 0:
            # Unknown key: iterator starts exhausted.
            return _ValuesIter(None, None, self.values, self.next_values, self.next_next_values_index, True)
        var next_index = self.next_values_index.get(key_index - 1)
        if not next_index:
            # Exactly one value.
            return _ValuesIter(Optional(key_index - 1), None, self.values, self.next_values, self.next_next_values_index, True)
        return _ValuesIter(Optional(key_index - 1), Optional(int(next_index.value())), self.values, self.next_values, self.next_next_values_index, True)

    fn _find_key_index(self, key_ref: KeyRef) raises -> Int:
        """Probe for `key_ref`; return its 1-based key index, or 0 if absent."""
        var key_hash = hash(key_ref).cast[KeyCountType]()
        var modulo_mask = self.capacity - 1
        var slot = int(key_hash & modulo_mask)
        while True:
            var key_index = int(self.slot_to_index.load(slot))
            if key_index == 0:
                return key_index
            @parameter
            if caching_hashes:
                var other_key_hash = self.key_hashes[slot]
                if key_hash == other_key_hash:
                    var other_key = self.keys[key_index - 1]
                    if eq(other_key, key_ref):
                        return key_index
            else:
                var other_key = self.keys[key_index - 1]
                if eq(other_key, key_ref):
                    return key_index
            slot = (slot + 1) & modulo_mask

    fn debug(self) raises:
        """Dump internal state (slots, keys, cached hashes, chains)."""
        print("Dict count:", self.count, "and capacity:", self.capacity)
        print("KeyMap:")
        for i in range(self.capacity):
            var end = ", " if i < self.capacity - 1 else ""
            print(self.slot_to_index.load(i), end=end)
        print("Keys:")
        self.keys.print_keys()
        @parameter
        if caching_hashes:
            print("KeyHashes:")
            for i in range(self.capacity):
                var end = ", " if i < self.capacity - 1 else ""
                if self.slot_to_index.load(i) > 0:
                    print(self.key_hashes.load(i), end=end)
                else:
                    print(0, end=end)
        print("Next Values:")
        self.next_values_index.debug()
        print("Next Next Values:")
        self.next_next_values_index.debug()
<filename>compact-dict/generic_dict/single_key_builder.mojo
from .keys_container import KeysBuilder, KeyRef
struct SingleKeyBuilder(KeysBuilder):
    """Accumulates the serialized bytes of exactly one key in a growable
    byte buffer; `reset` rewinds it so the buffer can be reused for the
    next lookup key."""
    var key: DTypePointer[DType.uint8]
    var allocated_bytes: Int
    var key_size: Int

    fn __init__(inout self, bytes: Int = 64):
        self.allocated_bytes = bytes
        self.key = DTypePointer[DType.uint8].alloc(self.allocated_bytes)
        self.key_size = 0

    fn __copyinit__(inout self, existing: Self):
        # Deep-copy the byte buffer at the same capacity.
        self.allocated_bytes = existing.allocated_bytes
        self.key_size = existing.key_size
        self.key = DTypePointer[DType.uint8].alloc(existing.allocated_bytes)
        memcpy(self.key, existing.key, existing.allocated_bytes)

    fn __moveinit__(inout self, owned existing: Self):
        # Steal the buffer pointer; no allocation or copy needed.
        self.key = existing.key
        self.allocated_bytes = existing.allocated_bytes
        self.key_size = existing.key_size

    fn __del__(owned self):
        self.key.free()

    @always_inline
    fn add[T: DType, size: Int](inout self, value: SIMD[T, size]):
        """Append the raw bytes of a SIMD `value` to the key."""
        var appended = size * T.sizeof()
        var write_at = self.key_size
        self.key_size = write_at + appended
        # Grow the buffer geometrically (x1.5) until the new size fits.
        var grew = False
        while self.key_size > self.allocated_bytes:
            self.allocated_bytes += self.allocated_bytes >> 1
            grew = True
        if grew:
            var bigger = DTypePointer[DType.uint8].alloc(self.allocated_bytes)
            memcpy(bigger, self.key, write_at)
            self.key.free()
            self.key = bigger
        self.key.store(write_at, bitcast[DType.uint8, size * T.sizeof()](value))

    @always_inline
    fn add_buffer[T: DType](inout self, pointer: DTypePointer[T], size: Int):
        """Append `size` elements of dtype `T` (raw bytes) to the key."""
        var appended = size * T.sizeof()
        var write_at = self.key_size
        self.key_size = write_at + appended
        var grew = False
        while self.key_size > self.allocated_bytes:
            self.allocated_bytes += self.allocated_bytes >> 1
            grew = True
        if grew:
            var bigger = DTypePointer[DType.uint8].alloc(self.allocated_bytes)
            memcpy(bigger, self.key, write_at)
            self.key.free()
            self.key = bigger
        memcpy(self.key.offset(write_at), pointer.bitcast[DType.uint8](), appended)

    @always_inline
    fn get_key(self) -> KeyRef:
        """Non-owning view of the bytes accumulated so far."""
        return KeyRef(self.key, self.key_size)

    @always_inline
    fn reset(inout self):
        """Rewind to empty; keeps the allocation for reuse."""
        self.key_size = 0
<filename>compact-dict/generic_dict/sparse_array.mojo
from collections import Optional
from bit import pop_count
from tensor import Tensor, TensorSpec
struct SparseArray[T: DType]:
    """Bitmask-backed sparse array of scalars.

    `mask` holds one presence bit per logical index; `values` stores only
    the present elements, densely packed in logical-index order. Lookup
    rank is computed by popcounting the mask up to the queried bit.
    """
    var mask: DTypePointer[DType.uint8]
    var values: DTypePointer[T]
    var mask_size: Int
    var values_count: Int
    var values_capacity: Int

    fn __init__(inout self, capacity: Int = 8):
        var _capacity = capacity if capacity >= 8 else 8
        # ceil(_capacity / 8) bytes of presence bits.
        self.mask_size = -(-_capacity >> 3)
        self.mask = DTypePointer[DType.uint8].alloc(self.mask_size)
        memset_zero(self.mask, self.mask_size)
        self.values_capacity = 4
        self.values_count = 0
        self.values = DTypePointer[T].alloc(self.values_capacity)

    fn __copyinit__(inout self, existing: Self):
        self.mask_size = existing.mask_size
        self.values_count = existing.values_count
        self.values_capacity = existing.values_capacity
        self.mask = DTypePointer[DType.uint8].alloc(self.mask_size)
        memcpy(self.mask, existing.mask, self.mask_size)
        self.values = DTypePointer[T].alloc(self.values_capacity)
        memcpy(self.values, existing.values, self.values_count)

    fn __moveinit__(inout self, owned existing: Self):
        self.mask_size = existing.mask_size
        self.values_count = existing.values_count
        self.values_capacity = existing.values_capacity
        self.mask = existing.mask
        self.values = existing.values

    fn __del__(owned self):
        self.mask.free()
        self.values.free()

    @always_inline
    fn __contains__(self, index: Int) -> Bool:
        """True when a value is stored at logical `index`."""
        var offset = index >> 3
        var bit_index = index & 7
        return self.contains(offset, bit_index)

    @always_inline
    fn contains(self, offset: Int, bit_index: Int) -> Bool:
        # Out-of-range byte offsets are treated as absent.
        return offset < self.mask_size and self.mask.load(offset) & (1 << bit_index) != 0

    fn __setitem__(inout self, index: Int, value: SIMD[T, 1]):
        """Store `value` at logical `index`, growing mask and dense storage
        as needed and shifting later values to keep them index-ordered."""
        var offset = index >> 3
        var bit_index = index & 7
        if self.mask_size <= offset:
            # Extend the presence bitmask up to the byte holding `index`.
            var mask = DTypePointer[DType.uint8].alloc(offset + 1)
            memcpy(mask, self.mask, self.mask_size)
            memset_zero(mask.offset(self.mask_size), offset + 1 - self.mask_size)
            self.mask.free()
            self.mask = mask
            self.mask_size = offset + 1
        var p = self.mask.offset(offset)
        var mask = p.load()
        if self.contains(offset, bit_index):
            # Overwrite in place; rank is unchanged.
            self.values.store(self._value_index(offset, bit_index), value)
            return
        p.store(mask | (1 << bit_index))
        if self.values_capacity <= self.values_count + 1:
            # Grow dense storage geometrically (x1.5).
            var values_capacity = self.values_capacity + (self.values_capacity >> 1)
            var values = DTypePointer[T].alloc(values_capacity)
            memcpy(values, self.values, self.values_count)
            self.values.free()
            self.values = values
            self.values_capacity = values_capacity
        # Shift values after the insertion point one slot right.
        var value_index = self._value_index(offset, bit_index)
        for i in range(self.values_count, value_index, -1):
            self.values.store(i, self.values.load(i-1))
        self.values.store(value_index, value)
        self.values_count += 1

    fn get(self, index: Int) -> Optional[SIMD[T, 1]]:
        """Return the value at logical `index`, or None when absent."""
        var offset = index >> 3
        var bit_index = index & 7
        if not self.contains(offset, bit_index):
            return None
        return self.values.load(self._value_index(offset, bit_index))

    @always_inline
    fn _value_index(self, offset: Int, bit_index: Int) -> Int:
        """Rank of the bit (offset, bit_index) among all set bits, i.e. the
        element's position in dense `values`; -1 when the bit is not set."""
        if not self.contains(offset, bit_index):
            return -1
        alias width = 32
        var cursor = 0
        var result = 0
        # Popcount whole 32-byte chunks strictly before the target byte.
        while cursor + width < offset:
            var v = self.mask.load[width=width](cursor)
            result += int(pop_count(v).cast[DType.int16]().reduce_add[1]())
            cursor += width
        # Popcount remaining bytes up to and including the target byte.
        while cursor <= offset:
            var v = self.mask.load(cursor)
            result += int(pop_count(v))
            cursor += 1
        # Remove bits above `bit_index` in the target byte, then convert the
        # 1-based rank to a 0-based index.
        result -= int(pop_count(self.mask.load(offset) >> (bit_index + 1)))
        return result - 1

    fn dense_values_list(self) -> List[Scalar[T]]:
        """Copy the present values, in logical-index order, into a List."""
        var data = UnsafePointer[Scalar[T]].alloc(self.values_count)
        memcpy(data, self.values, self.values_count)
        return List[Scalar[T]](unsafe_pointer=data, size=self.values_count, capacity=self.values_count)

    fn debug(self):
        """Dump mask bytes and dense values."""
        print("(" + str(self.mask_size) + ")[")
        for i in range(self.mask_size):
            var end = ", " if i < self.mask_size - 1 else ""
            print(self.mask.load(i), end=end)
        print("]")
        print("(" + str(self.values_count) + ")[")
        for i in range(self.values_count):
            # BUGFIX: separator must be based on `values_count` (the loop
            # bound), not `mask_size`; a trailing ", " leaked whenever
            # values_count != mask_size.
            var end = ", " if i < self.values_count - 1 else ""
            print(self.values.load(i), end=end)
        print("]")
<filename>compact-dict/generic_dict/__init__.mojo
from .dict import Dict
from .keys_container import Keyable, KeysBuilder
from .multi_dict import MultiDict
from .sparse_array import SparseArray | compact-dict/generic_dict/__init__.mojo | false |