file_path
stringlengths
7
180
content
stringlengths
0
811k
repo
stringclasses
11 values
nodegen/node/split_to_sequence.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Split_to_sequence(RunAll):
    """Generate SplitToSequence reference tests (u32 and FP16x16 variants).

    Each inner case builds a random input tensor, slices it into the expected
    output sequence with numpy, and hands both to ``make_test`` together with
    the Cairo expression under test.
    """

    @staticmethod
    def split_to_sequence_u32():
        def as_u32_tensors(parts):
            # Wrap every expected split in a U32 Tensor for make_test.
            return [Tensor(Dtype.U32, p.shape, p.flatten()) for p in parts]

        def split_to_sequence_1D():
            x = np.random.randint(0, 255, 6).astype(np.uint32)
            _x = Tensor(Dtype.U32, x.shape, x.flatten())

            # Three equal chunks of two along axis 0.
            _y = as_u32_tensors([x[0:2], x[2:4], x[4:6]])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),)))",
                "split_to_sequence_u32_1d_equal_parts")

            # Variable-sized chunks (2 and 4 elements).
            _y = as_u32_tensors([x[0:2], x[2:6]])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))",
                "split_to_sequence_u32_1d_variable_parts")

        def split_to_sequence_2D():
            x = np.random.randint(0, 255, (2, 6)).astype(np.uint32)
            _x = Tensor(Dtype.U32, x.shape, x.flatten())

            # Two equal chunks of three columns along axis 1.
            _y = as_u32_tensors([x[0:2, 0:3], x[0:2, 3:6]])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![2].span(),)))",
                "split_to_sequence_u32_2d_equal_parts")

            # Variable-sized chunks (2 and 4 columns).
            _y = as_u32_tensors([x[0:2, 0:2], x[0:2, 2:6]])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))",
                "split_to_sequence_u32_2d_variable_parts")

        def split_to_sequence_zero_size():
            # 1-dimensional tensor with dimension_size=0.
            x = np.array([]).astype(np.uint32)
            _x = Tensor(Dtype.U32, x.shape, x.flatten())

            # Split the empty tensor into three tensors of size zero.
            _y = as_u32_tensors([x, x, x])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 0, 0].span(),)))",
                "split_to_sequence_u32_zero_size")

        def split_to_sequence_1d_uneven():
            # 7 elements in chunks of size 4? No: split count 4 over 7 elements
            # gives sizes 2, 2, 2 and a short remainder of 1.
            x = np.random.randint(0, 255, 7).astype(np.uint32)
            _x = Tensor(Dtype.U32, x.shape, x.flatten())
            _y = as_u32_tensors([x[0:2], x[2:4], x[4:6], x[6:7]])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![4].span(),)))",
                "split_to_sequence_u32_1d_uneven")

        def split_to_sequence_2d_uneven():
            # 8 columns in chunks of 3 -> sizes 3, 3 and a short remainder of 2.
            x = np.random.randint(0, 255, (2, 8)).astype(np.uint32)
            _x = Tensor(Dtype.U32, x.shape, x.flatten())
            _y = as_u32_tensors([x[0:2, 0:3], x[0:2, 3:6], x[0:2, 6:8]])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),)))",
                "split_to_sequence_u32_2d_uneven")

        def split_to_sequence_2d_scalar():
            # split=None -> one single-column chunk per slice along axis 1.
            x = np.random.randint(0, 255, (2, 8)).astype(np.uint32)
            _x = Tensor(Dtype.U32, x.shape, x.flatten())
            _y = as_u32_tensors([x[0:2, i:i + 1] for i in range(8)])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(1, 1, Option::None(()))",
                "split_to_sequence_2d_scalar")

        def split_to_sequence_2d_nokeepdims():
            # split=None with keepdims=0.
            # NOTE(review): the expected outputs keep the split axis
            # (shape (2, 1)); with keepdims=0 one might expect shape (2,) —
            # confirm against the Cairo implementation.
            x = np.random.randint(0, 255, (2, 8)).astype(np.uint32)
            _x = Tensor(Dtype.U32, x.shape, x.flatten())
            _y = as_u32_tensors([x[0:2, i:i + 1] for i in range(8)])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(1, 0, Option::None(()))",
                "split_to_sequence_2d_nokeepdims")

        def split_to_sequence_1d_nokeepdims():
            x = np.random.randint(0, 255, 8).astype(np.uint32)
            _x = Tensor(Dtype.U32, x.shape, x.flatten())
            _y = as_u32_tensors([x[i:i + 1] for i in range(8)])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(0, 0, Option::None(()))",
                "split_to_sequence_1d_nokeepdims")

        split_to_sequence_1D()
        split_to_sequence_2D()
        split_to_sequence_zero_size()
        split_to_sequence_1d_uneven()
        split_to_sequence_2d_uneven()
        split_to_sequence_2d_scalar()
        split_to_sequence_1d_nokeepdims()
        split_to_sequence_2d_nokeepdims()

    @staticmethod
    def split_to_sequence_fp16x16():
        def as_fp_tensors(parts):
            # Values are already FP16x16 fixed-point integers; re-assert int64
            # (as the original did) and wrap each expected split in a Tensor.
            return [
                Tensor(Dtype.FP16x16, p.shape, p.astype(np.int64).flatten())
                for p in parts
            ]

        def split_to_sequence_1D():
            x = to_fp(np.random.randint(-127, 127, 6).astype(np.int64), FixedImpl.FP16x16)
            _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())

            # Three equal chunks of two along axis 0.
            _y = as_fp_tensors([x[0:2], x[2:4], x[4:6]])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),)))",
                "split_to_sequence_fp16x16_1d_equal_parts")

            # Variable-sized chunks (2 and 4 elements).
            _y = as_fp_tensors([x[0:2], x[2:6]])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))",
                "split_to_sequence_fp16x16_1d_variable_parts")

        def split_to_sequence_2D():
            x = to_fp(np.random.randint(-127, 127, (2, 6)).astype(np.int64), FixedImpl.FP16x16)
            _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())

            # Two equal chunks of three columns along axis 1.
            _y = as_fp_tensors([x[0:2, 0:3], x[0:2, 3:6]])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![2].span(),)))",
                "split_to_sequence_fp16x16_2d_equal_parts")

            # Variable-sized chunks (2 and 4 columns).
            _y = as_fp_tensors([x[0:2, 0:2], x[0:2, 2:6]])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))",
                "split_to_sequence_fp16x16_2d_variable_parts")

        def split_to_sequence_zero_size():
            # 1-dimensional tensor with dimension_size=0.
            # (The original applied .astype(np.int64) twice; once is enough.)
            x = to_fp(np.array([]).astype(np.int64), FixedImpl.FP16x16)
            _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())

            # Split the empty tensor into three tensors of size zero.
            empty = np.array([]).astype(np.int64)
            _y = as_fp_tensors([empty, empty, empty])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 0, 0].span(),)))",
                "split_to_sequence_fp16x16_zero_size")

        def split_to_sequence_1d_uneven():
            # 7 elements, split count 4 -> sizes 2, 2, 2 and a remainder of 1.
            x = to_fp(np.random.randint(-127, 127, 7).astype(np.int64), FixedImpl.FP16x16)
            _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            _y = as_fp_tensors([x[0:2], x[2:4], x[4:6], x[6:7]])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![4].span())))",
                "split_to_sequence_fp16x16_1d_uneven")

        def split_to_sequence_2d_uneven():
            # 8 columns in chunks of 3 -> sizes 3, 3 and a remainder of 2.
            x = to_fp(np.random.randint(-127, 127, (2, 8)).astype(np.int64), FixedImpl.FP16x16)
            _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            _y = as_fp_tensors([x[0:2, 0:3], x[0:2, 3:6], x[0:2, 6:8]])
            make_test(
                [_x], _y,
                "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),)))",
                "split_to_sequence_fp16x16_2d_uneven")

        split_to_sequence_1D()
        split_to_sequence_2D()
        split_to_sequence_zero_size()
        split_to_sequence_1d_uneven()
        split_to_sequence_2d_uneven()
https://github.com/gizatechxyz/orion
nodegen/node/sqrt.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Sqrt(RunAll):
    """Generate element-wise square-root tests for both fixed-point formats."""

    @staticmethod
    def sqrt_fp8x23():
        # Non-negative inputs only: sqrt is undefined below zero.
        data = np.random.uniform(0, 6, (2, 2)).astype(np.float64)
        expected = np.sqrt(data)
        inp = Tensor(Dtype.FP8x23, data.shape, to_fp(data.flatten(), FixedImpl.FP8x23))
        out = Tensor(Dtype.FP8x23, expected.shape, to_fp(expected.flatten(), FixedImpl.FP8x23))
        make_test([inp], out, "input_0.sqrt()", "sqrt_fp8x23")

    @staticmethod
    def sqrt_fp16x16():
        # Non-negative inputs only: sqrt is undefined below zero.
        data = np.random.uniform(0, 6, (2, 2)).astype(np.float64)
        expected = np.sqrt(data)
        inp = Tensor(Dtype.FP16x16, data.shape, to_fp(data.flatten(), FixedImpl.FP16x16))
        out = Tensor(Dtype.FP16x16, expected.shape, to_fp(expected.flatten(), FixedImpl.FP16x16))
        make_test([inp], out, "input_0.sqrt()", "sqrt_fp16x16")
https://github.com/gizatechxyz/orion
nodegen/node/squeeze.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Squeeze(RunAll):
    """Generate Squeeze tests: drop axes 0 and 2 of a (1, 2, 1, 2, 1) input.

    The expected output is always the actual numpy squeeze of the input, so
    the generated Cairo test asserts a correct input/output pair.
    """

    @staticmethod
    def squeeze_i8():
        def squeeze():
            x = np.ones((1, 2, 1, 2, 1), dtype=np.int8)
            # Removing axes 0 and 2 turns (1, 2, 1, 2, 1) into (2, 2, 1).
            y = np.squeeze(x, axis=(0, 2))
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "squeeze_i8"
            make_test(
                [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
        squeeze()

    @staticmethod
    def squeeze():
        def squeeze():
            x = np.ones((1, 2, 1, 2, 1), dtype=np.int32)
            y = np.squeeze(x, axis=(0, 2))
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "squeeze"
            make_test(
                [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
        squeeze()

    @staticmethod
    def squeeze_u32():
        def squeeze():
            x = np.ones((1, 2, 1, 2, 1), dtype=np.uint32)
            y = np.squeeze(x, axis=(0, 2))
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "squeeze_u32"
            make_test(
                [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
        squeeze()

    @staticmethod
    def squeeze_fP16x16():
        def squeeze():
            x = to_fp(np.random.randint(0, 255, (1, 2, 1, 2, 1)
                                        ).astype(np.int64), FixedImpl.FP16x16)
            # BUGFIX: the expected output used to be an unrelated random
            # tensor; it must be the squeeze of the actual input.
            y = np.squeeze(x, axis=(0, 2))
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
            name = "squeeze_fP16x16"
            make_test(
                [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
        squeeze()

    @staticmethod
    def squeeze_fP8x23():
        def squeeze():
            x = to_fp(np.random.randint(0, 255, (1, 2, 1, 2, 1)
                                        ).astype(np.int64), FixedImpl.FP8x23)
            # BUGFIX: the expected output used to be an unrelated random
            # tensor; it must be the squeeze of the actual input.
            y = np.squeeze(x, axis=(0, 2))
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
            name = "squeeze_fP8x23"
            make_test(
                [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
        squeeze()
https://github.com/gizatechxyz/orion
nodegen/node/sub.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Sub(RunAll):
    """Generate element-wise subtraction tests, plain and broadcast."""

    @staticmethod
    def sub_u32():
        def default():
            # Minuend kept >= subtrahend so the unsigned result never wraps.
            a = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
            b = np.random.randint(0, 3, (3, 3, 3)).astype(np.uint32)
            diff = a - b
            t = [Tensor(Dtype.U32, v.shape, v.flatten()) for v in (a, b, diff)]
            make_test(t[:2], t[2], "input_0 - input_1", "sub_u32")

        def broadcast():
            a = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
            b = np.random.randint(0, 3, (1, 3, 1)).astype(np.uint32)
            diff = a - b
            t = [Tensor(Dtype.U32, v.shape, v.flatten()) for v in (a, b, diff)]
            make_test(t[:2], t[2], "input_0 - input_1", "sub_u32_broadcast")

        default()
        broadcast()

    @staticmethod
    def sub_i32():
        def default():
            a = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            b = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            diff = a - b
            t = [Tensor(Dtype.I32, v.shape, v.flatten()) for v in (a, b, diff)]
            make_test(t[:2], t[2], "input_0 - input_1", "sub_i32")

        def broadcast():
            a = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            b = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int32)
            diff = a - b
            t = [Tensor(Dtype.I32, v.shape, v.flatten()) for v in (a, b, diff)]
            make_test(t[:2], t[2], "input_0 - input_1", "sub_i32_broadcast")

        default()
        broadcast()

    @staticmethod
    def sub_i8():
        def default():
            a = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            b = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            diff = a - b
            t = [Tensor(Dtype.I8, v.shape, v.flatten()) for v in (a, b, diff)]
            make_test(t[:2], t[2], "input_0 - input_1", "sub_i8")

        def broadcast():
            a = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            b = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int8)
            diff = a - b
            t = [Tensor(Dtype.I8, v.shape, v.flatten()) for v in (a, b, diff)]
            make_test(t[:2], t[2], "input_0 - input_1", "sub_i8_broadcast")

        default()
        broadcast()

    @staticmethod
    def sub_fp8x23():
        def default():
            a = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            b = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            diff = a - b
            t = [
                Tensor(Dtype.FP8x23, v.shape, to_fp(v.flatten(), FixedImpl.FP8x23))
                for v in (a, b, diff)
            ]
            make_test(t[:2], t[2], "input_0 - input_1", "sub_fp8x23")

        def broadcast():
            a = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            b = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
            diff = a - b
            t = [
                Tensor(Dtype.FP8x23, v.shape, to_fp(v.flatten(), FixedImpl.FP8x23))
                for v in (a, b, diff)
            ]
            make_test(t[:2], t[2], "input_0 - input_1", "sub_fp8x23_broadcast")

        default()
        broadcast()

    @staticmethod
    def sub_fp16x16():
        def default():
            a = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            b = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            diff = a - b
            t = [
                Tensor(Dtype.FP16x16, v.shape, to_fp(v.flatten(), FixedImpl.FP16x16))
                for v in (a, b, diff)
            ]
            make_test(t[:2], t[2], "input_0 - input_1", "sub_fp16x16")

        def broadcast():
            a = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            b = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
            diff = a - b
            t = [
                Tensor(Dtype.FP16x16, v.shape, to_fp(v.flatten(), FixedImpl.FP16x16))
                for v in (a, b, diff)
            ]
            make_test(t[:2], t[2], "input_0 - input_1", "sub_fp16x16_broadcast")

        default()
        broadcast()
https://github.com/gizatechxyz/orion
nodegen/node/tanh.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Tanh(RunAll):
    """Generate element-wise hyperbolic-tangent tests for both fixed-point formats."""

    @staticmethod
    def tanh_fp8x23():
        data = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
        expected = np.tanh(data)
        inp = Tensor(Dtype.FP8x23, data.shape, to_fp(data.flatten(), FixedImpl.FP8x23))
        out = Tensor(Dtype.FP8x23, expected.shape, to_fp(expected.flatten(), FixedImpl.FP8x23))
        make_test([inp], out, "input_0.tanh()", "tanh_fp8x23")

    @staticmethod
    def tanh_fp16x16():
        data = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
        expected = np.tanh(data)
        inp = Tensor(Dtype.FP16x16, data.shape, to_fp(data.flatten(), FixedImpl.FP16x16))
        out = Tensor(Dtype.FP16x16, expected.shape, to_fp(expected.flatten(), FixedImpl.FP16x16))
        make_test([inp], out, "input_0.tanh()", "tanh_fp16x16")
https://github.com/gizatechxyz/orion
nodegen/node/thresholded_relu.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


class Thresholded_relu(RunAll):
    """Generate ThresholdedRelu tests: y = x where x > alpha (1.0), else 0.

    The alpha constant in the Cairo expression is the fixed-point encoding of
    1.0 (256 for FP8x23's fractional step shown, 65536 for FP16x16).
    """

    @staticmethod
    def thresholded_relu_fp8x23():
        alpha = 1.0
        data = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
        # Keep values strictly above alpha; everything else becomes zero.
        expected = np.where(data > alpha, data, 0.0)
        inp = Tensor(Dtype.FP8x23, data.shape, to_fp(data.flatten(), FixedImpl.FP8x23))
        out = Tensor(Dtype.FP8x23, expected.shape, to_fp(expected.flatten(), FixedImpl.FP8x23))
        make_test(
            [inp], out,
            "NNTrait::thresholded_relu(@input_0, @FixedTrait::new(256, false))",
            "thresholded_relu_fp8x23", Trait.NN)

    @staticmethod
    def thresholded_relu_fp16x16():
        alpha = 1.0
        data = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
        # Keep values strictly above alpha; everything else becomes zero.
        expected = np.where(data > alpha, data, 0.0)
        inp = Tensor(Dtype.FP16x16, data.shape, to_fp(data.flatten(), FixedImpl.FP16x16))
        out = Tensor(Dtype.FP16x16, expected.shape, to_fp(expected.flatten(), FixedImpl.FP16x16))
        make_test(
            [inp], out,
            "NNTrait::thresholded_relu(@input_0, @FixedTrait::new(65536, false))",
            "thresholded_relu_fp16x16", Trait.NN)
https://github.com/gizatechxyz/orion
nodegen/node/transpose.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Transpose(RunAll):
    """Generate 2D ([1, 0]) and 3D ([1, 2, 0]) transpose tests per dtype."""

    @staticmethod
    def transpose_u32():
        def two_d():
            data = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
            flipped = np.transpose(data, [1, 0])
            make_test(
                [Tensor(Dtype.U32, data.shape, data.flatten())],
                Tensor(Dtype.U32, flipped.shape, flipped.flatten()),
                "input_0.transpose(array![1, 0].span())",
                "transpose_u32_2d")

        def three_d():
            data = np.random.randint(0, 255, (2, 2, 2)).astype(np.uint32)
            flipped = np.transpose(data, [1, 2, 0])
            make_test(
                [Tensor(Dtype.U32, data.shape, data.flatten())],
                Tensor(Dtype.U32, flipped.shape, flipped.flatten()),
                "input_0.transpose(array![1, 2, 0].span())",
                "transpose_u32_3d")

        two_d()
        three_d()

    @staticmethod
    def transpose_i32():
        def two_d():
            data = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
            flipped = np.transpose(data, [1, 0])
            make_test(
                [Tensor(Dtype.I32, data.shape, data.flatten())],
                Tensor(Dtype.I32, flipped.shape, flipped.flatten()),
                "input_0.transpose(array![1, 0].span())",
                "transpose_i32_2d")

        def three_d():
            data = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int32)
            flipped = np.transpose(data, [1, 2, 0])
            make_test(
                [Tensor(Dtype.I32, data.shape, data.flatten())],
                Tensor(Dtype.I32, flipped.shape, flipped.flatten()),
                "input_0.transpose(array![1, 2, 0].span())",
                "transpose_i32_3d")

        two_d()
        three_d()

    @staticmethod
    def transpose_i8():
        def two_d():
            data = np.random.randint(-127, 127, (2, 2)).astype(np.int8)
            flipped = np.transpose(data, [1, 0])
            make_test(
                [Tensor(Dtype.I8, data.shape, data.flatten())],
                Tensor(Dtype.I8, flipped.shape, flipped.flatten()),
                "input_0.transpose(array![1, 0].span())",
                "transpose_i8_2d")

        def three_d():
            data = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8)
            flipped = np.transpose(data, [1, 2, 0])
            make_test(
                [Tensor(Dtype.I8, data.shape, data.flatten())],
                Tensor(Dtype.I8, flipped.shape, flipped.flatten()),
                "input_0.transpose(array![1, 2, 0].span())",
                "transpose_i8_3d")

        two_d()
        three_d()

    @staticmethod
    def transpose_fp8x23():
        def two_d():
            # Fixed-point values are transposed as raw integers.
            data = to_fp(np.random.randint(-127, 127, (2, 2)
                                           ).astype(np.int64), FixedImpl.FP8x23)
            flipped = np.transpose(data, [1, 0])
            make_test(
                [Tensor(Dtype.FP8x23, data.shape, data.flatten())],
                Tensor(Dtype.FP8x23, flipped.shape, flipped.flatten()),
                "input_0.transpose(array![1, 0].span())",
                "transpose_fp8x23_2d")

        def three_d():
            data = to_fp(np.random.randint(-127, 127, (2, 2, 2)
                                           ).astype(np.int64), FixedImpl.FP8x23)
            flipped = np.transpose(data, [1, 2, 0])
            make_test(
                [Tensor(Dtype.FP8x23, data.shape, data.flatten())],
                Tensor(Dtype.FP8x23, flipped.shape, flipped.flatten()),
                "input_0.transpose(array![1, 2, 0].span())",
                "transpose_fp8x23_3d")

        two_d()
        three_d()

    @staticmethod
    def transpose_fp16x16():
        def two_d():
            data = to_fp(np.random.randint(-127, 127, (2, 2)
                                           ).astype(np.int64), FixedImpl.FP16x16)
            flipped = np.transpose(data, [1, 0])
            make_test(
                [Tensor(Dtype.FP16x16, data.shape, data.flatten())],
                Tensor(Dtype.FP16x16, flipped.shape, flipped.flatten()),
                "input_0.transpose(array![1, 0].span())",
                "transpose_fp16x16_2d")

        def three_d():
            data = to_fp(np.random.randint(-127, 127, (2, 2, 2)
                                           ).astype(np.int64), FixedImpl.FP16x16)
            flipped = np.transpose(data, [1, 2, 0])
            make_test(
                [Tensor(Dtype.FP16x16, data.shape, data.flatten())],
                Tensor(Dtype.FP16x16, flipped.shape, flipped.flatten()),
                "input_0.transpose(array![1, 2, 0].span())",
                "transpose_fp16x16_3d")

        two_d()
        three_d()
https://github.com/gizatechxyz/orion
nodegen/node/trilu.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl

# (name suffix, input shape, diagonal offset k) for every generated case.
# Each entry is produced once for tril (upper=false) and once for triu
# (upper=true), matching the 18 hand-written cases per dtype in the
# original generator.
_TRILU_CASES = (
    ("", (4, 5), 0),                 # main diagonal
    ("_neg", (4, 5), -1),            # one diagonal below the main one
    ("_one_row", (3, 1, 5), 0),      # batch of single-row matrices
    ("_out_neg", (4, 5), -7),        # k entirely below the matrix
    ("_out_pos", (4, 5), 6),         # k entirely above the matrix
    ("_pos", (4, 5), 2),             # two diagonals above the main one
    ("_square", (2, 3, 3), 0),       # batch of square matrices
    ("_square_neg", (2, 3, 3), -1),  # square matrices, lower offset
    ("_zero", (3, 0, 5), 6),         # zero-sized axis (empty input)
)


def _make_trilu_tests(dt_name, dtype, np_dtype, low, high, fixed_impl=None):
    """Generate every tril/triu test case for one data type.

    Replaces ~18 copy-pasted nested generators per dtype with one
    table-driven loop; generated test names and Cairo trait strings are
    byte-identical to the originals.

    Args:
        dt_name: short dtype tag embedded in the generated test name.
        dtype: serialization ``Dtype`` of both input and output tensors.
        np_dtype: numpy dtype the random input is cast to.
        low, high: bounds forwarded to ``np.random.randint``.
        fixed_impl: optional ``FixedImpl``; when given, raw integers are
            converted to fixed-point before the triangular mask is applied.
    """
    for upper in (False, True):
        op = "triu" if upper else "tril"
        reference = np.triu if upper else np.tril
        for suffix, shape, k in _TRILU_CASES:
            x = np.random.randint(low, high, shape).astype(np_dtype)
            if fixed_impl is not None:
                x = to_fp(x, fixed_impl)
            y = reference(x, k=k)

            _x = Tensor(dtype, x.shape, x.flatten())
            _y = Tensor(dtype, y.shape, y.flatten())

            name = f"{op}_{dt_name}{suffix}"
            if name == "tril_i32_neg":
                # Preserve the one inconsistently ordered name used by the
                # original generator, since existing generated test files
                # may reference it.
                name = "tril_neg_i32"
            make_test(
                [_x], _y,
                f"input_0.trilu({'true' if upper else 'false'}, {k})",
                name,
            )


class Trilu(RunAll):
    """Test-case generators for the `trilu` (triangular part) operator."""

    @staticmethod
    def trilu_u32():
        _make_trilu_tests("u32", Dtype.U32, np.uint32, 0, 255)

    @staticmethod
    def trilu_i32():
        _make_trilu_tests("i32", Dtype.I32, np.int32, -127, 127)

    @staticmethod
    def trilu_i8():
        _make_trilu_tests("i8", Dtype.I8, np.int8, -127, 127)

    @staticmethod
    def trilu_fp8x23():
        _make_trilu_tests(
            "fp8x23", Dtype.FP8x23, np.int64, -127, 127, FixedImpl.FP8x23
        )

    @staticmethod
    def trilu_fp16x16():
        _make_trilu_tests(
            "fp16x16", Dtype.FP16x16, np.int64, -127, 127, FixedImpl.FP16x16
        )
https://github.com/gizatechxyz/orion
nodegen/node/unique.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl

from typing import Optional, Tuple


def _unsort_outputs(
    x: np.ndarray,
    axis: Optional[int],
    unique_values: np.ndarray,
    indices: np.ndarray,
    inverse_indices: np.ndarray,
    counts: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Unsort the result of np.unique().

    This is done because numpy unique does not retain original order (it sorts
    the output unique values).
    (see: https://github.com/numpy/numpy/issues/8621)

    Code taken from onnx:
    https://github.com/onnx/onnx/blob/main/onnx/backend/test/case/node/unique.py
    """
    # Positions of the unique values in first-occurrence order.
    argsorted_indices = np.argsort(indices)
    # Maps a sorted-order index to its first-occurrence-order index.
    inverse_indices_map = dict(
        zip(argsorted_indices, np.arange(len(argsorted_indices)))
    )
    indices = indices[argsorted_indices]
    unique_values = np.take(x, indices, axis=axis)
    inverse_indices = np.asarray(
        [inverse_indices_map[i] for i in inverse_indices], dtype=np.int32
    )
    counts = counts[argsorted_indices]
    return (unique_values, indices, inverse_indices, counts)


def _emit_unique_test(x, axis, is_sorted, name, fixed_impl=None):
    """Build reference outputs with np.unique and emit one generated test.

    Args:
        x: input array (raw float64 values when ``fixed_impl`` is given).
        axis: axis passed to np.unique, or None for the flattened input.
        is_sorted: when False, outputs are restored to first-occurrence order.
        name: generated test name.
        fixed_impl: fixed-point encoding for input/values tensors (FP16x16),
            or None for plain u32 tensors.
    """
    unique_values, indices, inverse_indices, counts = np.unique(
        x, axis=axis, return_index=True, return_inverse=True, return_counts=True
    )
    if not is_sorted:
        unique_values, indices, inverse_indices, counts = _unsort_outputs(
            x, axis, unique_values, indices, inverse_indices, counts
        )

    if fixed_impl is None:
        x_t = Tensor(Dtype.U32, x.shape, x.flatten())
        values_t = Tensor(Dtype.U32, unique_values.shape, unique_values.flatten())
    else:
        # Match the original generator: unique values are narrowed to float16
        # before fixed-point conversion.
        unique_values = unique_values.astype(np.float16)
        x_t = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), fixed_impl))
        values_t = Tensor(
            Dtype.FP16x16,
            unique_values.shape,
            to_fp(unique_values.flatten(), fixed_impl),
        )

    indices_t = Tensor(Dtype.I32, indices.shape, indices.flatten())
    inverse_t = Tensor(Dtype.I32, inverse_indices.shape, inverse_indices.flatten())
    counts_t = Tensor(Dtype.I32, counts.shape, counts.flatten())

    axis_expr = "Option::None(())" if axis is None else f"Option::Some({axis})"
    sorted_expr = "Option::Some(true)" if is_sorted else "Option::Some(false)"

    make_test(
        [x_t],
        (values_t, indices_t, inverse_t, counts_t),
        f"input_0.unique({axis_expr}, {sorted_expr})",
        name,
    )


class Unique(RunAll):
    @staticmethod
    def unique_u32():
        def without_axis_sorted():
            x = np.random.randint(0, 5, (3, 3, 3)).astype(np.uint32)
            _emit_unique_test(x, None, True, "unique_u32_without_axis_sorted")

        def without_axis_not_sorted():
            x = np.random.randint(0, 5, (3, 3, 3)).astype(np.uint32)
            _emit_unique_test(x, None, False, "unique_u32_without_axis_not_sorted")

        def with_axis_zero_sorted():
            x = np.random.randint(0, 5, (3, 3, 3)).astype(np.uint32)
            _emit_unique_test(x, 0, True, "unique_u32_with_axis_zero_sorted")

        def with_axis_zero_not_sorted():
            x = np.random.randint(0, 5, (3, 3, 3)).astype(np.uint32)
            _emit_unique_test(x, 0, False, "unique_u32_with_axis_zero_not_sorted")

        def with_axis_one_sorted():
            x = np.random.randint(0, 5, (3, 3, 3)).astype(np.uint32)
            _emit_unique_test(x, 1, True, "unique_u32_with_axis_one_sorted")

        def with_axis_one_not_sorted():
            x = np.random.randint(0, 5, (3, 3, 3)).astype(np.uint32)
            _emit_unique_test(x, 1, False, "unique_u32_with_axis_one_not_sorted")

        without_axis_sorted()
        without_axis_not_sorted()
        with_axis_zero_sorted()
        with_axis_zero_not_sorted()
        with_axis_one_sorted()
        with_axis_one_not_sorted()

    @staticmethod
    def unique_fp16x16():
        def without_axis_sorted():
            x = np.random.uniform(0, 3, (3, 3, 3)).astype(np.float64)
            _emit_unique_test(
                x, None, True, "unique_fp16x16_without_axis_sorted",
                fixed_impl=FixedImpl.FP16x16,
            )

        def with_axis_zero_sorted():
            x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
            _emit_unique_test(
                x, 0, True, "unique_fp16x16_with_axis_zero_sorted",
                fixed_impl=FixedImpl.FP16x16,
            )

        without_axis_sorted()
        with_axis_zero_sorted()
https://github.com/gizatechxyz/orion
nodegen/node/unsqueeze.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


def _emit_unsqueeze_test(data, dtype, axes, name):
    # Build the expected output by inserting the new axes in ascending
    # order, so each np.expand_dims call targets the already-grown rank.
    expected = data
    for axis in sorted(axes):
        expected = np.expand_dims(expected, axis=axis)

    tensor_in = Tensor(dtype, data.shape, data.flatten())
    tensor_out = Tensor(dtype, expected.shape, expected.flatten())
    axes_literal = ", ".join(str(a) for a in axes)

    make_test(
        [tensor_in],
        tensor_out,
        "input_0.unsqueeze(array![" + axes_literal + "].span())",
        name,
    )


class Unsqueeze(RunAll):
    @staticmethod
    def unsqueeze_u32():
        def two_dim():
            data = np.random.randint(0, 255, (2, 4)).astype(np.uint32)
            _emit_unsqueeze_test(data, Dtype.U32, [1, 4, 0], "unsqueeze_u32_2d")

        def three_dim():
            data = np.random.randint(0, 255, (20, 10, 5)).astype(np.uint32)
            _emit_unsqueeze_test(data, Dtype.U32, [5, 4, 2], "unsqueeze_u32_3d")

        two_dim()
        three_dim()

    @staticmethod
    def unsqueeze_i32():
        def two_dim():
            data = np.random.randint(-127, 127, (2, 4)).astype(np.int32)
            _emit_unsqueeze_test(data, Dtype.I32, [1, 4, 0], "unsqueeze_i32_2d")

        def three_dim():
            data = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int32)
            _emit_unsqueeze_test(data, Dtype.I32, [5, 4, 2], "unsqueeze_i32_3d")

        two_dim()
        three_dim()

    @staticmethod
    def unsqueeze_i8():
        def two_dim():
            data = np.random.randint(-127, 127, (2, 4)).astype(np.int8)
            _emit_unsqueeze_test(data, Dtype.I8, [1, 4, 0], "unsqueeze_i8_2d")

        def three_dim():
            data = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int8)
            _emit_unsqueeze_test(data, Dtype.I8, [5, 4, 2], "unsqueeze_i8_3d")

        two_dim()
        three_dim()

    @staticmethod
    def unsqueeze_fp8x23():
        def two_dim():
            # Values are already fixed-point encoded, so no further conversion
            # happens when the tensors are built.
            data = to_fp(
                np.random.randint(-127, 127, (2, 4)).astype(np.int64),
                FixedImpl.FP8x23,
            )
            _emit_unsqueeze_test(data, Dtype.FP8x23, [1, 4, 0], "unsqueeze_fp8x23_2d")

        def three_dim():
            data = to_fp(
                np.random.randint(-127, 127, (20, 10, 5)).astype(np.int64),
                FixedImpl.FP8x23,
            )
            _emit_unsqueeze_test(data, Dtype.FP8x23, [5, 4, 2], "unsqueeze_fp8x23_3d")

        two_dim()
        three_dim()

    @staticmethod
    def unsqueeze_fp16x16():
        def two_dim():
            data = to_fp(
                np.random.randint(-127, 127, (2, 4)).astype(np.int64),
                FixedImpl.FP16x16,
            )
            _emit_unsqueeze_test(data, Dtype.FP16x16, [1, 4, 0], "unsqueeze_fp16x16_2d")

        def three_dim():
            data = to_fp(
                np.random.randint(-127, 127, (20, 10, 5)).astype(np.int64),
                FixedImpl.FP16x16,
            )
            _emit_unsqueeze_test(data, Dtype.FP16x16, [5, 4, 2], "unsqueeze_fp16x16_3d")

        two_dim()
        three_dim()
https://github.com/gizatechxyz/orion
nodegen/node/where.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


def _as_tensor(arr, dtype, fixed_impl=None):
    # Wrap a numpy array as a Tensor, fixed-point encoding it when requested.
    if fixed_impl is None:
        return Tensor(dtype, arr.shape, arr.flatten())
    return Tensor(dtype, arr.shape, to_fp(arr.flatten(), fixed_impl))


def _emit_where_test(cond, x, y, dtype, name, fixed_impl=None):
    # Reference result via numpy, cast back to the operand dtype.
    selected = np.where(cond, x, y).astype(x.dtype)
    inputs = [
        _as_tensor(cond, dtype, fixed_impl),
        _as_tensor(x, dtype, fixed_impl),
        _as_tensor(y, dtype, fixed_impl),
    ]
    output = _as_tensor(selected, dtype, fixed_impl)
    make_test(inputs, output, "input_0.where(@input_1,@input_2)", name)


class Where(RunAll):
    @staticmethod
    def where_u32():
        def matching_shapes():
            cond = np.random.choice([1, 0], (3, 3, 3)).astype(np.uint32)
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            _emit_where_test(cond, x, y, Dtype.U32, "where_u32")

        def broadcasting():
            cond = np.random.choice([1, 0], (1, 1)).astype(np.uint32)
            x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
            _emit_where_test(cond, x, y, Dtype.U32, "where_u32_broadcast")

        matching_shapes()
        broadcasting()

    @staticmethod
    def where_i32():
        def matching_shapes():
            cond = np.random.choice([1, 0], (3, 3, 3)).astype(np.int32)
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
            _emit_where_test(cond, x, y, Dtype.I32, "where_i32")

        def broadcasting():
            cond = np.random.choice([1, 0], (1, 1)).astype(np.int32)
            x = np.random.randint(0, 6, (2, 2)).astype(np.int32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.int32)
            _emit_where_test(cond, x, y, Dtype.I32, "where_i32_broadcast")

        matching_shapes()
        broadcasting()

    @staticmethod
    def where_i8():
        def matching_shapes():
            cond = np.random.choice([1, 0], (3, 3, 3)).astype(np.int8)
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
            _emit_where_test(cond, x, y, Dtype.I8, "where_i8")

        def broadcasting():
            cond = np.random.choice([1, 0], (1, 1)).astype(np.int8)
            x = np.random.randint(0, 6, (2, 2)).astype(np.int8)
            y = np.random.randint(0, 6, (1, 2)).astype(np.int8)
            _emit_where_test(cond, x, y, Dtype.I8, "where_i8_broadcast")

        matching_shapes()
        broadcasting()

    @staticmethod
    def where_fp8x23():
        def matching_shapes():
            cond = np.random.choice([1, 0], (3, 3, 3)).astype(np.float64)
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.float64)
            _emit_where_test(cond, x, y, Dtype.FP8x23, "where_fp8x23", FixedImpl.FP8x23)

        def broadcasting():
            cond = np.random.choice([1, 0], (1, 1)).astype(np.float64)
            x = np.random.randint(0, 6, (2, 2)).astype(np.float64)
            y = np.random.randint(0, 6, (1, 2)).astype(np.float64)
            _emit_where_test(
                cond, x, y, Dtype.FP8x23, "where_fp8x23_broadcast", FixedImpl.FP8x23
            )

        matching_shapes()
        broadcasting()

    @staticmethod
    def where_fp16x16():
        def matching_shapes():
            cond = np.random.choice([1, 0], (3, 3, 3)).astype(np.float64)
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.float64)
            _emit_where_test(
                cond, x, y, Dtype.FP16x16, "where_fp16x16", FixedImpl.FP16x16
            )

        def broadcasting():
            cond = np.random.choice([1, 0], (1, 1)).astype(np.float64)
            x = np.random.randint(0, 6, (2, 2)).astype(np.float64)
            y = np.random.randint(0, 6, (1, 2)).astype(np.float64)
            _emit_where_test(
                cond, x, y, Dtype.FP16x16, "where_fp16x16_broadcast", FixedImpl.FP16x16
            )

        matching_shapes()
        broadcasting()
https://github.com/gizatechxyz/orion
nodegen/node/xor.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


def _emit_xor_test(x, y, dtype, name, fixed_impl=None):
    """Generate one logical-xor test case.

    Computes z = np.logical_xor(x, y) as the reference, wraps the operands as
    `dtype` tensors (fixed-point encoded when `fixed_impl` is given), and the
    result as a U32 tensor — xor always yields a 0/1 tensor regardless of the
    input dtype.
    """
    z = np.logical_xor(x, y)
    if fixed_impl is None:
        x_t = Tensor(dtype, x.shape, x.flatten())
        y_t = Tensor(dtype, y.shape, y.flatten())
    else:
        x_t = Tensor(dtype, x.shape, to_fp(x.flatten(), fixed_impl))
        y_t = Tensor(dtype, y.shape, to_fp(y.flatten(), fixed_impl))
    z_t = Tensor(Dtype.U32, z.shape, z.flatten())
    make_test([x_t, y_t], z_t, "input_0.xor(@input_1)", name)


class Xor(RunAll):
    @staticmethod
    def xor_u32():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            _emit_xor_test(x, y, Dtype.U32, "xor_u32")

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
            _emit_xor_test(x, y, Dtype.U32, "xor_u32_broadcast")

        default()
        broadcast()

    @staticmethod
    def xor_i32():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            _emit_xor_test(x, y, Dtype.I32, "xor_i32")

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.int32)
            _emit_xor_test(x, y, Dtype.I32, "xor_i32_broadcast")

        default()
        broadcast()

    @staticmethod
    def xor_i8():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            _emit_xor_test(x, y, Dtype.I8, "xor_i8")

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.int8)
            _emit_xor_test(x, y, Dtype.I8, "xor_i8_broadcast")

        default()
        broadcast()

    @staticmethod
    def xor_fp8x23():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            _emit_xor_test(x, y, Dtype.FP8x23, "xor_fp8x23", FixedImpl.FP8x23)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            _emit_xor_test(x, y, Dtype.FP8x23, "xor_fp8x23_broadcast", FixedImpl.FP8x23)

        default()
        broadcast()

    @staticmethod
    def xor_fp16x16():
        def default():
            # BUGFIX: this case previously reused the broadcast shapes
            # (2, 2) / (1, 2) — a copy-paste from broadcast(). Every other
            # dtype's default case uses matching (3, 3, 3) operands.
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            _emit_xor_test(x, y, Dtype.FP16x16, "xor_fp16x16", FixedImpl.FP16x16)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            _emit_xor_test(
                x, y, Dtype.FP16x16, "xor_fp16x16_broadcast", FixedImpl.FP16x16
            )

        default()
        broadcast()
https://github.com/gizatechxyz/orion
nodegen/test_list.py
import os
import glob
import subprocess

# Directory that holds the per-operator test generator modules.
directory_path = 'nodegen/node/'

# Get all files in the directory.
all_files = os.listdir(directory_path)

# Collect module names: every '*.py' file with the extension stripped.
# ('name' instead of 'file' to avoid shadowing the builtin.)
python_files = [name[:-3] for name in all_files if name.endswith('.py')]

# Report the interpreter version as diagnostic output.
# subprocess.run with an argument list avoids spawning a shell
# (safer and preferred over os.system with a command string).
command = ['python', '--version']
subprocess.run(command)
https://github.com/gizatechxyz/orion
src/lib.cairo
// Crate root: top-level module declarations for the Orion library.
mod operators;
// Number types (see src/numbers.cairo): fixed-point and complex implementations.
mod numbers;
mod utils;
mod test_helper;
https://github.com/gizatechxyz/orion
src/numbers.cairo
mod fixed_point;
mod complex_number;

use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::fixed_point::implementations::fp8x23::core::{ONE as ONE_fp8x23};
use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE as ONE_fp16x16};
use orion::numbers::fixed_point::implementations::fp64x64::core::{ONE as ONE_fp64x64};
use orion::numbers::fixed_point::implementations::fp32x32::core::{ONE as ONE_fp32x32};

// Common methods from Fixed Point and Signed Integers.
// Abstracts over a number type `T` with magnitude type `MAG`, so callers can
// be written once for every numeric representation implementing this trait.
trait NumberTrait<T, MAG> {
    // --- Construction ---
    fn new(mag: MAG, sign: bool) -> T;
    fn new_unscaled(mag: MAG, sign: bool) -> T;
    fn from_felt(val: felt252) -> T;
    // --- Elementary math ---
    fn abs(self: T) -> T;
    fn neg(self: T) -> T;
    fn ceil(self: T) -> T;
    fn exp(self: T) -> T;
    fn exp2(self: T) -> T;
    fn floor(self: T) -> T;
    fn ln(self: T) -> T;
    fn log2(self: T) -> T;
    fn log10(self: T) -> T;
    fn pow(self: T, b: T) -> T;
    fn round(self: T) -> T;
    fn sqrt(self: T) -> T;
    // --- Trigonometric and hyperbolic functions ---
    fn acos(self: T) -> T;
    fn asin(self: T) -> T;
    fn atan(self: T) -> T;
    fn cos(self: T) -> T;
    fn sin(self: T) -> T;
    fn tan(self: T) -> T;
    fn acosh(self: T) -> T;
    fn asinh(self: T) -> T;
    fn atanh(self: T) -> T;
    fn cosh(self: T) -> T;
    fn sinh(self: T) -> T;
    fn tanh(self: T) -> T;
    // --- Constants and predicates ---
    fn zero() -> T;
    fn is_zero(self: T) -> bool;
    fn half() -> T;
    fn one() -> T;
    fn is_one(self: T) -> bool;
    fn neg_one() -> T;
    fn min_value() -> T;
    fn max_value() -> T;
    fn min(self: T, other: T) -> T;
    fn max(self: T, other: T) -> T;
    fn mag(self: T) -> MAG;
    fn is_neg(self: T) -> bool;
    // --- Logical / selection helpers ---
    fn xor(lhs: T, rhs: T) -> bool;
    fn or(lhs: T, rhs: T) -> bool;
    fn sign(self: T) -> T;
    fn and(lhs: T, rhs: T) -> bool;
    fn where(self: T, x: T, y: T) -> T;
    // --- Special values (NaN / infinity) ---
    fn NaN() -> T;
    fn is_nan(self: T) -> bool;
    fn INF() -> T;
    fn is_inf(self: T) -> bool;
    fn is_pos_inf(self: T) -> bool;
    fn is_neg_inf(self: T) -> bool;
    // --- Bitwise and arithmetic ---
    fn bitwise_and(lhs: T, rhs: T) -> T;
    fn bitwise_xor(lhs: T, rhs: T) -> T;
    fn bitwise_or(lhs: T, rhs: T) -> T;
    fn add(lhs: T, rhs: T) -> T;
    fn sub(lhs: T, rhs: T) -> T;
}

use
orion::numbers::fixed_point::implementations::fp8x23::core::{
    FP8x23Impl, FP8x23, FP8x23Add, FP8x23Sub
};
use orion::numbers::fixed_point::implementations::fp8x23::math::core as core_fp8x23;
use orion::numbers::fixed_point::implementations::fp8x23::math::comp as comp_fp8x23;

// NumberTrait implementation for the FP8x23 fixed-point type (u32 magnitude).
// Every method delegates to the fp8x23 impl/core/comp modules.
impl FP8x23Number of NumberTrait<FP8x23, u32> {
    fn new(mag: u32, sign: bool) -> FP8x23 { FP8x23Impl::new(mag, sign) }
    fn new_unscaled(mag: u32, sign: bool) -> FP8x23 { FP8x23Impl::new_unscaled(mag, sign) }
    fn from_felt(val: felt252) -> FP8x23 { FP8x23Impl::from_felt(val) }
    fn ceil(self: FP8x23) -> FP8x23 { FP8x23Impl::ceil(self) }
    fn exp(self: FP8x23) -> FP8x23 { FP8x23Impl::exp(self) }
    fn exp2(self: FP8x23) -> FP8x23 { FP8x23Impl::exp2(self) }
    fn floor(self: FP8x23) -> FP8x23 { FP8x23Impl::floor(self) }
    fn ln(self: FP8x23) -> FP8x23 { FP8x23Impl::ln(self) }
    fn log2(self: FP8x23) -> FP8x23 { FP8x23Impl::log2(self) }
    fn log10(self: FP8x23) -> FP8x23 { FP8x23Impl::log10(self) }
    fn pow(self: FP8x23, b: FP8x23) -> FP8x23 { FP8x23Impl::pow(self, b) }
    fn round(self: FP8x23) -> FP8x23 { FP8x23Impl::round(self) }
    fn sqrt(self: FP8x23) -> FP8x23 { FP8x23Impl::sqrt(self) }
    fn acos(self: FP8x23) -> FP8x23 { FP8x23Impl::acos(self) }
    fn asin(self: FP8x23) -> FP8x23 { FP8x23Impl::asin(self) }
    fn atan(self: FP8x23) -> FP8x23 { FP8x23Impl::atan(self) }
    fn cos(self: FP8x23) -> FP8x23 { FP8x23Impl::cos(self) }
    fn sin(self: FP8x23) -> FP8x23 { FP8x23Impl::sin(self) }
    fn tan(self: FP8x23) -> FP8x23 { FP8x23Impl::tan(self) }
    fn acosh(self: FP8x23) -> FP8x23 { FP8x23Impl::acosh(self) }
    fn asinh(self: FP8x23) -> FP8x23 { FP8x23Impl::asinh(self) }
    fn atanh(self: FP8x23) -> FP8x23 { FP8x23Impl::atanh(self) }
    fn cosh(self: FP8x23) -> FP8x23 { FP8x23Impl::cosh(self) }
    fn sinh(self: FP8x23) -> FP8x23 { FP8x23Impl::sinh(self) }
    fn tanh(self: FP8x23) -> FP8x23 { FP8x23Impl::tanh(self) }
    fn zero() -> FP8x23 { FP8x23Impl::ZERO() }
    // Equality against the canonical constants, checked via core eq.
    fn is_zero(self: FP8x23) -> bool { core_fp8x23::eq(@self, @FP8x23Impl::ZERO()) }
    fn half() -> FP8x23 { FP8x23Impl::HALF() }
    fn one() -> FP8x23 { FP8x23Impl::ONE() }
    fn neg_one() -> FP8x23 { FP8x23 { mag: core_fp8x23::ONE, sign: true } }
    fn is_one(self: FP8x23) -> bool { core_fp8x23::eq(@self, @FP8x23Impl::ONE()) }
    fn abs(self: FP8x23) -> FP8x23 { core_fp8x23::abs(self) }
    fn neg(self: FP8x23) -> FP8x23 { core_fp8x23::neg(self) }
    // Extremes: maximum magnitude with negative / positive sign.
    fn min_value() -> FP8x23 { FP8x23 { mag: core_fp8x23::MAX, sign: true } }
    fn max_value() -> FP8x23 { FP8x23 { mag: core_fp8x23::MAX, sign: false } }
    fn min(self: FP8x23, other: FP8x23) -> FP8x23 { comp_fp8x23::min(self, other) }
    fn max(self: FP8x23, other: FP8x23) -> FP8x23 { comp_fp8x23::max(self, other) }
    fn mag(self: FP8x23) -> u32 { self.mag }
    fn is_neg(self: FP8x23) -> bool { self.sign }
    fn xor(lhs: FP8x23, rhs: FP8x23) -> bool { comp_fp8x23::xor(lhs, rhs) }
    fn or(lhs: FP8x23, rhs: FP8x23) -> bool { comp_fp8x23::or(lhs, rhs) }
    fn sign(self: FP8x23) -> FP8x23 { core_fp8x23::sign(self) }
    fn and(lhs: FP8x23, rhs: FP8x23) -> bool { comp_fp8x23::and(lhs, rhs) }
    fn where(self: FP8x23, x: FP8x23, y: FP8x23) -> FP8x23 { comp_fp8x23::where(self, x, y) }
    fn NaN() -> FP8x23 { FP8x23Impl::NaN() }
    fn is_nan(self: FP8x23) -> bool { FP8x23Impl::is_nan(self) }
    fn INF() -> FP8x23 { FP8x23Impl::INF() }
    fn is_inf(self: FP8x23) -> bool { FP8x23Impl::is_inf(self) }
    fn is_pos_inf(self: FP8x23) -> bool { FP8x23Impl::is_pos_inf(self) }
    fn is_neg_inf(self: FP8x23) -> bool { FP8x23Impl::is_neg_inf(self) }
    fn bitwise_and(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { comp_fp8x23::bitwise_and(lhs, rhs) }
    fn bitwise_xor(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { comp_fp8x23::bitwise_xor(lhs, rhs) }
    fn bitwise_or(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { comp_fp8x23::bitwise_or(lhs, rhs) }
    fn add(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { FP8x23Add::add(lhs, rhs) }
    fn sub(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { FP8x23Sub::sub(lhs, rhs) }
}

use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
    FP8x23WImpl, FP8x23W, FP8x23WAdd, FP8x23WSub
};
use
orion::numbers::fixed_point::implementations::fp8x23wide::math::core as core_fp8x23wide;
use orion::numbers::fixed_point::implementations::fp8x23wide::math::comp as comp_fp8x23wide;

// NumberTrait implementation for FP8x23W, the wide variant of FP8x23 backed
// by a u64 magnitude. Every method delegates to the fp8x23wide modules.
impl FP8x23WNumber of NumberTrait<FP8x23W, u64> {
    fn new(mag: u64, sign: bool) -> FP8x23W { FP8x23WImpl::new(mag, sign) }
    fn new_unscaled(mag: u64, sign: bool) -> FP8x23W { FP8x23WImpl::new_unscaled(mag, sign) }
    fn from_felt(val: felt252) -> FP8x23W { FP8x23WImpl::from_felt(val) }
    fn ceil(self: FP8x23W) -> FP8x23W { FP8x23WImpl::ceil(self) }
    fn exp(self: FP8x23W) -> FP8x23W { FP8x23WImpl::exp(self) }
    fn exp2(self: FP8x23W) -> FP8x23W { FP8x23WImpl::exp2(self) }
    fn floor(self: FP8x23W) -> FP8x23W { FP8x23WImpl::floor(self) }
    fn ln(self: FP8x23W) -> FP8x23W { FP8x23WImpl::ln(self) }
    fn log2(self: FP8x23W) -> FP8x23W { FP8x23WImpl::log2(self) }
    fn log10(self: FP8x23W) -> FP8x23W { FP8x23WImpl::log10(self) }
    fn pow(self: FP8x23W, b: FP8x23W) -> FP8x23W { FP8x23WImpl::pow(self, b) }
    fn round(self: FP8x23W) -> FP8x23W { FP8x23WImpl::round(self) }
    fn sqrt(self: FP8x23W) -> FP8x23W { FP8x23WImpl::sqrt(self) }
    fn acos(self: FP8x23W) -> FP8x23W { FP8x23WImpl::acos(self) }
    fn asin(self: FP8x23W) -> FP8x23W { FP8x23WImpl::asin(self) }
    fn atan(self: FP8x23W) -> FP8x23W { FP8x23WImpl::atan(self) }
    fn cos(self: FP8x23W) -> FP8x23W { FP8x23WImpl::cos(self) }
    fn sin(self: FP8x23W) -> FP8x23W { FP8x23WImpl::sin(self) }
    fn tan(self: FP8x23W) -> FP8x23W { FP8x23WImpl::tan(self) }
    fn acosh(self: FP8x23W) -> FP8x23W { FP8x23WImpl::acosh(self) }
    fn asinh(self: FP8x23W) -> FP8x23W { FP8x23WImpl::asinh(self) }
    fn atanh(self: FP8x23W) -> FP8x23W { FP8x23WImpl::atanh(self) }
    fn cosh(self: FP8x23W) -> FP8x23W { FP8x23WImpl::cosh(self) }
    fn sinh(self: FP8x23W) -> FP8x23W { FP8x23WImpl::sinh(self) }
    fn tanh(self: FP8x23W) -> FP8x23W { FP8x23WImpl::tanh(self) }
    fn zero() -> FP8x23W { FP8x23WImpl::ZERO() }
    // Equality against the canonical constants, checked via core eq.
    fn is_zero(self: FP8x23W) -> bool { core_fp8x23wide::eq(@self, @FP8x23WImpl::ZERO()) }
    fn half() -> FP8x23W { FP8x23WImpl::HALF() }
    fn one() -> FP8x23W { FP8x23WImpl::ONE() }
    fn neg_one() -> FP8x23W { FP8x23W { mag: core_fp8x23wide::ONE, sign: true } }
    fn is_one(self: FP8x23W) -> bool { core_fp8x23wide::eq(@self, @FP8x23WImpl::ONE()) }
    fn abs(self: FP8x23W) -> FP8x23W { core_fp8x23wide::abs(self) }
    fn neg(self: FP8x23W) -> FP8x23W { core_fp8x23wide::neg(self) }
    // Extremes: maximum magnitude with negative / positive sign.
    fn min_value() -> FP8x23W { FP8x23W { mag: core_fp8x23wide::MAX, sign: true } }
    fn max_value() -> FP8x23W { FP8x23W { mag: core_fp8x23wide::MAX, sign: false } }
    fn min(self: FP8x23W, other: FP8x23W) -> FP8x23W { comp_fp8x23wide::min(self, other) }
    fn max(self: FP8x23W, other: FP8x23W) -> FP8x23W { comp_fp8x23wide::max(self, other) }
    fn mag(self: FP8x23W) -> u64 { self.mag }
    fn is_neg(self: FP8x23W) -> bool { self.sign }
    fn xor(lhs: FP8x23W, rhs: FP8x23W) -> bool { comp_fp8x23wide::xor(lhs, rhs) }
    fn or(lhs: FP8x23W, rhs: FP8x23W) -> bool { comp_fp8x23wide::or(lhs, rhs) }
    fn sign(self: FP8x23W) -> FP8x23W { core_fp8x23wide::sign(self) }
    fn and(lhs: FP8x23W, rhs: FP8x23W) -> bool { comp_fp8x23wide::and(lhs, rhs) }
    fn where(self: FP8x23W, x: FP8x23W, y: FP8x23W) -> FP8x23W { comp_fp8x23wide::where(self, x, y) }
    fn NaN() -> FP8x23W { FP8x23WImpl::NaN() }
    fn is_nan(self: FP8x23W) -> bool { FP8x23WImpl::is_nan(self) }
    fn INF() -> FP8x23W { FP8x23WImpl::INF() }
    fn is_inf(self: FP8x23W) -> bool { FP8x23WImpl::is_inf(self) }
    fn is_pos_inf(self: FP8x23W) -> bool { FP8x23WImpl::is_pos_inf(self) }
    fn is_neg_inf(self: FP8x23W) -> bool { FP8x23WImpl::is_neg_inf(self) }
    fn bitwise_and(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { comp_fp8x23wide::bitwise_and(lhs, rhs) }
    fn bitwise_xor(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { comp_fp8x23wide::bitwise_xor(lhs, rhs) }
    fn bitwise_or(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { comp_fp8x23wide::bitwise_or(lhs, rhs) }
    fn add(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { FP8x23WAdd::add(lhs, rhs) }
    fn sub(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { FP8x23WSub::sub(lhs, rhs) }
}

use
// NumberTrait<FP16x16, u32> — same delegation pattern as the FP8x23W impl above
// but for the 16.16 fixed-point type: FP16x16Impl for constructors/rounding/
// transcendentals, core_fp16x16 for eq/abs/neg/sign, comp_fp16x16 for
// comparisons/logic/bitwise, FP16x16Add/FP16x16Sub for arithmetic.
orion::numbers::fixed_point::implementations::fp16x16::core::{ FP16x16Impl, FP16x16, FP16x16Add, FP16x16Sub }; use orion::numbers::fixed_point::implementations::fp16x16::math::core as core_fp16x16; use orion::numbers::fixed_point::implementations::fp16x16::math::comp as comp_fp16x16; impl FP16x16Number of NumberTrait<FP16x16, u32> { fn new(mag: u32, sign: bool) -> FP16x16 { FP16x16Impl::new(mag, sign) } fn new_unscaled(mag: u32, sign: bool) -> FP16x16 { FP16x16Impl::new_unscaled(mag, sign) } fn from_felt(val: felt252) -> FP16x16 { FP16x16Impl::from_felt(val) } fn ceil(self: FP16x16) -> FP16x16 { FP16x16Impl::ceil(self) } fn exp(self: FP16x16) -> FP16x16 { FP16x16Impl::exp(self) } fn exp2(self: FP16x16) -> FP16x16 { FP16x16Impl::exp2(self) } fn floor(self: FP16x16) -> FP16x16 { FP16x16Impl::floor(self) } fn ln(self: FP16x16) -> FP16x16 { FP16x16Impl::ln(self) } fn log2(self: FP16x16) -> FP16x16 { FP16x16Impl::log2(self) } fn log10(self: FP16x16) -> FP16x16 { FP16x16Impl::log10(self) } fn pow(self: FP16x16, b: FP16x16) -> FP16x16 { FP16x16Impl::pow(self, b) } fn round(self: FP16x16) -> FP16x16 { FP16x16Impl::round(self) } fn sqrt(self: FP16x16) -> FP16x16 { FP16x16Impl::sqrt(self) } fn acos(self: FP16x16) -> FP16x16 { FP16x16Impl::acos(self) } fn asin(self: FP16x16) -> FP16x16 { FP16x16Impl::asin(self) } fn atan(self: FP16x16) -> FP16x16 { FP16x16Impl::atan(self) } fn cos(self: FP16x16) -> FP16x16 { FP16x16Impl::cos(self) } fn sin(self: FP16x16) -> FP16x16 { FP16x16Impl::sin(self) } fn tan(self: FP16x16) -> FP16x16 { FP16x16Impl::tan(self) } fn acosh(self: FP16x16) -> FP16x16 { FP16x16Impl::acosh(self) } fn asinh(self: FP16x16) -> FP16x16 { FP16x16Impl::asinh(self) } fn atanh(self: FP16x16) -> FP16x16 { FP16x16Impl::atanh(self) } fn cosh(self: FP16x16) -> FP16x16 { FP16x16Impl::cosh(self) } fn sinh(self: FP16x16) -> FP16x16 { FP16x16Impl::sinh(self) } fn tanh(self: FP16x16) -> FP16x16 { FP16x16Impl::tanh(self) } fn zero() -> FP16x16 { FP16x16Impl::ZERO() } fn 
is_zero(self: FP16x16) -> bool { core_fp16x16::eq(@self, @FP16x16Impl::ZERO()) } fn half() -> FP16x16 { FP16x16Impl::HALF() } fn one() -> FP16x16 { FP16x16Impl::ONE() } fn neg_one() -> FP16x16 { FP16x16 { mag: core_fp16x16::ONE, sign: true } } fn is_one(self: FP16x16) -> bool { core_fp16x16::eq(@self, @FP16x16Impl::ONE()) } fn abs(self: FP16x16) -> FP16x16 { core_fp16x16::abs(self) } fn neg(self: FP16x16) -> FP16x16 { core_fp16x16::neg(self) } fn min_value() -> FP16x16 { FP16x16 { mag: core_fp16x16::MAX, sign: true } } fn max_value() -> FP16x16 { FP16x16 { mag: core_fp16x16::MAX, sign: false } } fn min(self: FP16x16, other: FP16x16) -> FP16x16 { comp_fp16x16::min(self, other) } fn max(self: FP16x16, other: FP16x16) -> FP16x16 { comp_fp16x16::max(self, other) } fn mag(self: FP16x16) -> u32 { self.mag } fn is_neg(self: FP16x16) -> bool { self.sign } fn xor(lhs: FP16x16, rhs: FP16x16) -> bool { comp_fp16x16::xor(lhs, rhs) } fn or(lhs: FP16x16, rhs: FP16x16) -> bool { comp_fp16x16::or(lhs, rhs) } fn sign(self: FP16x16) -> FP16x16 { core_fp16x16::sign(self) } fn and(lhs: FP16x16, rhs: FP16x16) -> bool { comp_fp16x16::and(lhs, rhs) } fn where(self: FP16x16, x: FP16x16, y: FP16x16) -> FP16x16 { comp_fp16x16::where(self, x, y) } fn NaN() -> FP16x16 { FP16x16Impl::NaN() } fn is_nan(self: FP16x16) -> bool { FP16x16Impl::is_nan(self) } fn INF() -> FP16x16 { FP16x16Impl::INF() } fn is_inf(self: FP16x16) -> bool { FP16x16Impl::is_inf(self) } fn is_pos_inf(self: FP16x16) -> bool { FP16x16Impl::is_pos_inf(self) } fn is_neg_inf(self: FP16x16) -> bool { FP16x16Impl::is_neg_inf(self) } fn bitwise_and(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { comp_fp16x16::bitwise_and(lhs, rhs) } fn bitwise_xor(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { comp_fp16x16::bitwise_xor(lhs, rhs) } fn bitwise_or(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { comp_fp16x16::bitwise_or(lhs, rhs) } fn add(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { FP16x16Add::add(lhs, rhs) } fn sub(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { 
FP16x16Sub::sub(lhs, rhs) } } use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ FP16x16WImpl, FP16x16W, FP16x16WAdd, FP16x16WSub }; use orion::numbers::fixed_point::implementations::fp16x16wide::math::core as core_fp16x16wide; use orion::numbers::fixed_point::implementations::fp16x16wide::math::comp as comp_fp16x16wide; impl FP16x16WNumber of NumberTrait<FP16x16W, u64> { fn new(mag: u64, sign: bool) -> FP16x16W { FP16x16WImpl::new(mag, sign) } fn new_unscaled(mag: u64, sign: bool) -> FP16x16W { FP16x16WImpl::new_unscaled(mag, sign) } fn from_felt(val: felt252) -> FP16x16W { FP16x16WImpl::from_felt(val) } fn ceil(self: FP16x16W) -> FP16x16W { FP16x16WImpl::ceil(self) } fn exp(self: FP16x16W) -> FP16x16W { FP16x16WImpl::exp(self) } fn exp2(self: FP16x16W) -> FP16x16W { FP16x16WImpl::exp2(self) } fn floor(self: FP16x16W) -> FP16x16W { FP16x16WImpl::floor(self) } fn ln(self: FP16x16W) -> FP16x16W { FP16x16WImpl::ln(self) } fn log2(self: FP16x16W) -> FP16x16W { FP16x16WImpl::log2(self) } fn log10(self: FP16x16W) -> FP16x16W { FP16x16WImpl::log10(self) } fn pow(self: FP16x16W, b: FP16x16W) -> FP16x16W { FP16x16WImpl::pow(self, b) } fn round(self: FP16x16W) -> FP16x16W { FP16x16WImpl::round(self) } fn sqrt(self: FP16x16W) -> FP16x16W { FP16x16WImpl::sqrt(self) } fn acos(self: FP16x16W) -> FP16x16W { FP16x16WImpl::acos(self) } fn asin(self: FP16x16W) -> FP16x16W { FP16x16WImpl::asin(self) } fn atan(self: FP16x16W) -> FP16x16W { FP16x16WImpl::atan(self) } fn cos(self: FP16x16W) -> FP16x16W { FP16x16WImpl::cos(self) } fn sin(self: FP16x16W) -> FP16x16W { FP16x16WImpl::sin(self) } fn tan(self: FP16x16W) -> FP16x16W { FP16x16WImpl::tan(self) } fn acosh(self: FP16x16W) -> FP16x16W { FP16x16WImpl::acosh(self) } fn asinh(self: FP16x16W) -> FP16x16W { FP16x16WImpl::asinh(self) } fn atanh(self: FP16x16W) -> FP16x16W { FP16x16WImpl::atanh(self) } fn cosh(self: FP16x16W) -> FP16x16W { FP16x16WImpl::cosh(self) } fn sinh(self: FP16x16W) -> FP16x16W { 
// NumberTrait<FP16x16W, u64> continues: wide (u64-backed) variant of the 16.16
// impl above, delegating to FP16x16WImpl / core_fp16x16wide / comp_fp16x16wide.
FP16x16WImpl::sinh(self) } fn tanh(self: FP16x16W) -> FP16x16W { FP16x16WImpl::tanh(self) } fn zero() -> FP16x16W { FP16x16WImpl::ZERO() } fn is_zero(self: FP16x16W) -> bool { core_fp16x16wide::eq(@self, @FP16x16WImpl::ZERO()) } fn half() -> FP16x16W { FP16x16WImpl::HALF() } fn one() -> FP16x16W { FP16x16WImpl::ONE() } fn neg_one() -> FP16x16W { FP16x16W { mag: core_fp16x16wide::ONE, sign: true } } fn is_one(self: FP16x16W) -> bool { core_fp16x16wide::eq(@self, @FP16x16WImpl::ONE()) } fn abs(self: FP16x16W) -> FP16x16W { core_fp16x16wide::abs(self) } fn neg(self: FP16x16W) -> FP16x16W { core_fp16x16wide::neg(self) } fn min_value() -> FP16x16W { FP16x16W { mag: core_fp16x16wide::MAX, sign: true } } fn max_value() -> FP16x16W { FP16x16W { mag: core_fp16x16wide::MAX, sign: false } } fn min(self: FP16x16W, other: FP16x16W) -> FP16x16W { comp_fp16x16wide::min(self, other) } fn max(self: FP16x16W, other: FP16x16W) -> FP16x16W { comp_fp16x16wide::max(self, other) } fn mag(self: FP16x16W) -> u64 { self.mag } fn is_neg(self: FP16x16W) -> bool { self.sign } fn xor(lhs: FP16x16W, rhs: FP16x16W) -> bool { comp_fp16x16wide::xor(lhs, rhs) } fn or(lhs: FP16x16W, rhs: FP16x16W) -> bool { comp_fp16x16wide::or(lhs, rhs) } fn sign(self: FP16x16W) -> FP16x16W { core_fp16x16wide::sign(self) } fn and(lhs: FP16x16W, rhs: FP16x16W) -> bool { comp_fp16x16wide::and(lhs, rhs) } fn where(self: FP16x16W, x: FP16x16W, y: FP16x16W) -> FP16x16W { comp_fp16x16wide::where(self, x, y) } fn NaN() -> FP16x16W { FP16x16WImpl::NaN() } fn is_nan(self: FP16x16W) -> bool { FP16x16WImpl::is_nan(self) } fn INF() -> FP16x16W { FP16x16WImpl::INF() } fn is_inf(self: FP16x16W) -> bool { FP16x16WImpl::is_inf(self) } fn is_pos_inf(self: FP16x16W) -> bool { FP16x16WImpl::is_pos_inf(self) } fn is_neg_inf(self: FP16x16W) -> bool { FP16x16WImpl::is_neg_inf(self) } fn bitwise_and(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { comp_fp16x16wide::bitwise_and(lhs, rhs) } fn bitwise_xor(lhs: FP16x16W, rhs: FP16x16W) -> 
FP16x16W { comp_fp16x16wide::bitwise_xor(lhs, rhs) } fn bitwise_or(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { comp_fp16x16wide::bitwise_or(lhs, rhs) } fn add(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { FP16x16WAdd::add(lhs, rhs) } fn sub(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { FP16x16WSub::sub(lhs, rhs) } } use orion::numbers::fixed_point::implementations::fp64x64::core::{ FP64x64Impl, FP64x64, FP64x64Add, FP64x64Sub }; use orion::numbers::fixed_point::implementations::fp64x64::{core as core_fp64x64}; use orion::numbers::fixed_point::implementations::fp64x64::comp as comp_fp64x64; use cubit::f128 as fp64x64; impl FP64x64Number of NumberTrait<FP64x64, u128> { fn new(mag: u128, sign: bool) -> FP64x64 { FP64x64Impl::new(mag, sign) } fn new_unscaled(mag: u128, sign: bool) -> FP64x64 { FP64x64Impl::new_unscaled(mag, sign) } fn from_felt(val: felt252) -> FP64x64 { FP64x64Impl::from_felt(val) } fn ceil(self: FP64x64) -> FP64x64 { FP64x64Impl::ceil(self) } fn exp(self: FP64x64) -> FP64x64 { FP64x64Impl::exp(self) } fn exp2(self: FP64x64) -> FP64x64 { FP64x64Impl::exp2(self) } fn floor(self: FP64x64) -> FP64x64 { FP64x64Impl::floor(self) } fn ln(self: FP64x64) -> FP64x64 { FP64x64Impl::ln(self) } fn log2(self: FP64x64) -> FP64x64 { FP64x64Impl::log2(self) } fn log10(self: FP64x64) -> FP64x64 { FP64x64Impl::log10(self) } fn pow(self: FP64x64, b: FP64x64) -> FP64x64 { FP64x64Impl::pow(self, b) } fn round(self: FP64x64) -> FP64x64 { FP64x64Impl::round(self) } fn sqrt(self: FP64x64) -> FP64x64 { FP64x64Impl::sqrt(self) } fn acos(self: FP64x64) -> FP64x64 { FP64x64Impl::acos(self) } fn asin(self: FP64x64) -> FP64x64 { FP64x64Impl::asin(self) } fn atan(self: FP64x64) -> FP64x64 { FP64x64Impl::atan(self) } fn cos(self: FP64x64) -> FP64x64 { FP64x64Impl::cos(self) } fn sin(self: FP64x64) -> FP64x64 { FP64x64Impl::sin(self) } fn tan(self: FP64x64) -> FP64x64 { FP64x64Impl::tan(self) } fn acosh(self: FP64x64) -> FP64x64 { FP64x64Impl::acosh(self) } fn asinh(self: FP64x64) -> 
// NumberTrait<FP64x64, u128> continues: this type is backed by the external
// cubit f128 library (aliased `fp64x64`). Delegation is mixed: is_zero/abs/neg
// use fp64x64::ops, is_one/neg_one/min_value/max_value use core_fp64x64
// constants/eq, min/max use fp64x64::comp, and logic/bitwise use comp_fp64x64.
// NOTE(review): is_zero and is_one resolve `eq` through different modules
// (fp64x64::ops vs core_fp64x64) — presumably equivalent; confirm.
FP64x64 { FP64x64Impl::asinh(self) } fn atanh(self: FP64x64) -> FP64x64 { FP64x64Impl::atanh(self) } fn cosh(self: FP64x64) -> FP64x64 { FP64x64Impl::cosh(self) } fn sinh(self: FP64x64) -> FP64x64 { FP64x64Impl::sinh(self) } fn tanh(self: FP64x64) -> FP64x64 { FP64x64Impl::tanh(self) } fn zero() -> FP64x64 { FP64x64Impl::ZERO() } fn is_zero(self: FP64x64) -> bool { fp64x64::ops::eq(@self, @FP64x64Impl::ZERO()) } fn half() -> FP64x64 { FP64x64Impl::HALF() } fn one() -> FP64x64 { FP64x64Impl::ONE() } fn neg_one() -> FP64x64 { FP64x64 { mag: core_fp64x64::ONE, sign: true } } fn is_one(self: FP64x64) -> bool { core_fp64x64::eq(@self, @FP64x64Impl::ONE()) } fn abs(self: FP64x64) -> FP64x64 { fp64x64::ops::abs(self) } fn neg(self: FP64x64) -> FP64x64 { fp64x64::ops::neg(self) } fn min_value() -> FP64x64 { FP64x64 { mag: core_fp64x64::MAX, sign: true } } fn max_value() -> FP64x64 { FP64x64 { mag: core_fp64x64::MAX, sign: false } } fn min(self: FP64x64, other: FP64x64) -> FP64x64 { fp64x64::comp::min(self, other) } fn max(self: FP64x64, other: FP64x64) -> FP64x64 { fp64x64::comp::max(self, other) } fn mag(self: FP64x64) -> u128 { self.mag } fn is_neg(self: FP64x64) -> bool { self.sign } fn xor(lhs: FP64x64, rhs: FP64x64) -> bool { comp_fp64x64::xor(lhs, rhs) } fn or(lhs: FP64x64, rhs: FP64x64) -> bool { comp_fp64x64::or(lhs, rhs) } fn sign(self: FP64x64) -> FP64x64 { FP64x64Impl::sign(self) } fn and(lhs: FP64x64, rhs: FP64x64) -> bool { comp_fp64x64::and(lhs, rhs) } fn where(self: FP64x64, x: FP64x64, y: FP64x64) -> FP64x64 { comp_fp64x64::where(self, x, y) } fn NaN() -> FP64x64 { FP64x64Impl::NaN() } fn is_nan(self: FP64x64) -> bool { FP64x64Impl::is_nan(self) } fn INF() -> FP64x64 { FP64x64Impl::INF() } fn is_inf(self: FP64x64) -> bool { FP64x64Impl::is_inf(self) } fn is_pos_inf(self: FP64x64) -> bool { FP64x64Impl::is_pos_inf(self) } fn is_neg_inf(self: FP64x64) -> bool { FP64x64Impl::is_neg_inf(self) } fn bitwise_and(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { 
comp_fp64x64::bitwise_and(lhs, rhs) } fn bitwise_xor(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { comp_fp64x64::bitwise_xor(lhs, rhs) } fn bitwise_or(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { comp_fp64x64::bitwise_or(lhs, rhs) } fn add(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { FP64x64Add::add(lhs, rhs) } fn sub(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { FP64x64Sub::sub(lhs, rhs) } } use orion::numbers::fixed_point::implementations::fp32x32::core::{ FP32x32Impl, FP32x32, FP32x32Add, FP32x32Sub }; use orion::numbers::fixed_point::implementations::fp32x32::core as core_fp32x32; use orion::numbers::fixed_point::implementations::fp32x32::comp as comp_fp32x32; use cubit::f64 as fp32x32; impl FP32x32Number of NumberTrait<FP32x32, u64> { fn new(mag: u64, sign: bool) -> FP32x32 { FP32x32Impl::new(mag, sign) } fn new_unscaled(mag: u64, sign: bool) -> FP32x32 { FP32x32Impl::new_unscaled(mag, sign) } fn from_felt(val: felt252) -> FP32x32 { FP32x32Impl::from_felt(val) } fn ceil(self: FP32x32) -> FP32x32 { FP32x32Impl::ceil(self) } fn exp(self: FP32x32) -> FP32x32 { FP32x32Impl::exp(self) } fn exp2(self: FP32x32) -> FP32x32 { FP32x32Impl::exp2(self) } fn floor(self: FP32x32) -> FP32x32 { FP32x32Impl::floor(self) } fn ln(self: FP32x32) -> FP32x32 { FP32x32Impl::ln(self) } fn log2(self: FP32x32) -> FP32x32 { FP32x32Impl::log2(self) } fn log10(self: FP32x32) -> FP32x32 { FP32x32Impl::log10(self) } fn pow(self: FP32x32, b: FP32x32) -> FP32x32 { FP32x32Impl::pow(self, b) } fn round(self: FP32x32) -> FP32x32 { FP32x32Impl::round(self) } fn sqrt(self: FP32x32) -> FP32x32 { FP32x32Impl::sqrt(self) } fn acos(self: FP32x32) -> FP32x32 { FP32x32Impl::acos(self) } fn asin(self: FP32x32) -> FP32x32 { FP32x32Impl::asin(self) } fn atan(self: FP32x32) -> FP32x32 { FP32x32Impl::atan(self) } fn cos(self: FP32x32) -> FP32x32 { FP32x32Impl::cos(self) } fn sin(self: FP32x32) -> FP32x32 { FP32x32Impl::sin(self) } fn tan(self: FP32x32) -> FP32x32 { FP32x32Impl::tan(self) } fn acosh(self: FP32x32) -> FP32x32 { 
// NumberTrait<FP32x32, u64> continues: backed by the external cubit f64 library
// (aliased `fp32x32`); mirrors the FP64x64 impl's mixed delegation between
// fp32x32::ops / core_fp32x32 / fp32x32::comp / comp_fp32x32.
FP32x32Impl::acosh(self) } fn asinh(self: FP32x32) -> FP32x32 { FP32x32Impl::asinh(self) } fn atanh(self: FP32x32) -> FP32x32 { FP32x32Impl::atanh(self) } fn cosh(self: FP32x32) -> FP32x32 { FP32x32Impl::cosh(self) } fn sinh(self: FP32x32) -> FP32x32 { FP32x32Impl::sinh(self) } fn tanh(self: FP32x32) -> FP32x32 { FP32x32Impl::tanh(self) } fn zero() -> FP32x32 { FP32x32Impl::ZERO() } fn is_zero(self: FP32x32) -> bool { fp32x32::ops::eq(@self, @FP32x32Impl::ZERO()) } fn half() -> FP32x32 { FP32x32Impl::HALF() } fn one() -> FP32x32 { FP32x32Impl::ONE() } fn neg_one() -> FP32x32 { FP32x32 { mag: core_fp32x32::ONE, sign: true } } fn is_one(self: FP32x32) -> bool { core_fp32x32::eq(@self, @FP32x32Impl::ONE()) } fn abs(self: FP32x32) -> FP32x32 { fp32x32::ops::abs(self) } fn neg(self: FP32x32) -> FP32x32 { fp32x32::ops::neg(self) } fn min_value() -> FP32x32 { FP32x32 { mag: core_fp32x32::MAX, sign: true } } fn max_value() -> FP32x32 { FP32x32 { mag: core_fp32x32::MAX, sign: false } } fn min(self: FP32x32, other: FP32x32) -> FP32x32 { fp32x32::comp::min(self, other) } fn max(self: FP32x32, other: FP32x32) -> FP32x32 { fp32x32::comp::max(self, other) } fn mag(self: FP32x32) -> u64 { self.mag } fn is_neg(self: FP32x32) -> bool { self.sign } fn xor(lhs: FP32x32, rhs: FP32x32) -> bool { comp_fp32x32::xor(lhs, rhs) } fn or(lhs: FP32x32, rhs: FP32x32) -> bool { comp_fp32x32::or(lhs, rhs) } fn sign(self: FP32x32) -> FP32x32 { FP32x32Impl::sign(self) } fn and(lhs: FP32x32, rhs: FP32x32) -> bool { comp_fp32x32::and(lhs, rhs) } fn where(self: FP32x32, x: FP32x32, y: FP32x32) -> FP32x32 { comp_fp32x32::where(self, x, y) } fn NaN() -> FP32x32 { FP32x32Impl::NaN() } fn is_nan(self: FP32x32) -> bool { FP32x32Impl::is_nan(self) } fn INF() -> FP32x32 { FP32x32Impl::INF() } fn is_inf(self: FP32x32) -> bool { FP32x32Impl::is_inf(self) } fn is_pos_inf(self: FP32x32) -> bool { FP32x32Impl::is_pos_inf(self) } fn is_neg_inf(self: FP32x32) -> bool { FP32x32Impl::is_neg_inf(self) } fn 
bitwise_and(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { comp_fp32x32::bitwise_and(lhs, rhs) } fn bitwise_xor(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { comp_fp32x32::bitwise_xor(lhs, rhs) } fn bitwise_or(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { comp_fp32x32::bitwise_or(lhs, rhs) } fn add(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { FP32x32Add::add(lhs, rhs) } fn sub(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { FP32x32Sub::sub(lhs, rhs) } } impl I8Number of NumberTrait<i8, i8> { fn new(mag: i8, sign: bool) -> i8 { if sign { return -mag; } mag } fn new_unscaled(mag: i8, sign: bool) -> i8 { mag } fn from_felt(val: felt252) -> i8 { panic(array!['not supported!']) } fn ceil(self: i8) -> i8 { panic(array!['not supported!']) } fn exp(self: i8) -> i8 { panic(array!['not supported!']) } fn exp2(self: i8) -> i8 { panic(array!['not supported!']) } fn floor(self: i8) -> i8 { panic(array!['not supported!']) } fn ln(self: i8) -> i8 { panic(array!['not supported!']) } fn log2(self: i8) -> i8 { panic(array!['not supported!']) } fn log10(self: i8) -> i8 { panic(array!['not supported!']) } fn pow(self: i8, b: i8) -> i8 { panic(array!['not supported!']) } fn round(self: i8) -> i8 { panic(array!['not supported!']) } fn sqrt(self: i8) -> i8 { panic(array!['not supported!']) } fn acos(self: i8) -> i8 { panic(array!['not supported!']) } fn asin(self: i8) -> i8 { panic(array!['not supported!']) } fn atan(self: i8) -> i8 { panic(array!['not supported!']) } fn cos(self: i8) -> i8 { panic(array!['not supported!']) } fn sin(self: i8) -> i8 { panic(array!['not supported!']) } fn tan(self: i8) -> i8 { panic(array!['not supported!']) } fn acosh(self: i8) -> i8 { panic(array!['not supported!']) } fn asinh(self: i8) -> i8 { panic(array!['not supported!']) } fn atanh(self: i8) -> i8 { panic(array!['not supported!']) } fn cosh(self: i8) -> i8 { panic(array!['not supported!']) } fn sinh(self: i8) -> i8 { panic(array!['not supported!']) } fn tanh(self: i8) -> i8 { panic(array!['not supported!']) } fn zero() -> i8 
// NumberTrait<i8, i8> continues. Note: new() applies the sign flag by negating,
// but new_unscaled() ignores it; fractional/transcendental ops panic
// 'not supported!'. The value 127 doubles as the INF sentinel (is_inf checks
// +/-127), which is why min_value() is -127 rather than the i8 minimum -128.
// Truthiness for xor/or/and/where treats 0 as false and any non-zero as true.
{ 0 } fn is_zero(self: i8) -> bool { self == 0 } fn half() -> i8 { panic(array!['not supported!']) } fn one() -> i8 { 1 } fn neg_one() -> i8 { -1 } fn is_one(self: i8) -> bool { self == 1 } fn abs(self: i8) -> i8 { if self >= 0 { self } else { self * -1_i8 } } fn neg(self: i8) -> i8 { self * -1_i8 } fn min_value() -> i8 { -127 } fn max_value() -> i8 { 127 } fn min(self: i8, other: i8) -> i8 { if self < other { self } else { other } } fn max(self: i8, other: i8) -> i8 { if self > other { self } else { other } } fn mag(self: i8) -> i8 { self } fn is_neg(self: i8) -> bool { self < 0 } fn xor(lhs: i8, rhs: i8) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { true } else { false } } fn or(lhs: i8, rhs: i8) -> bool { if lhs == 0 && rhs == 0 { false } else { true } } fn sign(self: i8) -> i8 { if self == 0 { 0_i8 } else if self > 0 { 1_i8 } else { -1_i8 } } fn and(lhs: i8, rhs: i8) -> bool { if lhs == 0 || rhs == 0 { false } else { true } } fn where(self: i8, x: i8, y: i8) -> i8 { if self == 0 { y } else { x } } fn NaN() -> i8 { panic(array!['not supported!']) } fn is_nan(self: i8) -> bool { panic(array!['not supported!']) } fn INF() -> i8 { 127 } fn is_inf(self: i8) -> bool { self == 127 || self == -127 } fn is_pos_inf(self: i8) -> bool { self == 127 } fn is_neg_inf(self: i8) -> bool { self == -127 } fn bitwise_and(lhs: i8, rhs: i8) -> i8 { panic(array!['not supported!']) } fn bitwise_xor(lhs: i8, rhs: i8) -> i8 { panic(array!['not supported!']) } fn bitwise_or(lhs: i8, rhs: i8) -> i8 { panic(array!['not supported!']) } fn add(lhs: i8, rhs: i8) -> i8 { lhs + rhs } fn sub(lhs: i8, rhs: i8) -> i8 { lhs - rhs } } impl I8Div of Div<i8> { fn div(lhs: i8, rhs: i8) -> i8 { assert(rhs != 0, 'divisor cannot be 0'); let mut lhs_positive = lhs; let mut rhs_positive = rhs; // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; } if rhs < 0 { rhs_positive = rhs * -1; } //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = 
lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i8 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { signed_int_result * -1 } else { signed_int_result } } } impl I8DivEq of DivEq<i8> { #[inline(always)] fn div_eq(ref self: i8, other: i8) { self = Div::div(self, other); } } impl I8IntoFP8x23 of Into<i8, FP8x23> { fn into(self: i8) -> FP8x23 { let number_sign: bool = self < 0; let mut self_positive: i8 = self; if number_sign { self_positive = self_positive * -1_i8 } let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); FP8x23 { mag: number_u32 * ONE_fp8x23, sign: number_sign } } } impl I8IntoFP16x16 of Into<i8, FP16x16> { fn into(self: i8) -> FP16x16 { let number_sign: bool = self < 0; let mut self_positive: i8 = self; if number_sign { self_positive = self_positive * -1_i8 } let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); FP16x16 { mag: number_u32 * ONE_fp16x16, sign: number_sign } } } impl I8IntoFP64x64 of Into<i8, FP64x64> { fn into(self: i8) -> FP64x64 { let number_sign: bool = self < 0; let mut self_positive: i8 = self; if number_sign { self_positive = self_positive * -1_i8 } let number_felt: felt252 = self_positive.into(); let number_u128: u128 = number_felt.try_into().unwrap(); FP64x64 { mag: number_u128 * ONE_fp64x64, sign: number_sign } } } impl I8IntoFP32x32 of Into<i8, FP32x32> { fn into(self: i8) -> FP32x32 { let number_sign: bool = self < 0; let mut self_positive: i8 = self; if number_sign { self_positive = self_positive * -1_i8 } let number_felt: felt252 = self_positive.into(); let number_u128: u64 = number_felt.try_into().unwrap(); FP32x32 { mag: number_u128 * ONE_fp32x32, sign: number_sign } } } impl I16Number of 
// NumberTrait<i16, i16> — same pattern as the i8 impl: fractional/transcendental
// ops panic 'not supported!', +/-32767 doubles as the INF sentinel (so
// min_value() is -32767, not the i16 minimum -32768), and logic ops treat 0 as
// false / non-zero as true.
NumberTrait<i16, i16> { fn new(mag: i16, sign: bool) -> i16 { if sign { return -mag; } mag } fn new_unscaled(mag: i16, sign: bool) -> i16 { mag } fn from_felt(val: felt252) -> i16 { panic(array!['not supported!']) } fn ceil(self: i16) -> i16 { panic(array!['not supported!']) } fn exp(self: i16) -> i16 { panic(array!['not supported!']) } fn exp2(self: i16) -> i16 { panic(array!['not supported!']) } fn floor(self: i16) -> i16 { panic(array!['not supported!']) } fn ln(self: i16) -> i16 { panic(array!['not supported!']) } fn log2(self: i16) -> i16 { panic(array!['not supported!']) } fn log10(self: i16) -> i16 { panic(array!['not supported!']) } fn pow(self: i16, b: i16) -> i16 { panic(array!['not supported!']) } fn round(self: i16) -> i16 { panic(array!['not supported!']) } fn sqrt(self: i16) -> i16 { panic(array!['not supported!']) } fn acos(self: i16) -> i16 { panic(array!['not supported!']) } fn asin(self: i16) -> i16 { panic(array!['not supported!']) } fn atan(self: i16) -> i16 { panic(array!['not supported!']) } fn cos(self: i16) -> i16 { panic(array!['not supported!']) } fn sin(self: i16) -> i16 { panic(array!['not supported!']) } fn tan(self: i16) -> i16 { panic(array!['not supported!']) } fn acosh(self: i16) -> i16 { panic(array!['not supported!']) } fn asinh(self: i16) -> i16 { panic(array!['not supported!']) } fn atanh(self: i16) -> i16 { panic(array!['not supported!']) } fn cosh(self: i16) -> i16 { panic(array!['not supported!']) } fn sinh(self: i16) -> i16 { panic(array!['not supported!']) } fn tanh(self: i16) -> i16 { panic(array!['not supported!']) } fn zero() -> i16 { 0 } fn is_zero(self: i16) -> bool { self == 0 } fn half() -> i16 { panic(array!['not supported!']) } fn one() -> i16 { 1 } fn neg_one() -> i16 { -1 } fn is_one(self: i16) -> bool { self == 1 } fn abs(self: i16) -> i16 { if self >= 0 { self } else { self * -1_i16 } } fn neg(self: i16) -> i16 { self * -1_i16 } fn min_value() -> i16 { -32767 } fn max_value() -> i16 { 32767 } fn min(self: i16, 
other: i16) -> i16 { if self < other { self } else { other } } fn max(self: i16, other: i16) -> i16 { if self > other { self } else { other } } fn mag(self: i16) -> i16 { self } fn is_neg(self: i16) -> bool { self < 0 } fn xor(lhs: i16, rhs: i16) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { true } else { false } } fn or(lhs: i16, rhs: i16) -> bool { if lhs == 0 && rhs == 0 { false } else { true } } fn sign(self: i16) -> i16 { if self == 0 { 0_i16 } else if self > 0 { 1_i16 } else { -1_i16 } } fn and(lhs: i16, rhs: i16) -> bool { if lhs == 0 || rhs == 0 { false } else { true } } fn where(self: i16, x: i16, y: i16) -> i16 { if self == 0 { y } else { x } } fn NaN() -> i16 { panic(array!['not supported!']) } fn is_nan(self: i16) -> bool { panic(array!['not supported!']) } fn INF() -> i16 { 32767 } fn is_inf(self: i16) -> bool { self == 32767 || self == -32767 } fn is_pos_inf(self: i16) -> bool { self == 32767 } fn is_neg_inf(self: i16) -> bool { self == -32767 } fn bitwise_and(lhs: i16, rhs: i16) -> i16 { panic(array!['not supported!']) } fn bitwise_xor(lhs: i16, rhs: i16) -> i16 { panic(array!['not supported!']) } fn bitwise_or(lhs: i16, rhs: i16) -> i16 { panic(array!['not supported!']) } fn add(lhs: i16, rhs: i16) -> i16 { lhs + rhs } fn sub(lhs: i16, rhs: i16) -> i16 { lhs - rhs } } impl I16Div of Div<i16> { fn div(lhs: i16, rhs: i16) -> i16 { assert(rhs != 0, 'divisor cannot be 0'); let mut lhs_positive = lhs; let mut rhs_positive = rhs; // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; } if rhs < 0 { rhs_positive = rhs * -1; } //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i16 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { 
// I16Div tail + I16DivEq, then NumberTrait<i32, i32>: identical structure to the
// i8/i16 impls with +/-2147483647 as the INF sentinel (min_value() is therefore
// -2147483647). NOTE(review): the `lhs * rhs < 0` sign test in the Div impls
// can overflow for large operands — same concern as I8Div.
signed_int_result * -1 } else { signed_int_result } } } impl I16DivEq of DivEq<i16> { #[inline(always)] fn div_eq(ref self: i16, other: i16) { self = Div::div(self, other); } } impl I32Number of NumberTrait<i32, i32> { fn new(mag: i32, sign: bool) -> i32 { if sign { return -mag; } mag } fn new_unscaled(mag: i32, sign: bool) -> i32 { mag } fn from_felt(val: felt252) -> i32 { panic(array!['not supported!']) } fn ceil(self: i32) -> i32 { panic(array!['not supported!']) } fn exp(self: i32) -> i32 { panic(array!['not supported!']) } fn exp2(self: i32) -> i32 { panic(array!['not supported!']) } fn floor(self: i32) -> i32 { panic(array!['not supported!']) } fn ln(self: i32) -> i32 { panic(array!['not supported!']) } fn log2(self: i32) -> i32 { panic(array!['not supported!']) } fn log10(self: i32) -> i32 { panic(array!['not supported!']) } fn pow(self: i32, b: i32) -> i32 { panic(array!['not supported!']) } fn round(self: i32) -> i32 { panic(array!['not supported!']) } fn sqrt(self: i32) -> i32 { panic(array!['not supported!']) } fn acos(self: i32) -> i32 { panic(array!['not supported!']) } fn asin(self: i32) -> i32 { panic(array!['not supported!']) } fn atan(self: i32) -> i32 { panic(array!['not supported!']) } fn cos(self: i32) -> i32 { panic(array!['not supported!']) } fn sin(self: i32) -> i32 { panic(array!['not supported!']) } fn tan(self: i32) -> i32 { panic(array!['not supported!']) } fn acosh(self: i32) -> i32 { panic(array!['not supported!']) } fn asinh(self: i32) -> i32 { panic(array!['not supported!']) } fn atanh(self: i32) -> i32 { panic(array!['not supported!']) } fn cosh(self: i32) -> i32 { panic(array!['not supported!']) } fn sinh(self: i32) -> i32 { panic(array!['not supported!']) } fn tanh(self: i32) -> i32 { panic(array!['not supported!']) } fn zero() -> i32 { 0 } fn is_zero(self: i32) -> bool { self == 0 } fn half() -> i32 { panic(array!['not supported!']) } fn one() -> i32 { 1 } fn neg_one() -> i32 { -1 } fn is_one(self: i32) -> bool { self == 1 } fn 
abs(self: i32) -> i32 { if self >= 0 { self } else { self * -1_i32 } } fn neg(self: i32) -> i32 { self * -1_i32 } fn min_value() -> i32 { -2147483647 } fn max_value() -> i32 { 2147483647 } fn min(self: i32, other: i32) -> i32 { if self < other { self } else { other } } fn max(self: i32, other: i32) -> i32 { if self > other { self } else { other } } fn mag(self: i32) -> i32 { self } fn is_neg(self: i32) -> bool { self < 0 } fn xor(lhs: i32, rhs: i32) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { true } else { false } } fn or(lhs: i32, rhs: i32) -> bool { if lhs == 0 && rhs == 0 { false } else { true } } fn sign(self: i32) -> i32 { if self == 0 { 0_i32 } else if self > 0 { 1_i32 } else { -1_i32 } } fn and(lhs: i32, rhs: i32) -> bool { if lhs == 0 || rhs == 0 { false } else { true } } fn where(self: i32, x: i32, y: i32) -> i32 { if self == 0 { y } else { x } } fn NaN() -> i32 { panic(array!['not supported!']) } fn is_nan(self: i32) -> bool { panic(array!['not supported!']) } fn INF() -> i32 { 2147483647 } fn is_inf(self: i32) -> bool { self == 2147483647 || self == -2147483647 } fn is_pos_inf(self: i32) -> bool { self == 2147483647 } fn is_neg_inf(self: i32) -> bool { self == -2147483647 } fn bitwise_and(lhs: i32, rhs: i32) -> i32 { panic(array!['not supported!']) } fn bitwise_xor(lhs: i32, rhs: i32) -> i32 { panic(array!['not supported!']) } fn bitwise_or(lhs: i32, rhs: i32) -> i32 { panic(array!['not supported!']) } fn add(lhs: i32, rhs: i32) -> i32 { lhs + rhs } fn sub(lhs: i32, rhs: i32) -> i32 { lhs - rhs } } impl I32Div of Div<i32> { fn div(lhs: i32, rhs: i32) -> i32 { assert(rhs != 0, 'divisor cannot be 0'); let mut lhs_positive = lhs; let mut rhs_positive = rhs; // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; } if rhs < 0 { rhs_positive = rhs * -1; } //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = 
// I32Div tail + I32DivEq, then I32IntoU32: note this conversion maps a negative
// i32 to its MAGNITUDE (it negates first, then converts) — it is an
// absolute-value cast, not a two's-complement reinterpretation.
// Then NumberTrait<i64, i64> begins, following the same integer-impl pattern.
lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i32 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { signed_int_result * -1 } else { signed_int_result } } } impl I32DivEq of DivEq<i32> { #[inline(always)] fn div_eq(ref self: i32, other: i32) { self = Div::div(self, other); } } impl I32IntoU32 of Into<i32, u32> { fn into(self: i32) -> u32 { let number_sign: bool = self < 0; let mut self_positive: i32 = self; if number_sign { self_positive = self_positive * -1_i32 } let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); number_u32 } } impl I64Number of NumberTrait<i64, i64> { fn new(mag: i64, sign: bool) -> i64 { if sign { return -mag; } mag } fn new_unscaled(mag: i64, sign: bool) -> i64 { mag } fn from_felt(val: felt252) -> i64 { panic(array!['not supported!']) } fn ceil(self: i64) -> i64 { panic(array!['not supported!']) } fn exp(self: i64) -> i64 { panic(array!['not supported!']) } fn exp2(self: i64) -> i64 { panic(array!['not supported!']) } fn floor(self: i64) -> i64 { panic(array!['not supported!']) } fn ln(self: i64) -> i64 { panic(array!['not supported!']) } fn log2(self: i64) -> i64 { panic(array!['not supported!']) } fn log10(self: i64) -> i64 { panic(array!['not supported!']) } fn pow(self: i64, b: i64) -> i64 { panic(array!['not supported!']) } fn round(self: i64) -> i64 { panic(array!['not supported!']) } fn sqrt(self: i64) -> i64 { panic(array!['not supported!']) } fn acos(self: i64) -> i64 { panic(array!['not supported!']) } fn asin(self: i64) -> i64 { panic(array!['not supported!']) } fn atan(self: i64) -> i64 { panic(array!['not supported!']) } fn cos(self: i64) -> i64 { panic(array!['not supported!']) } fn sin(self: i64) -> i64 { panic(array!['not supported!']) } fn tan(self: i64) -> i64 { panic(array!['not supported!']) } fn acosh(self: i64) -> i64 { 
// i64 impl continues: +/-9223372036854775807 is the INF sentinel, so
// min_value() is -9223372036854775807 rather than the i64 minimum.
panic(array!['not supported!']) } fn asinh(self: i64) -> i64 { panic(array!['not supported!']) } fn atanh(self: i64) -> i64 { panic(array!['not supported!']) } fn cosh(self: i64) -> i64 { panic(array!['not supported!']) } fn sinh(self: i64) -> i64 { panic(array!['not supported!']) } fn tanh(self: i64) -> i64 { panic(array!['not supported!']) } fn zero() -> i64 { 0 } fn is_zero(self: i64) -> bool { self == 0 } fn half() -> i64 { panic(array!['not supported!']) } fn one() -> i64 { 1 } fn neg_one() -> i64 { -1 } fn is_one(self: i64) -> bool { self == 1 } fn abs(self: i64) -> i64 { if self >= 0 { self } else { self * -1_i64 } } fn neg(self: i64) -> i64 { self * -1_i64 } fn min_value() -> i64 { -9223372036854775807 } fn max_value() -> i64 { 9223372036854775807 } fn min(self: i64, other: i64) -> i64 { if self < other { self } else { other } } fn max(self: i64, other: i64) -> i64 { if self > other { self } else { other } } fn mag(self: i64) -> i64 { self } fn is_neg(self: i64) -> bool { self < 0 } fn xor(lhs: i64, rhs: i64) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { true } else { false } } fn or(lhs: i64, rhs: i64) -> bool { if lhs == 0 && rhs == 0 { false } else { true } } fn sign(self: i64) -> i64 { if self == 0 { 0_i64 } else if self > 0 { 1_i64 } else { -1_i64 } } fn and(lhs: i64, rhs: i64) -> bool { if lhs == 0 || rhs == 0 { false } else { true } } fn where(self: i64, x: i64, y: i64) -> i64 { if self == 0 { y } else { x } } fn NaN() -> i64 { panic(array!['not supported!']) } fn is_nan(self: i64) -> bool { panic(array!['not supported!']) } fn INF() -> i64 { 9223372036854775807 } fn is_inf(self: i64) -> bool { self == 9223372036854775807 || self == -9223372036854775807 } fn is_pos_inf(self: i64) -> bool { self == 9223372036854775807 } fn is_neg_inf(self: i64) -> bool { self == -9223372036854775807 } fn bitwise_and(lhs: i64, rhs: i64) -> i64 { panic(array!['not supported!']) } fn bitwise_xor(lhs: i64, rhs: i64) -> i64 { panic(array!['not supported!']) } fn 
bitwise_or(lhs: i64, rhs: i64) -> i64 { panic(array!['not supported!']) } fn add(lhs: i64, rhs: i64) -> i64 { lhs + rhs } fn sub(lhs: i64, rhs: i64) -> i64 { lhs - rhs } } impl I64Div of Div<i64> { fn div(lhs: i64, rhs: i64) -> i64 { assert(rhs != 0, 'divisor cannot be 0'); let mut lhs_positive = lhs; let mut rhs_positive = rhs; // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; } if rhs < 0 { rhs_positive = rhs * -1; } //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i64 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { signed_int_result * -1 } else { signed_int_result } } } impl I64DivEq of DivEq<i64> { #[inline(always)] fn div_eq(ref self: i64, other: i64) { self = Div::div(self, other); } } impl I128Number of NumberTrait<i128, i128> { fn new(mag: i128, sign: bool) -> i128 { if sign { return -mag; } mag } fn new_unscaled(mag: i128, sign: bool) -> i128 { mag } fn from_felt(val: felt252) -> i128 { panic(array!['not supported!']) } fn ceil(self: i128) -> i128 { panic(array!['not supported!']) } fn exp(self: i128) -> i128 { panic(array!['not supported!']) } fn exp2(self: i128) -> i128 { panic(array!['not supported!']) } fn floor(self: i128) -> i128 { panic(array!['not supported!']) } fn ln(self: i128) -> i128 { panic(array!['not supported!']) } fn log2(self: i128) -> i128 { panic(array!['not supported!']) } fn log10(self: i128) -> i128 { panic(array!['not supported!']) } fn pow(self: i128, b: i128) -> i128 { panic(array!['not supported!']) } fn round(self: i128) -> i128 { panic(array!['not supported!']) } fn sqrt(self: i128) -> i128 { panic(array!['not supported!']) } fn acos(self: i128) -> i128 { panic(array!['not supported!']) } fn asin(self: 
i128) -> i128 { panic(array!['not supported!']) } fn atan(self: i128) -> i128 { panic(array!['not supported!']) } fn cos(self: i128) -> i128 { panic(array!['not supported!']) } fn sin(self: i128) -> i128 { panic(array!['not supported!']) } fn tan(self: i128) -> i128 { panic(array!['not supported!']) } fn acosh(self: i128) -> i128 { panic(array!['not supported!']) } fn asinh(self: i128) -> i128 { panic(array!['not supported!']) } fn atanh(self: i128) -> i128 { panic(array!['not supported!']) } fn cosh(self: i128) -> i128 { panic(array!['not supported!']) } fn sinh(self: i128) -> i128 { panic(array!['not supported!']) } fn tanh(self: i128) -> i128 { panic(array!['not supported!']) } fn zero() -> i128 { 0 } fn is_zero(self: i128) -> bool { self == 0 } fn half() -> i128 { panic(array!['not supported!']) } fn one() -> i128 { 1 } fn neg_one() -> i128 { -1 } fn is_one(self: i128) -> bool { self == 1 } fn abs(self: i128) -> i128 { if self >= 0 { self } else { self * -1_i128 } } fn neg(self: i128) -> i128 { self * -1_i128 } fn min_value() -> i128 { -170141183460469231731687303715884105727 } fn max_value() -> i128 { 170141183460469231731687303715884105727 } fn min(self: i128, other: i128) -> i128 { if self < other { self } else { other } } fn max(self: i128, other: i128) -> i128 { if self > other { self } else { other } } fn mag(self: i128) -> i128 { self } fn is_neg(self: i128) -> bool { self < 0 } fn xor(lhs: i128, rhs: i128) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { true } else { false } } fn or(lhs: i128, rhs: i128) -> bool { if lhs == 0 && rhs == 0 { false } else { true } } fn sign(self: i128) -> i128 { if self == 0 { 0_i128 } else if self > 0 { 1_i128 } else { -1_i128 } } fn and(lhs: i128, rhs: i128) -> bool { if lhs == 0 || rhs == 0 { false } else { true } } fn where(self: i128, x: i128, y: i128) -> i128 { if self == 0 { y } else { x } } fn NaN() -> i128 { panic(array!['not supported!']) } fn is_nan(self: i128) -> bool { panic(array!['not supported!']) } fn 
INF() -> i128 { 170141183460469231731687303715884105727 } fn is_inf(self: i128) -> bool { self == 170141183460469231731687303715884105727 || self == -170141183460469231731687303715884105727 } fn is_pos_inf(self: i128) -> bool { self == 170141183460469231731687303715884105727 } fn is_neg_inf(self: i128) -> bool { self == -170141183460469231731687303715884105727 } fn bitwise_and(lhs: i128, rhs: i128) -> i128 { panic(array!['not supported!']) } fn bitwise_xor(lhs: i128, rhs: i128) -> i128 { panic(array!['not supported!']) } fn bitwise_or(lhs: i128, rhs: i128) -> i128 { panic(array!['not supported!']) } fn add(lhs: i128, rhs: i128) -> i128 { lhs + rhs } fn sub(lhs: i128, rhs: i128) -> i128 { lhs - rhs } } impl I128Div of Div<i128> { fn div(lhs: i128, rhs: i128) -> i128 { assert(rhs != 0, 'divisor cannot be 0'); let mut lhs_positive = lhs; let mut rhs_positive = rhs; // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; } if rhs < 0 { rhs_positive = rhs * -1; } //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i128 = felt_result.try_into().unwrap(); // assigning the sign and returning if lhs * rhs < 0 { signed_int_result * -1 } else { signed_int_result } } } impl I128DivEq of DivEq<i128> { #[inline(always)] fn div_eq(ref self: i128, other: i128) { self = Div::div(self, other); } } impl u32Number of NumberTrait<u32, u32> { fn new(mag: u32, sign: bool) -> u32 { mag } fn new_unscaled(mag: u32, sign: bool) -> u32 { mag } fn from_felt(val: felt252) -> u32 { panic(array!['not supported!']) } fn ceil(self: u32) -> u32 { panic(array!['not supported!']) } fn exp(self: u32) -> u32 { panic(array!['not supported!']) } fn exp2(self: u32) -> u32 { panic(array!['not 
supported!']) } fn floor(self: u32) -> u32 { panic(array!['not supported!']) } fn ln(self: u32) -> u32 { panic(array!['not supported!']) } fn log2(self: u32) -> u32 { panic(array!['not supported!']) } fn log10(self: u32) -> u32 { panic(array!['not supported!']) } fn pow(self: u32, b: u32) -> u32 { panic(array!['not supported!']) } fn round(self: u32) -> u32 { panic(array!['not supported!']) } fn sqrt(self: u32) -> u32 { panic(array!['not supported!']) } fn acos(self: u32) -> u32 { panic(array!['not supported!']) } fn asin(self: u32) -> u32 { panic(array!['not supported!']) } fn atan(self: u32) -> u32 { panic(array!['not supported!']) } fn cos(self: u32) -> u32 { panic(array!['not supported!']) } fn sin(self: u32) -> u32 { panic(array!['not supported!']) } fn tan(self: u32) -> u32 { panic(array!['not supported!']) } fn acosh(self: u32) -> u32 { panic(array!['not supported!']) } fn asinh(self: u32) -> u32 { panic(array!['not supported!']) } fn atanh(self: u32) -> u32 { panic(array!['not supported!']) } fn cosh(self: u32) -> u32 { panic(array!['not supported!']) } fn sinh(self: u32) -> u32 { panic(array!['not supported!']) } fn tanh(self: u32) -> u32 { panic(array!['not supported!']) } fn zero() -> u32 { 0 } fn is_zero(self: u32) -> bool { self == 0 } fn half() -> u32 { panic(array!['not supported!']) } fn one() -> u32 { 1 } fn neg_one() -> u32 { panic(array!['not supported']) } fn is_one(self: u32) -> bool { self == 1 } fn abs(self: u32) -> u32 { self } fn neg(self: u32) -> u32 { panic(array!['not supported']) } fn min_value() -> u32 { 0 } fn max_value() -> u32 { 4294967295 } fn min(self: u32, other: u32) -> u32 { if self < other { self } else { other } } fn max(self: u32, other: u32) -> u32 { if self > other { self } else { other } } fn mag(self: u32) -> u32 { self } fn is_neg(self: u32) -> bool { false } fn xor(lhs: u32, rhs: u32) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { true } else { false } } fn or(lhs: u32, rhs: u32) -> bool { if lhs == 0 && rhs == 0 
{ false } else { true } } fn sign(self: u32) -> u32 { panic(array!['not supported!']) } fn and(lhs: u32, rhs: u32) -> bool { if lhs == 0 || rhs == 0 { false } else { true } } fn where(self: u32, x: u32, y: u32) -> u32 { if self == 0 { y } else { x } } fn NaN() -> u32 { 4242424242 } fn is_nan(self: u32) -> bool { self == 4242424242 } fn INF() -> u32 { 4294967295 } fn is_inf(self: u32) -> bool { self == 4294967295 } fn is_pos_inf(self: u32) -> bool { self == 4294967295 } fn is_neg_inf(self: u32) -> bool { panic(array!['not supported!']) } fn bitwise_and(lhs: u32, rhs: u32) -> u32 { lhs & rhs } fn bitwise_xor(lhs: u32, rhs: u32) -> u32 { lhs ^ rhs } fn bitwise_or(lhs: u32, rhs: u32) -> u32 { lhs | rhs } fn add(lhs: u32, rhs: u32) -> u32 { lhs + rhs } fn sub(lhs: u32, rhs: u32) -> u32 { lhs - rhs } } use orion::numbers::complex_number::complex_trait::ComplexTrait; use orion::numbers::complex_number::complex64::{ Complex64Impl, complex64, Complex64Add, Complex64Sub }; impl Complex64Number of NumberTrait<complex64, FP64x64> { fn new(mag: FP64x64, sign: bool) -> complex64 { panic(array!['not supported!']) } fn new_unscaled(mag: FP64x64, sign: bool) -> complex64 { panic(array!['not supported!']) } fn from_felt(val: felt252) -> complex64 { panic(array!['not supported!']) } fn ceil(self: complex64) -> complex64 { panic(array!['not supported!']) } fn exp(self: complex64) -> complex64 { Complex64Impl::exp(self) } fn exp2(self: complex64) -> complex64 { Complex64Impl::exp2(self) } fn floor(self: complex64) -> complex64 { panic(array!['not supported!']) } fn ln(self: complex64) -> complex64 { Complex64Impl::ln(self) } fn log2(self: complex64) -> complex64 { Complex64Impl::log2(self) } fn log10(self: complex64) -> complex64 { Complex64Impl::log10(self) } fn pow(self: complex64, b: complex64) -> complex64 { Complex64Impl::pow(self, b) } fn round(self: complex64) -> complex64 { panic(array!['not supported!']) } fn sqrt(self: complex64) -> complex64 { Complex64Impl::sqrt(self) } fn 
acos(self: complex64) -> complex64 { Complex64Impl::acos(self) } fn asin(self: complex64) -> complex64 { Complex64Impl::asin(self) } fn atan(self: complex64) -> complex64 { Complex64Impl::atan(self) } fn cos(self: complex64) -> complex64 { Complex64Impl::cos(self) } fn sin(self: complex64) -> complex64 { Complex64Impl::sin(self) } fn tan(self: complex64) -> complex64 { Complex64Impl::tan(self) } fn acosh(self: complex64) -> complex64 { Complex64Impl::acosh(self) } fn asinh(self: complex64) -> complex64 { Complex64Impl::asinh(self) } fn atanh(self: complex64) -> complex64 { Complex64Impl::atanh(self) } fn cosh(self: complex64) -> complex64 { Complex64Impl::cosh(self) } fn sinh(self: complex64) -> complex64 { Complex64Impl::sinh(self) } fn tanh(self: complex64) -> complex64 { Complex64Impl::tanh(self) } fn zero() -> complex64 { Complex64Impl::zero() } fn is_zero(self: complex64) -> bool { if self == Complex64Impl::zero() { return true; } false } fn half() -> complex64 { panic(array!['not supported!']) } fn one() -> complex64 { Complex64Impl::one() } fn neg_one() -> complex64 { Complex64Impl::new(FP64x64 { mag: core_fp64x64::ONE, sign: true }, FP64x64Impl::ZERO()) } fn is_one(self: complex64) -> bool { if self == Complex64Impl::one() { return true; } false } fn abs(self: complex64) -> complex64 { Complex64Impl::new(Complex64Impl::mag(self), FP64x64Impl::ZERO()) } fn neg(self: complex64) -> complex64 { panic(array!['not supported!']) } fn min_value() -> complex64 { panic(array!['not supported!']) } fn max_value() -> complex64 { panic(array!['not supported!']) } fn min(self: complex64, other: complex64) -> complex64 { panic(array!['not supported!']) } fn max(self: complex64, other: complex64) -> complex64 { panic(array!['not supported!']) } fn mag(self: complex64) -> FP64x64 { Complex64Impl::mag(self) } fn is_neg(self: complex64) -> bool { panic(array!['not supported!']) } fn xor(lhs: complex64, rhs: complex64) -> bool { panic(array!['not supported!']) } fn or(lhs: 
complex64, rhs: complex64) -> bool { panic(array!['not supported!']) } fn sign(self: complex64) -> complex64 { panic(array!['not supported!']) } fn and(lhs: complex64, rhs: complex64) -> bool { panic(array!['not supported!']) } fn where(self: complex64, x: complex64, y: complex64) -> complex64 { panic(array!['not supported!']) } fn NaN() -> complex64 { panic(array!['not supported!']) } fn is_nan(self: complex64) -> bool { panic(array!['not supported!']) } fn INF() -> complex64 { panic(array!['not supported!']) } fn is_inf(self: complex64) -> bool { panic(array!['not supported!']) } fn is_pos_inf(self: complex64) -> bool { panic(array!['not supported!']) } fn is_neg_inf(self: complex64) -> bool { panic(array!['not supported!']) } fn bitwise_and(lhs: complex64, rhs: complex64) -> complex64 { panic(array!['not supported!']) } fn bitwise_xor(lhs: complex64, rhs: complex64) -> complex64 { panic(array!['not supported!']) } fn bitwise_or(lhs: complex64, rhs: complex64) -> complex64 { panic(array!['not supported!']) } fn add(lhs: complex64, rhs: complex64) -> complex64 { Complex64Add::add(lhs, rhs) } fn sub(lhs: complex64, rhs: complex64) -> complex64 { Complex64Sub::sub(lhs, rhs) } } impl U32IntoI32 of Into<u32, i32> { fn into(self: u32) -> i32 { let number_felt: felt252 = self.into(); let number_i32: i32 = number_felt.try_into().unwrap(); number_i32 } }
https://github.com/gizatechxyz/orion
src/numbers/complex_number.cairo
// Complex-number module: the generic ComplexTrait interface and its
// concrete complex64 implementation (Cartesian form over FP64x64).
mod complex_trait;
mod complex64;
https://github.com/gizatechxyz/orion
src/numbers/complex_number/complex64.cairo
use core::debug::PrintTrait;

use orion::numbers::complex_number::complex_trait::ComplexTrait;
use orion::numbers::{FP64x64, FP64x64Impl, FP32x32, FP32x32Impl, FixedTrait};

// ====================== Complex 64 ======================

// complex64 represents a complex number in the Cartesian form z = a + bi
// where a and b are Fixed Points FP64x64.
// The real field holds the value of the real part.
// The img field holds the value of the imaginary part.
#[derive(Serde, Copy, Drop)]
struct complex64 {
    real: FP64x64,
    img: FP64x64,
}

// CONSTANTS for FP64x64 (value * 2^64)
const PI: u128 = 57952155664616982739;
const HALF_PI: u128 = 28976077832308491370;
const TWO: u128 = 36893488147419103232;
const E: u128 = 50143449208471493718;
const HALF: u128 = 9223372036854775808;

impl Complex64Impl of ComplexTrait<complex64, FP64x64> {
    /// Constructs a complex number from its real and imaginary parts.
    fn new(real: FP64x64, img: FP64x64) -> complex64 {
        complex64 { real, img }
    }

    /// Returns the real part a of z = a + bi.
    fn real(self: complex64) -> FP64x64 {
        self.real
    }

    /// Returns the imaginary part b of z = a + bi.
    fn img(self: complex64) -> FP64x64 {
        self.img
    }

    /// Returns the conjugate z̅ = a - bi.
    fn conjugate(self: complex64) -> complex64 {
        ComplexTrait::new(self.real, -self.img)
    }

    /// Additive identity 0 + 0i.
    fn zero() -> complex64 {
        complex64 { real: FixedTrait::ZERO(), img: FP64x64Impl::ZERO() }
    }

    /// Multiplicative identity 1 + 0i.
    fn one() -> complex64 {
        complex64 { real: FP64x64Impl::ONE(), img: FP64x64Impl::ZERO() }
    }

    /// Magnitude |z| = sqrt(a^2 + b^2).
    fn mag(self: complex64) -> FP64x64 {
        let two = FP64x64Impl::new(TWO, false);
        (self.real.pow(two) + self.img.pow(two)).sqrt()
    }

    /// Argument arg(z) = atan2(b, a); see the atan2 helper's parameter order.
    fn arg(self: complex64) -> FP64x64 {
        atan2(self.real, self.img)
    }

    /// exp(a + bi) = e^a (cos b + i sin b).
    fn exp(self: complex64) -> complex64 {
        let real = self.real.exp() * self.img.cos();
        let img = self.real.exp() * self.img.sin();
        complex64 { real, img }
    }

    /// 2^z, computed as pow(2 + 0i, z).
    fn exp2(self: complex64) -> complex64 {
        let two = complex64 { real: FP64x64Impl::new(TWO, false), img: FP64x64Impl::ZERO() };
        two.pow(self)
    }

    /// Principal square root:
    /// sqrt(z) = sqrt((|z| + x) / 2) + i * sign(y) * sqrt((|z| - x) / 2).
    fn sqrt(self: complex64) -> complex64 {
        let x = self.real;
        let y = self.img;
        let two = FP64x64Impl::new(TWO, false);
        let real = (((x.pow(two) + y.pow(two)).sqrt() + x) / two).sqrt();
        let img = if y == FP64x64Impl::ZERO() {
            FP64x64Impl::ZERO()
        } else {
            (((x.pow(two) + y.pow(two)).sqrt() - x) / two).sqrt()
        };
        // The imaginary part inherits the sign of y.
        let img = FP64x64Impl::new(img.mag, y.sign);
        complex64 { real, img }
    }

    /// Principal natural logarithm: ln(z) = ln|z| + i arg(z).
    fn ln(self: complex64) -> complex64 {
        let real = self.mag().ln();
        let img = self.arg();
        complex64 { real, img }
    }

    /// log2(z) = ln(z) / ln(2).
    fn log2(self: complex64) -> complex64 {
        // ln(2) in FP64x64.
        let ln_2 = FP64x64Impl::new(12786309186476892720, false);
        let ln = self.ln();
        complex64 { real: (ln.real / ln_2), img: (ln.img / ln_2) }
    }

    /// log10(z) = ln(z) / ln(10).
    fn log10(self: complex64) -> complex64 {
        // ln(10) in FP64x64.
        let ln_10 = FP64x64Impl::new(42475197399893398429, false);
        let ln = self.ln();
        complex64 { real: (ln.real / ln_10), img: (ln.img / ln_10) }
    }

    /// z^b with fast paths for b == 2 and real exponents.
    fn pow(self: complex64, b: complex64) -> complex64 {
        let two = FP64x64Impl::new(TWO, false);
        let x = self.real;
        let y = self.img;

        //z^2=(a^2-b^2)+2abi
        if (b.real == two && b.img == FP64x64Impl::new(0, false)) {
            let real = x.pow(two) - y.pow(two);
            let img = two * x * y;
            return complex64 { real, img };
        }

        //(a+bi)^n=r^n(cos(nθ)+isin(nθ))
        if (b.img == FP64x64Impl::new(0, false)) {
            let mag_pow_n = self.mag().pow(b.real);
            let arg_mul_n = b.real * self.arg();
            let real = mag_pow_n * arg_mul_n.cos();
            let img = mag_pow_n * arg_mul_n.sin();
            return complex64 { real, img };
        }

        //let z = (a+bi); (a+bi)^(c+di) = e^(c ln|z| - d arg(z)) * cos(c arg(z) + d ln|z|)
        //                              + i e^(c ln|z| - d arg(z)) * sin(c arg(z) + d ln|z|)
        //let A = e^(c ln|z| - d arg(z)) and B = c arg(z) + d ln|z|
        //(a+bi)^(c+di) = A cos(B) + i A sin(B)
        let A = FP64x64Impl::new(E, false).pow(b.real * self.mag().ln() - b.img * self.arg());
        let B = b.real * self.arg() + b.img * self.mag().ln();
        let real = A * B.cos();
        let img = A * B.sin();
        complex64 { real, img }
    }

    //cos(z) = cos(a+bi) = cos(a)cosh(b)-isin(a)sinh(b)
    fn cos(self: complex64) -> complex64 {
        let a = self.real;
        let b = self.img;
        complex64 {
            real: FP64x64Impl::cos(a) * FP64x64Impl::cosh(b),
            img: -FP64x64Impl::sin(a) * FP64x64Impl::sinh(b)
        }
    }

    //sin(z) = sin(a+bi) = sin(a)cosh(b)+icos(a)sinh(b)
    fn sin(self: complex64) -> complex64 {
        let a = self.real;
        let b = self.img;
        complex64 {
            real: FP64x64Impl::sin(a) * FP64x64Impl::cosh(b),
            img: FP64x64Impl::cos(a) * FP64x64Impl::sinh(b)
        }
    }

    //tan(z) = tan(a+bi) = sin(2a) / (cosh(2b) + cos(2a)) + i sinh(2b) / (cosh(2b) + cos(2a))
    fn tan(self: complex64) -> complex64 {
        let two = FP64x64Impl::new(TWO, false);
        let a = self.real;
        let b = self.img;
        let den = FP64x64Impl::cosh(two * b) + FP64x64Impl::cos(two * a);
        complex64 { real: FP64x64Impl::sin(two * a) / den, img: FP64x64Impl::sinh(two * b) / den }
    }

    //acos(z) = pi/2 + i ln(iz + sqrt(1 - z**2))
    fn acos(self: complex64) -> complex64 {
        let pi = Complex64Impl::new(FP64x64Impl::new(PI, false), FP64x64Impl::ZERO());
        let two = Complex64Impl::new(FP64x64Impl::new(TWO, false), FP64x64Impl::ZERO());
        let i = Complex64Impl::new(FP64x64Impl::ZERO(), FP64x64Impl::ONE());
        let one = Complex64Impl::new(FP64x64Impl::ONE(), FP64x64Impl::ZERO());
        let acos = pi / two
            + i * Complex64Impl::ln(i * self + Complex64Impl::sqrt(one - (self.pow(two))));
        acos
    }

    //asin(z) = -i ln(iz + sqrt(1 - z**2))
    fn asin(self: complex64) -> complex64 {
        let two = Complex64Impl::new(FP64x64Impl::new(TWO, false), FP64x64Impl::ZERO());
        let i = Complex64Impl::new(FP64x64Impl::ZERO(), FP64x64Impl::ONE());
        let one = Complex64Impl::new(FP64x64Impl::ONE(), FP64x64Impl::ZERO());
        let asin = -i * Complex64Impl::ln(i * self + Complex64Impl::sqrt(one - (self.pow(two))));
        asin
    }

    //atan(z) = 1/2 * i[ln (1 - iz) - ln(1 + iz)]
    fn atan(self: complex64) -> complex64 {
        let two = Complex64Impl::new(FP64x64Impl::new(TWO, false), FP64x64Impl::ZERO());
        let i = Complex64Impl::new(FP64x64Impl::ZERO(), FP64x64Impl::ONE());
        let one = Complex64Impl::new(FP64x64Impl::ONE(), FP64x64Impl::ZERO());
        let atan = one
            / two
            * i
            * (Complex64Impl::ln(one - i * self) - Complex64Impl::ln(one + i * self));
        atan
    }

    //acosh(z) = ln (z + sqrt(z + 1) * sqrt(z - 1))
    fn acosh(self: complex64) -> complex64 {
        let one = Complex64Impl::new(FP64x64Impl::ONE(), FP64x64Impl::ZERO());
        let acosh = Complex64Impl::ln(
            self + Complex64Impl::sqrt(self + one) * Complex64Impl::sqrt(self - one)
        );
        acosh
    }

    //asinh(z) = ln (z + sqrt(z**2 + 1))
    fn asinh(self: complex64) -> complex64 {
        let one = Complex64Impl::new(FP64x64Impl::ONE(), FP64x64Impl::ZERO());
        let two = Complex64Impl::new(FP64x64Impl::new(TWO, false), FP64x64Impl::ZERO());
        let asinh = Complex64Impl::ln(self + Complex64Impl::sqrt(one + (self.pow(two))));
        asinh
    }

    //atanh(z) = 1/2 * [ln (1 + z) - ln(1 - z)]
    fn atanh(self: complex64) -> complex64 {
        let two = Complex64Impl::new(FP64x64Impl::new(TWO, false), FP64x64Impl::ZERO());
        let one = Complex64Impl::new(FP64x64Impl::ONE(), FP64x64Impl::ZERO());
        let atanh = (Complex64Impl::ln(one + self) - Complex64Impl::ln(one - self)) / two;
        atanh
    }

    //cosh(z) = cosh(a+bi) = cosh(a)cos(b) + i sinh(a)sin(b)
    fn cosh(self: complex64) -> complex64 {
        let a = self.real;
        let b = self.img;
        complex64 {
            real: FP64x64Impl::cosh(a) * FP64x64Impl::cos(b),
            img: FP64x64Impl::sinh(a) * FP64x64Impl::sin(b)
        }
    }

    //sinh(z) = sinh(a+bi) = sinh(a)cos(b) + i cosh(a)sin(b)
    fn sinh(self: complex64) -> complex64 {
        let a = self.real;
        let b = self.img;
        complex64 {
            real: FP64x64Impl::sinh(a) * FP64x64Impl::cos(b),
            img: FP64x64Impl::cosh(a) * FP64x64Impl::sin(b)
        }
    }

    //tanh(z) = tanh(a+bi) = sinh(2a) / (cosh(2a) + cos(2b)) + i sin(2b) / (cosh(2a) + cos(2b))
    fn tanh(self: complex64) -> complex64 {
        let two = FP64x64Impl::new(TWO, false);
        let a = self.real;
        let b = self.img;
        let den = FP64x64Impl::cosh(two * a) + FP64x64Impl::cos(two * b);
        complex64 { real: FP64x64Impl::sinh(two * a) / den, img: FP64x64Impl::sin(two * b) / den }
    }

    /// Converts to polar form (|z|, arg(z)).
    fn to_polar(self: complex64) -> (FP64x64, FP64x64) {
        let mag = self.mag();
        let arg = self.arg();
        (mag, arg)
    }

    /// Builds z = mag * (cos(arg) + i sin(arg)) from polar coordinates.
    fn from_polar(mag: FP64x64, arg: FP64x64) -> complex64 {
        let real = mag * arg.cos();
        let img = mag * arg.sin();
        complex64 { real, img }
    }

    /// 1/z = (x - yi) / (x^2 + y^2).
    fn reciprocal(self: complex64) -> complex64 {
        let two = FP64x64Impl::new(TWO, false);
        let x = self.real;
        let y = self.img;
        let real = x / (x.pow(two) + y.pow(two));
        let img = -y / (x.pow(two) + y.pow(two));
        complex64 { real, img }
    }
}

// Two-argument arctangent via the half-angle identity
// atan2(y, x) = 2 atan(y / (x + sqrt(x^2 + y^2))).
// NOTE: the first parameter is the x (real) coordinate, the second is y —
// callers pass (real, img).  Panics for the origin (0, 0), where the
// argument is undefined.
fn atan2(x: FP64x64, y: FP64x64) -> FP64x64 {
    let two = FP64x64Impl::new(TWO, false);
    if (y != FP64x64Impl::ZERO() || x > FP64x64Impl::ZERO()) {
        return two * (y / (x + (x.pow(two) + y.pow(two)).sqrt())).atan();
    } else if x < FP64x64Impl::ZERO() {
        // Negative real axis: the half-angle form degenerates; arg = pi.
        return FP64x64Impl::new(PI, false);
    } else {
        // FIX: panic message previously misspelled as 'undifined'.
        panic(array!['undefined'])
    }
}

// Debug printing as "<real>+<img>i" (the '+' is printed regardless of the
// imaginary part's sign).
impl Complex64Print of PrintTrait<complex64> {
    fn print(self: complex64) {
        self.real.print();
        '+'.print();
        self.img.print();
        'i'.print();
    }
}

// Implements the Add trait for complex64.
impl Complex64Add of Add<complex64> {
    fn add(lhs: complex64, rhs: complex64) -> complex64 {
        complex64_add(lhs, rhs)
    }
}

// Implements the AddEq trait for complex64.
impl Complex64AddEq of AddEq<complex64> {
    #[inline(always)]
    fn add_eq(ref self: complex64, other: complex64) {
        self = Add::add(self, other);
    }
}

// Implements the Sub trait for complex64.
impl Complex64Sub of Sub<complex64> {
    fn sub(lhs: complex64, rhs: complex64) -> complex64 {
        complex64_sub(lhs, rhs)
    }
}

// Implements the SubEq trait for complex64.
impl Complex64SubEq of SubEq<complex64> {
    #[inline(always)]
    fn sub_eq(ref self: complex64, other: complex64) {
        self = Sub::sub(self, other);
    }
}

// Implements the Mul trait for complex64.
impl Complex64Mul of Mul<complex64> {
    fn mul(lhs: complex64, rhs: complex64) -> complex64 {
        complex64_mul(lhs, rhs)
    }
}

// Implements the MulEq trait for complex64.
impl Complex64MulEq of MulEq<complex64> {
    #[inline(always)]
    fn mul_eq(ref self: complex64, other: complex64) {
        self = Mul::mul(self, other);
    }
}

// Implements the Div trait for complex64.
impl Complex64Div of Div<complex64> {
    fn div(lhs: complex64, rhs: complex64) -> complex64 {
        complex64_div(lhs, rhs)
    }
}

// Implements the DivEq trait for complex64.
impl Complex64DivEq of DivEq<complex64> {
    #[inline(always)]
    fn div_eq(ref self: complex64, other: complex64) {
        self = Div::div(self, other);
    }
}

// Implements the PartialEq trait for complex64.
impl Complex64PartialEq of PartialEq<complex64> {
    fn eq(lhs: @complex64, rhs: @complex64) -> bool {
        complex64_eq(*lhs, *rhs)
    }

    fn ne(lhs: @complex64, rhs: @complex64) -> bool {
        complex64_ne(*lhs, *rhs)
    }
}

// Implements the Neg trait for complex64.
impl Complex64Neg of Neg<complex64> {
    fn neg(a: complex64) -> complex64 {
        complex64_neg(a)
    }
}

/// Cf: ComplexTrait::new docstring

// Adds two complex64 complex numbers.
//
// The sum of two complex numbers (x + yi) + (u + vi) = (x + u) + (y + v)i.
// The result is a new complex number where the real part equals (x + u) and the imaginary part equals (y + v).
// # Arguments
// * `a` - The first complex64 to add.
// * `b` - The second complex64 to add.
// # Returns
// * `complex64` - The sum of `a` and `b`.
fn complex64_add(a: complex64, b: complex64) -> complex64 {
    let real = a.real + b.real;
    let img = a.img + b.img;
    ComplexTrait::new(real, img)
}

// Subtracts complex64 complex numbers.
//
// The difference of two complex numbers (x + yi) - (u + vi) = (x - u) + (y - v)i.
// The result is a new complex number where the real part equals (x - u) and the imaginary part equals (y - v).
// # Arguments
// * `a` - The first complex64 to subtract.
// * `b` - The second complex64 to subtract.
// # Returns
// * `complex64` - The difference of `a` and `b`.
fn complex64_sub(a: complex64, b: complex64) -> complex64 {
    let real = a.real - b.real;
    let img = a.img - b.img;
    ComplexTrait::new(real, img)
}

// Multiplies two complex64 complex numbers.
//
// The product of two complex numbers (x + yi) * (u + vi) = (xu - yv) + (xv + yu)i.
// The result is a new complex number where the real part equals (xu - yv) and the imaginary part equals (xv + yu).
// # Arguments
//
// * `a` - The first complex64 to multiply.
// * `b` - The second complex64 to multiply.
//
// # Returns
//
// * `complex64` - The product of `a` and `b`.
fn complex64_mul(a: complex64, b: complex64) -> complex64 {
    let real = a.real * b.real - a.img * b.img;
    let img = a.real * b.img + a.img * b.real;
    ComplexTrait::new(real, img)
}

// Divides the first complex64 by the second complex64: a / b = a * (1/b).
// # Arguments
// * `a` - The complex64 dividend.
// * `b` - The complex64 divisor.
// # Returns
// * `complex64` - The quotient of `a` and `b`.
fn complex64_div(a: complex64, b: complex64) -> complex64 {
    complex64_mul(a, b.reciprocal())
}

// Compares two complex64 complex numbers for equality.
// # Arguments
// * `a` - The first complex64 complex number to compare.
// * `b` - The second complex64 complex number to compare.
// # Returns
// * `bool` - `true` if the two complex numbers are equal, `false` otherwise.
fn complex64_eq(a: complex64, b: complex64) -> bool {
    // Check if the two complex numbers have the same real part and the same imaginary part.
    if a.real == b.real && a.img == b.img {
        return true;
    }
    false
}

// Compares two complex64 complex numbers for inequality.
// # Arguments
// * `a` - The first complex64 complex number to compare.
// * `b` - The second complex64 complex number to compare.
// # Returns
// * `bool` - `true` if the two complex numbers are not equal, `false` otherwise.
fn complex64_ne(a: complex64, b: complex64) -> bool {
    // The result is the inverse of the equal function.
    !complex64_eq(a, b)
}

// Negates the given complex64 complex number.
// # Arguments
// * `x` - The complex64 complex number to negate.
// # Returns
// * `complex64` - The negation of `x`.
fn complex64_neg(x: complex64) -> complex64 {
    // The negation of a complex number is obtained by negating its real part and its imaginary part.
    ComplexTrait::new(-x.real, -x.img)
}
https://github.com/gizatechxyz/orion
src/numbers/complex_number/complex_trait.cairo
/// Trait /// /// new - Constructs a new `complex_number`. /// from_felt - Creates a new `complex_number` instance from two felt252 values. /// real - Returns the real part of the `complex_number`. /// img - Returns the imaginary part of the `complex_number`. /// conjugate - Returns the conjugate of the `complex_number`. /// zero - Returns the additive identity element zero. /// one - Returns the multiplicative identity element one. /// mag - Returns the magnitude of the `complex_number`. /// arg - Returns the argument of the `complex_number`. /// exp - Returns the value of e raised to the power of the `complex_number`. /// exp2 - Returns the value of 2 raised to the power of the `complex_number`. /// ln - Returns the natural logarithm of the `complex_number`. /// log2 - Returns the base-2 logarithm of the `complex_number`. /// log10 - Returns the base-10 logarithm of the `complex_number`. /// pow - Returns the result of raising the `complex_number` to the power of another `complex_number`. /// sqrt - Returns the value of the squre root of the `complex_number`. /// acos - Returns the arccosine (inverse of cosine) of the `complex_number`. /// asin - Returns the arcsine (inverse of sine) of the `complex_number`. /// atan - Returns the arctangent (inverse of tangent) of the input `complex_number`. /// cos - Returns the cosine of the `complex_number`. /// sin - Returns the sine of the `complex_number`. /// tan - Returns the tangent of the `complex_number`. /// acosh - Returns the value of the inverse hyperbolic cosine of the `complex_number`. /// asinh - Returns the value of the inverse hyperbolic sine of the `complex_number`. /// atanh - Returns the value of the inverse hyperbolic tangent of the `complex_number`. /// cosh - Returns the value of the hyperbolic cosine of the `complex_number`. /// sinh - Returns the value of the hyperbolic sine of the `complex_number`. /// tanh - Returns the value of the hyperbolic tangent of the `complex_number`. 
/// to_polar - Returns the polar coordinates of the `complex_number`. /// from_polar - Returns a `complex_number` from the polar coordinates of the `complex_number`. /// reciprocal - Returns a the reciprocal of the `complex_number`. /// trait ComplexTrait<T, F> { /// # ComplexTrait::new /// /// ```rust /// fn new(real: F, img: F) -> T; /// ``` /// /// ## Args /// /// * `real`(`F`) - The real part of the complex number. /// * `img`(`F`) - The imaginary part of the complex number. /// /// ## Returns /// /// A new complex number. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// /// fn new_complex64_example() -> complex64 { /// ComplexTrait::new(FixedTrait::new(184467440737095516160, false), FixedTrait::new(18446744073709551616, false)) /// } /// >>> {real: {mag: 184467440737095516160, sign: false}, im: {mag: 18446744073709551616, sign: false}} // 10 + i /// ``` /// fn new(real: F, img: F) -> T; /// # ComplexTrait::real /// /// ```rust /// fn real(self: T) -> F; /// ``` /// /// Returns the real part of a complex number. The complex number is represented in Cartesian form `z = a + bi` where `a` is the real part. /// /// ## Args /// /// * `self`(`T`) - The complex number from which we want the real part. /// /// ## Returns /// /// A fixed point number `<F>`, representing the real part of `self` . 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn real_complex64_example() -> FP64x64 { /// let z: complex64 = ComplexTrait::new(FixedTrait::new(184467440737095516160, false), FixedTrait::new(18446744073709551616, false)); /// z.real() /// } /// >>> {mag: 184467440737095516160, sign: false} // 10 /// ``` /// fn real(self: T) -> F; /// # ComplexTrait::img /// /// ```rust /// fn img(self: T) -> F; /// ``` /// /// Returns the imaginary part of a complex number. The complex number is represented in Cartesian form `z = a + bi` where `b` is the imaginary part. /// /// ## Args /// /// * `self`(`T`) - The complex number from which we want the imaginary part. /// /// ## Returns /// /// A fixed point number `<F>`, representing the imaginary part of `self` . /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn img_complex64_example() -> FP64x64 { /// let z: complex64 = ComplexTrait::new(FixedTrait::new(184467440737095516160, false), FixedTrait::new(18446744073709551616, false)); /// z.img() /// } /// >>> {mag: 18446744073709551616, sign: false} // 1 /// ``` /// fn img(self: T) -> F; /// # ComplexTrait::conjugate /// /// ```rust /// fn conjugate(self: T) -> T; /// ``` /// /// Returns the conjugate of a complex number. The complex number is represented in Cartesian form `z = a + bi`. /// The conjugate of `z = a + bi` is `z̅ = a - bi` /// /// ## Args /// /// * `self`(`T`) - The complex number from which we want the conjugate. /// /// ## Returns /// /// A complex number `<T>`, representing the imaginary part of `self` . 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn conjugate_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new(FixedTrait::new(184467440737095516160, false), FixedTrait::new(18446744073709551616, false)); /// z.conjugate() /// } /// >>> {real: {mag: 184467440737095516160, sign: false}, im: {mag: 18446744073709551616, sign: true}} // 10 - i /// ``` /// fn conjugate(self: T) -> T; /// # ComplexTrait::zero /// /// ```rust /// fn zero() -> T; /// ``` /// /// Returns the additive identity element zero /// /// ## Returns /// /// A complex number `<T>`, representing the additive identity element of the complex field `0`. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn zero_complex64_example() -> complex64 { /// ComplexTrait::zero() /// } /// >>> {real: {mag: 0, sign: false}, im: {mag: 0, sign: false}} // 0 + 0i /// ``` /// fn zero() -> T; /// # ComplexTrait::one /// /// ```rust /// fn one() -> T; /// ``` /// /// Returns the multiplicative identity element one /// /// ## Returns /// /// A complex number `<T>`, representing the multiplicative identity element of the complex field : `1 + 0i`. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn one_complex64_example() -> complex64 { /// ComplexTrait::one() /// } /// >>> {real: {mag: 18446744073709551616, sign: false}, im: {mag: 0, sign: false}} // 1 + 0i /// ``` /// fn one() -> T; /// # ComplexTrait::mag /// /// ```rust /// fn mag(self: T) -> F; /// ``` /// /// Returns the magnitude of the complex number /// /// ## Args /// /// * `self`(`T`) - The input complex number /// /// ## Returns /// /// A fixed point number '<F>', representing the magnitude of the complex number. /// 'mag(z) = sqrt(a^2 + b^2)'. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn mag_complex64_example() -> FP64x64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(73786976294838206464, false), /// FixedTrait::new(774763251095801167872, false) /// ); // 4 + 42i /// z.mag() /// } /// >>> {mag: 0x2a30a6de7900000000, sign: false} // mag = 42.190046219457976 /// ``` /// fn mag(self: T) -> F; /// # ComplexTrait::arg /// /// ```rust /// fn arg(self: T) -> F; /// ``` /// /// Returns the argument of the complex number /// /// ## Args /// /// * `self`(`T`) - The input complex number /// /// ## Returns /// /// A fixed point number '<F>', representing the argument of the complex number in radian. /// 'arg(z) = atan2(b, a)'. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn arg_complex64_example() -> FP64x64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(73786976294838206464, false), /// FixedTrait::new(774763251095801167872, false) /// ); // 4 + 42i /// z.arg() /// } /// >>> {mag: 27224496882576083824, sign: false} // arg = 1.4758446204521403 (rad) /// ``` /// fn arg(self: T) -> F; /// # ComplexTrait::exp /// /// ```rust /// fn exp(self: T) -> T; /// ``` /// /// Returns the value of e raised to the power of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number /// /// ## Returns /// /// The natural exponent of the input complex number. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn exp_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(73786976294838206464, false), /// FixedTrait::new(774763251095801167872, false) /// ); // 4 + 42i /// ComplexTrait::exp(z) /// } /// >>> {real: {mag: 402848450095324460000, sign: true}, im: {mag: 923082101320478400000, sign: true}} // -21.838458238788455-50.04038098170736 i /// ``` /// fn exp(self: T) -> T; /// # ComplexTrait::exp2 /// /// ```rust /// fn exp2(self: T) -> T; /// ``` /// /// Returns the value of 2 raised to the power of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number /// /// ## Returns /// /// The binary exponent of the input complex number. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn exp2_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(73786976294838206464, false), /// FixedTrait::new(774763251095801167872, false) /// ); // 4 + 42i /// ComplexTrait::exp2(z) /// } /// >>> {real: {mag: 197471674372309809080, sign: true}, im: {mag: 219354605088992285353, sign: true}} // -10.70502356986 -11.89127707 i /// ``` /// fn exp2(self: T) -> T; /// # ComplexTrait::ln /// /// ```rust /// fn ln(self: T) -> T; /// ``` /// /// Returns the natural logarithm of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// A complex number representing the natural logarithm of the input number. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn ln_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(73786976294838206464, false), /// FixedTrait::new(774763251095801167872, false) /// ); // 4 + 42i /// z.ln() /// } /// >>> {real: {mag: 69031116512113681970, sign: false}, im: {mag: 27224496882576083824, sign: false}} // 3.7421843216430655 + 1.4758446204521403 i /// ``` /// fn ln(self: T) -> T; /// # ComplexTrait::log2 /// /// ```rust /// fn log2(self: T) -> T; /// ``` /// /// Returns the base-2 logarithm of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Panics /// /// * Panics if the input is negative. /// /// ## Returns /// /// A complex number representing the binary logarithm of the input number. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn log2_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.log2() /// } /// >>> {real: {mag: 34130530934667840346, sign: false}, im: {mag: 26154904847122126193, sign: false}} // 1.85021986 + 1.41787163 i /// ``` /// fn log2(self: T) -> T; /// # ComplexTrait::log10 /// /// ```rust /// fn log10(self: T) -> T; /// ``` /// /// Returns the base-10 logarithm of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// A complex number representing the base 10 logarithm of the input number. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn log10_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.log10() /// } /// >>> {real: {mag: 10274314139629458970, sign: false}, im: {mag: 7873411322133748801, sign: false}} // 0.5569716761 + 0.4268218908 i /// ``` /// fn log10(self: T) -> T; /// # ComplexTrait::pow /// /// ```rust /// fn pow(self: T, b: T) -> T; /// ``` /// /// Returns the result of raising the complex number to the power of another complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// * `b`(`T`) - The exponent complex number. /// /// ## Returns /// /// A complex number representing the result of z^w. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::complex_trait::ComplexTrait; /// use orion::numbers::complex_number::complex64::{TWO, complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn pow_2_complex64_example() -> complex64 { /// let two = ComplexTrait::new(FP64x64Impl::new(TWO, false),FP64x64Impl::new(0, false)); /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(73786976294838206464, false), /// FixedTrait::new(774763251095801167872, false) /// ); // 4 + 42i /// z.pow(two) /// } /// >>> {real: {mag: 32244908640844296224768, sign: true}, im: {mag: 6198106008766409342976, sign: false}} // -1748 + 336 i /// /// fn pow_w_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(73786976294838206464, false), /// FixedTrait::new(774763251095801167872, false) /// ); // 4 + 42i /// /// let w: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(18446744073709551616, false) /// ); // 2 + i /// z.pow(w) /// } /// >>> {real: {mag: 6881545343236111419203, sign: false}, im: {mag: 2996539405459717736042, sign: false}} // 373.0485407816205 + 162.4438823807959 i /// ``` /// fn pow(self: T, b: T) -> T; /// # ComplexTrait::sqrt /// /// ```rust /// fn sqrt(self: T) -> T; /// ``` /// /// Returns the value of the square root of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number /// /// ## Returns /// /// A complex number '<T>', representing the square root of the complex number. /// 'sqrt(z) = sqrt(mag(z)) * (cos(arg(z)/2) + i sin(arg(z)/2))'. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn sqrt_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(73786976294838206464, false), /// FixedTrait::new(774763251095801167872, false) /// ); // 4 + 42i /// z.sqrt() /// } /// >>> {real: {mag: 88650037379463118848, sign: false}, im: {mag: 80608310115317055488, sign: false}} // 4.80572815603723 + 4.369785247552674 i /// ``` /// fn sqrt(self: T) -> T; /// # ComplexTrait::acos /// /// ```rust /// fn acos(self: T) -> T; /// ``` /// /// Returns the arccosine (inverse of cosine) of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// A complex number representing the acos of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn acos_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.acos() /// } /// >>> {real: {mag: 18449430688981877061, sign: false}, im: {mag: 36587032881711954470, sign: true}} // 1.000143542473797 - 1.98338702991653i /// ``` /// fn acos(self: T) -> T; /// # ComplexTrait::asin /// /// ```rust /// fn asin(self: T) -> T; /// ``` /// /// Returns the arcsine (inverse of sine) of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// A complex number representing the asin of the input value. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn asin_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.asin() /// } /// >>> {real: {mag: 10526647143326614308, sign: false}, im: {mag: 36587032881711954470, sign: false}} // 0.57065278432 + 1.9833870299i /// ``` /// fn asin(self: T) -> T; /// # ComplexTrait::atan /// /// ```rust /// fn atan(self: T) -> T; /// ``` /// /// Returns the arctangent (inverse of tangent) of the input complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// A complex number representing the arctangent (inverse of tangent) of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn atan_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.atan() /// } /// >>> {real: {mag: 26008453796191787243, sign: false}, im: {mag: 4225645162986888119, sign: false}} // 1.40992104959 + 0.2290726829i /// ``` /// fn atan(self: T) -> T; /// # ComplexTrait::cos /// /// ```rust /// fn cos(self: T) -> T; /// ``` /// /// Returns the cosine of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// A complex number representing the cosine of the input value. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn cos_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.cos() /// } /// >>> {real: {mag: 77284883172661882094, sign: true}, im: {mag: 168035443352962049425, sign: true}} // -4.18962569 + -9.10922789375i /// ``` /// fn cos(self: T) -> T; /// # ComplexTrait::sin /// /// ```rust /// fn sin(self: T) -> T; /// ``` /// /// Returns the sine of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// A complex number representing the sin of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn sin_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.sin() /// } /// >>> {real: {mag: 168870549816927860082, sign: false}, im: {mag: 76902690389051588309, sign: true}} // 9.15449914 - 4.168906959 i /// ``` /// fn sin(self: T) -> T; /// # ComplexTrait::tan /// /// ```rust /// fn tan(self: T) -> T; /// ``` /// /// Returns the tangent of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// A complex number representing the tan of the input value. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn tan_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.tan() /// } /// >>> {real: {mag: 69433898428143694, sign: true}, im: {mag: 18506486100303669886, sign: false}} // -0.00376402 + 1.00323862i /// ``` /// fn tan(self: T) -> T; /// # ComplexTrait::acosh /// /// ```rust /// fn acosh(self: T) -> T; /// ``` /// /// Returns the value of the inverse hyperbolic cosine of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// The inverse hyperbolic cosine of the input complex number. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn acosh_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.acosh() /// } /// >>> {real: {mag: 36587032878947915965, sign: false}, im: {mag: 18449360714192945790, sign: false}} // 1.9833870 + 1.0001435424i /// ``` /// fn acosh(self: T) -> T; /// # ComplexTrait::asinh /// /// ```rust /// fn asinh(self: T) -> T; /// ``` /// /// Returns the value of the inverse hyperbolic sine of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// The inverse hyperbolic sine of the input complex number. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn asinh_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.asinh() /// } /// >>> {real: {mag: 36314960239770126586, sign: false}, im: {mag: 17794714057579789616, sign: false}} //1.9686379 + 0.964658504i /// ``` /// fn asinh(self: T) -> T; /// # ComplexTrait::atanh /// /// ```rust /// fn atanh(self: T) -> T; /// ``` /// /// Returns the value of the inverse hyperbolic tangent of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// The inverse hyperbolic tangent of the input complex number. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn atanh_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.atanh() /// } /// >>> {real: {mag: 2710687792925618924, sign: false}, im: {mag: 24699666646262346226, sign: false}} // 0.146946666 + 1.33897252i /// ``` /// fn atanh(self: T) -> T; /// # ComplexTrait::cosh /// /// ```rust /// fn cosh(self: T) -> T; /// ``` /// /// Returns the value of the hyperbolic cosine of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// The hyperbolic cosine of the input complex number. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn cosh_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.cosh() /// } /// >>> {real: {mag: 68705646899632870392, sign: true}, im: {mag: 9441447324287988702, sign: false}} // -3.72454550491 + 0.511822569987i /// ``` /// fn cosh(self: T) -> T; /// # ComplexTrait::sinh /// /// ```rust /// fn sinh(self: T) -> T; /// ``` /// /// Returns the value of the hyperbolic sine of the complex number. /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// The hyperbolic sine of the input complex number. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn sinh_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.sinh() /// } /// >>> {real: {mag: 66234138518106676624, sign: true}, im: {mag: 9793752294470951790, sign: false}} // -3.59056458998 + 0.530921086i /// ``` /// fn sinh(self: T) -> T; /// # ComplexTrait::tanh /// /// ```rust /// fn tanh(self: T) -> T; /// ``` /// /// Returns the value of the hyperbolic tangent of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// The hyperbolic tangent of the input complex number. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn tanh_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(36893488147419103232, false), /// FixedTrait::new(55340232221128654848, false) /// ); // 2 + 3i /// z.tanh() /// } /// >>> {real: {mag: 17808227710002974080, sign: false}, im: {mag: 182334107030204896, sign: true}} // 0.96538587902 + 0.009884375i /// ``` /// fn tanh(self: T) -> T; /// # ComplexTrait::to_polar /// /// ```rust /// fn to_polar(self: T) -> (F, F); /// ``` /// /// Returns the polar coordinates (magnitude and argument) of the complex number. /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// A tuple of two fixed point numbers representing the polar coordinates of the input number. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn to_polar_complex64_example() -> (FP64x64, FP64x64) { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(73786976294838206464, false), /// FixedTrait::new(774763251095801167872, false) /// ); // 4 + 42i /// z.to_polar() /// } /// >>> ({mag: 778268985067028086784, sign: false}, {mag: 27224496882576083824, sign: false}) // mag : 42.190046219457976 + arg : 1.4758446204521403 /// ``` /// fn to_polar(self: T) -> (F, F); /// # ComplexTrait::from_polar /// /// /// ```rust /// fn from_polar(mag: F, arg: F) -> T; /// ``` /// /// Returns a complex number (in the Cartesian form) from the polar coordinates of the complex number. /// /// ## Args /// /// * `mag`(`F`) - The input fixed point number representing the magnitude. /// * `arg`(`F`) - The input fixed point number representing the argument. 
/// /// ## Returns /// /// The complex number representing the Cartesian form calculated from the input polar coordinates. /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn from_polar_complex64_example() -> complex64 { /// let mag: FP64x64 = FixedTrait::new(778268985067028086784, false); // 42.190046219457976 /// let arg: FP64x64 = FixedTrait::new(27224496882576083824, false); //1.4758446204521403 /// ComplexTrait::from_polar(mag,arg) /// } /// >>> {real: {mag: 73787936714814843012, sign: false}, im: {mag: 774759489569697723777, sign: false}} // 4 + 42 i /// ``` /// fn from_polar(mag: F, arg: F) -> T; /// # ComplexTrait::reciprocal /// /// /// ```rust /// fn reciprocal(self: T) -> T; /// ``` /// /// Returns the reciprocal of the complex number (i.e. 1/z). /// /// ## Args /// /// * `self`(`T`) - The input complex number. /// /// ## Returns /// /// The reciprocal of the complex number \(a + bi\) is given by: /// \[ /// \frac{1}{a + bi} = \frac{a}{a^2 + b^2} - \frac{b}{a^2 + b^2}i /// \] /// /// ## Examples /// /// ```rust /// use orion::numbers::complex_number::{complex_trait::ComplexTrait, complex64::complex64}; /// use orion::numbers::{FP64x64, FP64x64Impl, FixedTrait}; /// /// fn reciprocal_complex64_example() -> complex64 { /// let z: complex64 = ComplexTrait::new( /// FixedTrait::new(73786976294838206464, false), /// FixedTrait::new(774763251095801167872, false) /// ); // 4 + 42i /// z.reciprocal() /// } /// >>> {real: {mag: 41453357469010228, sign: false}, im: {mag: 435260253424607397, sign: true}} // 0.002247191011 - 0.0235955056 i /// ``` /// fn reciprocal(self: T) -> T; }
https://github.com/gizatechxyz/orion
src/numbers/fixed_point.cairo
//! Fixed-Point implemented from https://github.com/influenceth/cubit and adjusted to Q8.23 mod core; mod implementations; mod utils;
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/core.cairo
/// Trait /// /// new - Constructs a new fixed point instance. /// new_unscaled - Creates a new fixed point instance with the specified unscaled magnitude and sign. /// from_felt - Creates a new fixed point instance from a felt252 value. /// abs - Returns the absolute value of the fixed point number. /// ceil - Returns the smallest integer greater than or equal to the fixed point number. /// exp - Returns the value of e raised to the power of the fixed point number. /// exp2 - Returns the value of 2 raised to the power of the fixed point number. /// floor - Returns the largest integer less than or equal to the fixed point number. /// ln - Returns the natural logarithm of the fixed point number. /// log2 - Returns the base-2 logarithm of the fixed point number. /// log10 - Returns the base-10 logarithm of the fixed point number. /// pow - Returns the result of raising the fixed point number to the power of another fixed point number. /// round - Rounds the fixed point number to the nearest whole number. /// sqrt - Returns the square root of the fixed point number. /// acos - Returns the arccosine (inverse of cosine) of the fixed point number. /// acos_fast - Returns the arccosine (inverse of cosine) of the fixed point number faster with LUT. /// asin - Returns the arcsine (inverse of sine) of the fixed point number. /// asin_fast - Returns the arcsine (inverse of sine) of the fixed point number faster with LUT. /// atan - Returns the arctangent (inverse of tangent) of the input fixed point number. /// atan_fast - Returns the arctangent (inverse of tangent) of the input fixed point number faster with LUT. /// cos - Returns the cosine of the fixed point number. /// cos_fast - Returns the cosine of the fixed point number fast with LUT. /// sin - Returns the sine of the fixed point number. /// sin_fast - Returns the sine of the fixed point number faster with LUT. /// tan - Returns the tangent of the fixed point number. 
/// tan_fast - Returns the tangent of the fixed point number faster with LUT. /// acosh - Returns the value of the inverse hyperbolic cosine of the fixed point number. /// asinh - Returns the value of the inverse hyperbolic sine of the fixed point number. /// atanh - Returns the value of the inverse hyperbolic tangent of the fixed point number. /// cosh - Returns the value of the hyperbolic cosine of the fixed point number. /// sinh - Returns the value of the hyperbolic sine of the fixed point number. /// tanh - Returns the value of the hyperbolic tangent of the fixed point number. /// sign - Returns the element-wise indication of the sign of the input fixed point number. /// erf - Returns the error function of the input fixed point number computed element-wise. /// trait FixedTrait<T, MAG> { /// # FixedTrait::new /// /// ```rust /// fn new(mag: MAG, sign: bool) -> T; /// ``` /// /// Constructs a new fixed point instance. /// /// ## Args /// /// * `mag`(`MAG`) - The magnitude of the fixed point. /// * `sign`(`bool`) - The sign of the fixed point, where `true` represents a negative number. /// /// ## Returns /// /// A new fixed point instance. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn new_fp_example() -> FP16x16 { /// // We can call `new` function as follows. /// FixedTrait::new(65536, false) /// } /// >>> {mag: 65536, sign: false} // = 1 in FP16x16 /// ``` /// fn new(mag: MAG, sign: bool) -> T; /// # FixedTrait::new\_unscaled /// /// ```rust /// fn new_unscaled(mag: MAG, sign: bool) -> T; /// ``` /// /// Creates a new fixed point instance with the specified unscaled magnitude and sign. /// /// ## Args /// /// `mag`(`MAG`) - The unscaled magnitude of the fixed point. /// `sign`(`bool`) - The sign of the fixed point, where `true` represents a negative number. /// /// ## Returns /// /// A new fixed point instance. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn new_unscaled_example() -> FP16x16 { /// // We can call `new_unscaled` function as follows. /// FixedTrait::new_unscaled(1, false) /// } /// >>> {mag: 65536, sign: false} /// ``` /// fn new_unscaled(mag: MAG, sign: bool) -> T; /// # FixedTrait::from\_felt /// /// /// ```rust /// fn from_felt(val: felt252) -> T; /// ``` /// /// Creates a new fixed point instance from a felt252 value. /// /// ## Args /// /// * `val`(`felt252`) - `felt252` value to convert in fixed point. /// /// ## Returns /// /// A new fixed point instance. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn from_felt_example() -> FP16x16 { /// // We can call `from_felt` function as follows . /// FixedTrait::from_felt(190054) /// } /// >>> {mag: 190054, sign: false} // = 2.9 /// ``` /// fn from_felt(val: felt252) -> T; /// # fp.abs /// /// ```rust /// fn abs(self: T) -> T; /// ``` /// /// Returns the absolute value of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// The absolute value of the input fixed point number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// /// fn abs_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(1, true); /// /// // We can call `abs` function as follows. /// fp.abs() /// } /// >>> {mag: 65536, sign: false} // = 1 /// ``` /// fn abs(self: T) -> T; /// # fp.ceil /// /// ```rust /// fn ceil(self: T) -> T; /// ``` /// /// Returns the smallest integer greater than or equal to the fixed point number. /// /// ## Args /// /// *`self`(`T`) - The input fixed point /// /// ## Returns /// /// The smallest integer greater than or equal to the input fixed point number. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn ceil_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::from_felt(190054); // 2.9 /// /// // We can call `ceil` function as follows. /// fp.ceil() /// } /// >>> {mag: 196608, sign: false} // = 3 /// ``` /// fn ceil(self: T) -> T; /// # fp.exp /// /// ```rust /// fn exp(self: T) -> T; /// ``` /// /// Returns the value of e raised to the power of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// The natural exponent of the input fixed point number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn exp_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `exp` function as follows. /// fp.exp() /// } /// >>> {mag: 484249, sign: false} // = 7.389056317241236 /// ``` /// fn exp(self: T) -> T; /// # fp.exp2 /// /// ```rust /// fn exp2(self: T) -> T; /// ``` /// /// Returns the value of 2 raised to the power of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// The binary exponent of the input fixed point number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn exp2_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `exp2` function as follows. /// fp.exp2() /// } /// >>> {mag: 262143, sign: false} // = 3.99999957248 /// ``` /// fn exp2(self: T) -> T; /// # fp.floor /// /// ```rust /// fn floor(self: T) -> T; /// ``` /// /// Returns the largest integer less than or equal to the fixed point number. 
/// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// Returns the largest integer less than or equal to the input fixed point number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn floor_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::from_felt(190054); // 2.9 /// /// // We can call `floor` function as follows. /// fp.floor() /// } /// >>> {mag: 131072, sign: false} // = 2 /// ``` /// fn floor(self: T) -> T; /// # fp.ln /// /// /// ```rust /// fn ln(self: T) -> T; /// ``` /// /// Returns the natural logarithm of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point representing the natural logarithm of the input number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn ln_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(1, false); /// /// // We can call `ln` function as follows. /// fp.ln() /// } /// >>> {mag: 0, sign: false} /// ``` /// fn ln(self: T) -> T; /// # fp.log2 /// /// ```rust /// fn log2(self: T) -> T; /// ``` /// /// Returns the base-2 logarithm of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Panics /// /// * Panics if the input is negative. /// /// ## Returns /// /// A fixed point representing the binary logarithm of the input number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn log2_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(3, false); /// /// // We can call `log2` function as follows. 
/// fp.log2() /// } /// >>> {mag: 103872, sign: false} // = 1.58496250072 /// ``` /// fn log2(self: T) -> T; /// # fp.log10 /// /// ```rust /// fn log10(self: T) -> T; /// ``` /// /// Returns the base-10 logarithm of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point representing the base 10 logarithm of the input number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn log10_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(3, false); /// /// // We can call `log10` function as follows. /// fp.log10() /// } /// >>> {mag: 31269, sign: false} // = 0.47712125472 /// ``` /// fn log10(self: T) -> T; /// # fp.pow /// /// ```rust /// fn pow(self: T, b: T) -> T; /// ``` /// /// Returns the result of raising the fixed point number to the power of another fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point. /// * `b`(`T`) - The exponent fixed point number. /// /// ## Returns /// /// A fixed point number representing the result of x^y. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn pow_fp_example() -> FP16x16 { /// // We instantiate FixedTrait points here. /// let a = FixedTrait::new_unscaled(3, false); /// let b = FixedTrait::new_unscaled(4, false); /// /// // We can call `pow` function as follows. /// a.pow(b) /// } /// >>> {mag: 5308416, sign: false} // = 81 /// ``` /// fn pow(self: T, b: T) -> T; /// # fp.round /// /// ```rust /// fn round(self: T) -> T; /// ``` /// /// Rounds the fixed point number to the nearest whole number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the rounded value. 
/// /// ## Examples /// /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn round_fp_example() -> FP16x16 { /// // We instantiate FixedTrait points here. /// let a = FixedTrait::from_felt(190054); // 2.9 /// /// // We can call `round` function as follows. /// a.round() /// } /// >>> {mag: 196608, sign: false} // = 3 /// ``` /// fn round(self: T) -> T; /// # fp.sqrt /// /// ```rust /// fn sqrt(self: T) -> T; /// ``` /// /// Returns the square root of the fixed point number. /// /// ## Args /// /// `self`(`T`) - The input fixed point /// /// ## Panics /// /// * Panics if the input is negative. /// /// ## Returns /// /// A fixed point number representing the square root of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn sqrt_fp_example() -> FP16x16 { /// // We instantiate FixedTrait points here. /// let a = FixedTrait::new_unscaled(9, false); /// /// // We can call `round` function as follows. /// a.sqrt() /// } /// >>> {mag: 196608, sign: false} // = 3 /// ``` /// fn sqrt(self: T) -> T; /// # fp.acos /// /// ```rust /// fn acos(self: T) -> T; /// ``` /// /// Returns the arccosine (inverse of cosine) of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the acos of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn acos_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(1, true); /// /// // We can call `acos` function as follows. /// fp.acos() /// } /// >>> {mag: 205887, sign: false} // = 3.14159265 /// ``` /// fn acos(self: T) -> T; /// # fp.acos_fast /// /// ```rust /// fn acos_fast(self: T) -> T; /// ``` /// /// Returns the arccosine (inverse of cosine) of the fixed point number faster with LUT. 
/// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the acos of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn acos_fast_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(1, true); /// /// // We can call `acos_fast` function as follows. /// fp.acos_fast() /// } /// >>> {mag: 205887, sign: false} // = 3.14159265 /// ``` /// fn acos_fast(self: T) -> T; /// # fp.asin /// /// ```rust /// fn asin(self: T) -> T; /// ``` /// /// Returns the arcsine (inverse of sine) of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the asin of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn asin_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(1, false); /// /// // We can call `asin` function as follows. /// fp.asin() /// } /// >>> {mag: 102943, sign: true} // = 1.57079633 /// ``` /// fn asin(self: T) -> T; /// # fp.asin_fast /// /// ```rust /// fn asin_fast(self: T) -> T; /// ``` /// /// Returns the arcsine (inverse of sine) of the fixed point number faster with LUT. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the asin of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn asin_fast_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(1, false); /// /// // We can call `asin_fast` function as follows. 
/// fp.asin_fast() /// } /// >>> {mag: 102943, sign: true} // = 1.57079633 /// ``` /// fn asin_fast(self: T) -> T; /// # fp.atan /// /// ```rust /// fn atan(self: T) -> T; /// ``` /// /// Returns the arctangent (inverse of tangent) of the input fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the arctangent (inverse of tangent) of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn atan_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `atan` function as follows. /// fp.atan() /// } /// >>> {mag: 72558, sign: false} // = 1.10714872 /// ``` /// fn atan(self: T) -> T; /// # fp.atan_fast /// /// ```rust /// fn atan_fast(self: T) -> T; /// ``` /// /// Returns the arctangent (inverse of tangent) of the input fixed point number faster with LUT. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the arctangent (inverse of tangent) of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn atan_fast_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `atan_fast` function as follows. /// fp.atan_fast() /// } /// >>> {mag: 72558, sign: false} // = 1.10714872 /// ``` /// fn atan_fast(self: T) -> T; /// # fp.cos /// /// ```rust /// fn cos(self: T) -> T; /// ``` /// /// Returns the cosine of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the cosine of the input value. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn cos_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `cos` function as follows. /// fp.cos() /// } /// >>> {mag: 27273, sign: true} // = -0.41614684 /// ``` /// fn cos(self: T) -> T; /// # fp.cos_fast /// /// ```rust /// fn cos_fast(self: T) -> T; /// ``` /// /// Returns the cosine of the fixed point number fast with LUT. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the cosine of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn cos_fast_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `cos_fast` function as follows. /// fp.cos_fast() /// } /// >>> {mag: 27273, sign: true} // = -0.41614684 /// ``` /// fn cos_fast(self: T) -> T; /// # fp.sin /// /// ```rust /// fn sin(self: T) -> T; /// ``` /// /// Returns the sine of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the sin of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn sin_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `sin` function as follows. /// fp.sin() /// } /// >>> {mag: 59592, sign: false} // = 0.90929743 /// ``` /// fn sin(self: T) -> T; /// # fp.sin_fast /// /// ```rust /// fn sin_fast(self: T) -> T; /// ``` /// /// Returns the sine of the fixed point number faster with LUT. 
/// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the sin of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn sin_fast_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `sin_fast` function as follows. /// fp.sin_fast() /// } /// >>> {mag: 59592, sign: false} // = 0.90929743 /// ``` /// fn sin_fast(self: T) -> T; /// # fp.tan /// /// ```rust /// fn tan(self: T) -> T; /// ``` /// /// Returns the tangent of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the tan of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn tan_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `tan` function as follows. /// fp.tan() /// } /// >>> {mag: 143199, sign: true} // = -2.18503986 /// ``` /// fn tan(self: T) -> T; /// # fp.tan_fast /// /// ```rust /// fn tan_fast(self: T) -> T; /// ``` /// /// Returns the tangent of the fixed point number faster with LUT. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// A fixed point number representing the tan of the input value. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn tan_fast_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `tan_fast` function as follows. 
/// fp.tan_fast() /// } /// >>> {mag: 143199, sign: true} // = -2.18503986 /// ``` /// fn tan_fast(self: T) -> T; /// # fp.acosh /// /// ```rust /// fn acosh(self: T) -> T; /// ``` /// /// Returns the value of the inverse hyperbolic cosine of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// The inverse hyperbolic cosine of the input fixed point number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn acosh_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `acosh` function as follows. /// fp.acosh() /// } /// >>> {mag: 86308, sign: false} // = 1.3169579 /// ``` /// fn acosh(self: T) -> T; /// # fp.asinh /// /// ```rust /// fn asinh(self: T) -> T; /// ``` /// /// Returns the value of the inverse hyperbolic sine of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// The inverse hyperbolic sine of the input fixed point number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn asinh_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `asinh` function as follows. /// fp.asinh() /// } /// >>> {mag: 94610, sign: false} // = 1.44363548 /// ``` /// fn asinh(self: T) -> T; /// # fp.atanh /// /// ```rust /// fn atanh(self: T) -> T; /// ``` /// /// Returns the value of the inverse hyperbolic tangent of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// The inverse hyperbolic tangent of the input fixed point number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn atanh_fp_example() -> FP16x16 { /// // We instantiate fixed point here. 
/// let fp = FixedTrait::from_felt(32768); // 0.5 /// /// // We can call `atanh` function as follows. /// fp.atanh() /// } /// >>> {mag: 35999, sign: false} // = 0.54930614 /// ``` /// fn atanh(self: T) -> T; /// # fp.cosh /// /// ```rust /// fn cosh(self: T) -> T; /// ``` /// /// Returns the value of the hyperbolic cosine of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// The hyperbolic cosine of the input fixed point number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn cosh_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `cosh` function as follows. /// fp.cosh() /// } /// >>> {mag: 246559, sign: false} // = 3.76219569 /// ``` /// fn cosh(self: T) -> T; /// # fp.sinh /// /// ```rust /// fn sinh(self: T) -> T; /// ``` /// /// Returns the value of the hyperbolic sine of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// The hyperbolic sine of the input fixed point number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn sinh_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `sinh` function as follows. /// fp.sinh() /// } /// >>> {mag: 237690, sign: false} // = 3.62686041 /// ``` /// fn sinh(self: T) -> T; /// # fp.tanh /// /// ```rust /// fn tanh(self: T) -> T; /// ``` /// /// Returns the value of the hyperbolic tangent of the fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// The hyperbolic tangent of the input fixed point number. 
/// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn tanh_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, false); /// /// // We can call `tanh` function as follows. /// fp.tanh() /// } /// >>> {mag: 63179, sign: false} // = 0.96402758 /// ``` /// fn tanh(self: T) -> T; /// # fp.sign /// /// ```rust /// fn sign(self: T) -> T; /// ``` /// /// Returns the element-wise indication of the sign of the input fixed point number. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// The element-wise indication of the sign of the input fixed point number. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn sign_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new_unscaled(2, true); /// /// // We can call `sign` function as follows. /// fp.sign() /// } /// >>> {mag: 65536, sign: true} // = -1 /// ``` /// fn sign(self: T) -> T; /// # fp.erf /// /// ```rust /// fn erf(self: T) -> T; /// ``` /// /// Returns the error function of the input fixed point number computed element-wise. /// /// ## Args /// /// * `self`(`T`) - The input fixed point /// /// ## Returns /// /// The error function of the input fixed point number computed element-wise. /// /// ## Examples /// /// ```rust /// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait}; /// /// fn erf_fp_example() -> FP16x16 { /// // We instantiate fixed point here. /// let fp = FixedTrait::new(65536, false); /// /// // We can call `erf` function as follows. 
/// fp.erf() /// } /// >>> {mag: 55227, sign: false} // = -1 /// ``` /// fn erf(self: T) -> T; fn ZERO() -> T; fn HALF() -> T; fn ONE() -> T; fn MAX() -> T; fn NaN() -> T; fn is_nan(self: T) -> bool; fn INF() -> T; fn POS_INF() -> T; fn NEG_INF() -> T; fn is_inf(self: T) -> bool; fn is_pos_inf(self: T) -> bool; fn is_neg_inf(self: T) -> bool; }
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations.cairo
// Fixed-point format implementations available in Orion. Module names follow
// the Qm.n convention (integer bits x fractional bits); the "wide" variants
// presumably carry extra headroom for intermediates — confirm in each module.
mod fp8x23;
mod fp16x16;
mod fp64x64;
mod fp32x32;
mod fp16x16wide;
mod fp8x23wide;
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16.cairo
// FP16x16 fixed-point package: `core` holds the type and FixedTrait impl,
// `math` the numeric routines, `helpers` the test assertion utilities.
mod core;
mod math;
mod helpers;
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16/core.cairo
use core::debug::PrintTrait;

use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::fixed_point::implementations::fp16x16::math::{
    core as core_math, trig, hyp, erf
};
use orion::numbers::fixed_point::utils;

/// A struct representing a fixed point number.
/// Sign-and-magnitude Q16.16: `mag` is the absolute value scaled by 2^16
/// (see `ONE` below); `sign` is true for negative numbers.
#[derive(Serde, Copy, Drop)]
struct FP16x16 {
    mag: u32,
    sign: bool
}

// CONSTANTS — raw (already scaled) magnitudes.
const TWO: u32 = 131072; // 2 ** 17
const ONE: u32 = 65536; // 2 ** 16
const HALF: u32 = 32768; // 2 ** 15
const MAX: u32 = 2147483648; // 2 ** 31

impl FP16x16Impl of FixedTrait<FP16x16, u32> {
    fn ZERO() -> FP16x16 {
        FP16x16 { mag: 0, sign: false }
    }

    fn HALF() -> FP16x16 {
        FP16x16 { mag: HALF, sign: false }
    }

    fn ONE() -> FP16x16 {
        FP16x16 { mag: ONE, sign: false }
    }

    fn MAX() -> FP16x16 {
        FP16x16 { mag: MAX, sign: false }
    }

    /// Wraps an already-scaled magnitude; no scaling is applied.
    fn new(mag: u32, sign: bool) -> FP16x16 {
        FP16x16 { mag: mag, sign: sign }
    }

    /// Builds a value from an integer magnitude, scaling by ONE.
    /// NOTE(review): `mag * ONE` overflows u32 for mag >= 2^16 — presumably
    /// callers stay in range; confirm the panic-on-overflow is acceptable.
    fn new_unscaled(mag: u32, sign: bool) -> FP16x16 {
        FP16x16 { mag: mag * ONE, sign: sign }
    }

    /// Reinterprets `val` as a raw (already scaled) signed magnitude.
    /// Panics (unwrap) if the absolute value does not fit in u32.
    fn from_felt(val: felt252) -> FP16x16 {
        let mag = core::integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap();
        FixedTrait::new(mag, utils::felt_sign(val))
    }

    fn abs(self: FP16x16) -> FP16x16 {
        core_math::abs(self)
    }

    // For FP16x16 the "accurate" inverse-trig entry points delegate to the
    // LUT-based fast variants, so acos == acos_fast, asin == asin_fast, etc.
    fn acos(self: FP16x16) -> FP16x16 {
        trig::acos_fast(self)
    }

    fn acos_fast(self: FP16x16) -> FP16x16 {
        trig::acos_fast(self)
    }

    fn acosh(self: FP16x16) -> FP16x16 {
        hyp::acosh(self)
    }

    fn asin(self: FP16x16) -> FP16x16 {
        trig::asin_fast(self)
    }

    fn asin_fast(self: FP16x16) -> FP16x16 {
        trig::asin_fast(self)
    }

    fn asinh(self: FP16x16) -> FP16x16 {
        hyp::asinh(self)
    }

    fn atan(self: FP16x16) -> FP16x16 {
        trig::atan_fast(self)
    }

    fn atan_fast(self: FP16x16) -> FP16x16 {
        trig::atan_fast(self)
    }

    fn atanh(self: FP16x16) -> FP16x16 {
        hyp::atanh(self)
    }

    fn ceil(self: FP16x16) -> FP16x16 {
        core_math::ceil(self)
    }

    // cos/sin/tan likewise delegate to the fast LUT versions.
    fn cos(self: FP16x16) -> FP16x16 {
        trig::cos_fast(self)
    }

    fn cos_fast(self: FP16x16) -> FP16x16 {
        trig::cos_fast(self)
    }

    fn cosh(self: FP16x16) -> FP16x16 {
        hyp::cosh(self)
    }

    fn floor(self: FP16x16) -> FP16x16 {
        core_math::floor(self)
    }

    // Calculates the natural exponent of x: e^x
    fn exp(self: FP16x16) -> FP16x16 {
        core_math::exp(self)
    }

    // Calculates the binary exponent of x: 2^x
    fn exp2(self: FP16x16) -> FP16x16 {
        core_math::exp2(self)
    }

    // Calculates the natural logarithm of x: ln(x)
    // self must be greater than zero
    fn ln(self: FP16x16) -> FP16x16 {
        core_math::ln(self)
    }

    // Calculates the binary logarithm of x: log2(x)
    // self must be greater than zero
    fn log2(self: FP16x16) -> FP16x16 {
        core_math::log2(self)
    }

    // Calculates the base 10 log of x: log10(x)
    // self must be greater than zero
    fn log10(self: FP16x16) -> FP16x16 {
        core_math::log10(self)
    }

    // Calculates the value of x^y and checks for overflow before returning
    // self is a fixed point value
    // b is a fixed point value
    fn pow(self: FP16x16, b: FP16x16) -> FP16x16 {
        core_math::pow(self, b)
    }

    fn round(self: FP16x16) -> FP16x16 {
        core_math::round(self)
    }

    fn sin(self: FP16x16) -> FP16x16 {
        trig::sin_fast(self)
    }

    fn sin_fast(self: FP16x16) -> FP16x16 {
        trig::sin_fast(self)
    }

    fn sinh(self: FP16x16) -> FP16x16 {
        hyp::sinh(self)
    }

    // Calculates the square root of a fixed point value
    // x must be positive
    fn sqrt(self: FP16x16) -> FP16x16 {
        core_math::sqrt(self)
    }

    fn tan(self: FP16x16) -> FP16x16 {
        trig::tan_fast(self)
    }

    fn tan_fast(self: FP16x16) -> FP16x16 {
        trig::tan_fast(self)
    }

    fn tanh(self: FP16x16) -> FP16x16 {
        hyp::tanh(self)
    }

    fn sign(self: FP16x16) -> FP16x16 {
        core_math::sign(self)
    }

    // NaN is encoded as "negative zero" ({mag: 0, sign: true}), a bit
    // pattern otherwise unused by the sign-magnitude representation.
    fn NaN() -> FP16x16 {
        FP16x16 { mag: 0, sign: true }
    }

    fn is_nan(self: FP16x16) -> bool {
        self == FP16x16 { mag: 0, sign: true }
    }

    // Infinities are encoded with mag == 4294967295 (u32 max), above MAX.
    fn INF() -> FP16x16 {
        FP16x16 { mag: 4294967295, sign: false }
    }

    fn POS_INF() -> FP16x16 {
        FP16x16 { mag: 4294967295, sign: false }
    }

    fn NEG_INF() -> FP16x16 {
        FP16x16 { mag: 4294967295, sign: true }
    }

    fn is_inf(self: FP16x16) -> bool {
        self.mag == 4294967295
    }

    fn is_pos_inf(self: FP16x16) -> bool {
        self.is_inf() && !self.sign
    }

    fn is_neg_inf(self: FP16x16) -> bool {
        self.is_inf() && self.sign
    }

    fn erf(self: FP16x16) -> FP16x16 {
        erf::erf(self)
    }
}

impl FP16x16Print of PrintTrait<FP16x16> {
    // Debug-prints the sign flag, then the raw (scaled) magnitude.
    fn print(self: FP16x16) {
        self.sign.print();
        self.mag.print();
    }
}

// Into a raw felt without unscaling
impl FP16x16IntoFelt252 of Into<FP16x16, felt252> {
    fn into(self: FP16x16) -> felt252 {
        let mag_felt = self.mag.into();

        if self.sign {
            mag_felt * -1
        } else {
            mag_felt * 1
        }
    }
}

impl FP16x16IntoI32 of Into<FP16x16, i32> {
    fn into(self: FP16x16) -> i32 {
        _i32_into_fp(self)
    }
}

impl FP16x16TryIntoI8 of TryInto<FP16x16, i8> {
    fn try_into(self: FP16x16) -> Option<i8> {
        _i8_try_from_fp(self)
    }
}

// The unsigned conversions below return None for negative values and
// truncate toward zero via integer division by ONE.
impl FP16x16TryIntoU128 of TryInto<FP16x16, u128> {
    fn try_into(self: FP16x16) -> Option<u128> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            Option::Some((self.mag / ONE).into())
        }
    }
}

impl FP16x16TryIntoU64 of TryInto<FP16x16, u64> {
    fn try_into(self: FP16x16) -> Option<u64> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            Option::Some((self.mag / ONE).into())
        }
    }
}

impl FP16x16TryIntoU32 of TryInto<FP16x16, u32> {
    fn try_into(self: FP16x16) -> Option<u32> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            Option::Some(self.mag / ONE)
        }
    }
}

impl FP16x16TryIntoU16 of TryInto<FP16x16, u16> {
    fn try_into(self: FP16x16) -> Option<u16> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP16x16TryIntoU8 of TryInto<FP16x16, u8> {
    fn try_into(self: FP16x16) -> Option<u8> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP16x16PartialEq of PartialEq<FP16x16> {
    #[inline(always)]
    fn eq(lhs: @FP16x16, rhs: @FP16x16) -> bool {
        core_math::eq(lhs, rhs)
    }

    #[inline(always)]
    fn ne(lhs: @FP16x16, rhs: @FP16x16) -> bool {
        core_math::ne(lhs, rhs)
    }
}

impl FP16x16Add of Add<FP16x16> {
    fn add(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
        core_math::add(lhs, rhs)
    }
}
// Compound-assignment, arithmetic, and comparison operator impls; all
// delegate to the shared core_math routines.
impl FP16x16AddEq of AddEq<FP16x16> {
    #[inline(always)]
    fn add_eq(ref self: FP16x16, other: FP16x16) {
        self = Add::add(self, other);
    }
}

impl FP16x16Sub of Sub<FP16x16> {
    fn sub(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
        core_math::sub(lhs, rhs)
    }
}

impl FP16x16SubEq of SubEq<FP16x16> {
    #[inline(always)]
    fn sub_eq(ref self: FP16x16, other: FP16x16) {
        self = Sub::sub(self, other);
    }
}

impl FP16x16Mul of Mul<FP16x16> {
    fn mul(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
        core_math::mul(lhs, rhs)
    }
}

impl FP16x16MulEq of MulEq<FP16x16> {
    #[inline(always)]
    fn mul_eq(ref self: FP16x16, other: FP16x16) {
        self = Mul::mul(self, other);
    }
}

impl FP16x16Div of Div<FP16x16> {
    fn div(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
        core_math::div(lhs, rhs)
    }
}

impl FP16x16DivEq of DivEq<FP16x16> {
    #[inline(always)]
    fn div_eq(ref self: FP16x16, other: FP16x16) {
        self = Div::div(self, other);
    }
}

impl FP16x16PartialOrd of PartialOrd<FP16x16> {
    #[inline(always)]
    fn ge(lhs: FP16x16, rhs: FP16x16) -> bool {
        core_math::ge(lhs, rhs)
    }

    #[inline(always)]
    fn gt(lhs: FP16x16, rhs: FP16x16) -> bool {
        core_math::gt(lhs, rhs)
    }

    #[inline(always)]
    fn le(lhs: FP16x16, rhs: FP16x16) -> bool {
        core_math::le(lhs, rhs)
    }

    #[inline(always)]
    fn lt(lhs: FP16x16, rhs: FP16x16) -> bool {
        core_math::lt(lhs, rhs)
    }
}

impl FP16x16Neg of Neg<FP16x16> {
    #[inline(always)]
    fn neg(a: FP16x16) -> FP16x16 {
        core_math::neg(a)
    }
}

impl FP16x16Rem of Rem<FP16x16> {
    #[inline(always)]
    fn rem(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
        core_math::rem(lhs, rhs)
    }
}

/// INTERNAL

/// Truncating conversion to i32: unscales by ONE (rounding toward zero)
/// and re-applies the sign.
fn _i32_into_fp(x: FP16x16) -> i32 {
    let number_felt: felt252 = (x.mag / ONE).into();
    let number_i32: i32 = number_felt.try_into().unwrap();
    if x.sign {
        return number_i32 * -1_i32;
    }
    number_i32
}

/// Truncating conversion to i8: None when the unscaled magnitude does not
/// fit in u8, otherwise the signed value.
/// NOTE(review): unscaled magnitudes 128..=255 pass the u8 check but make
/// the final `try_into::<i8>().unwrap()` panic instead of yielding None —
/// confirm whether that is intended.
fn _i8_try_from_fp(x: FP16x16) -> Option<i8> {
    let unscaled_mag: Option<u8> = (x.mag / ONE).try_into();

    match unscaled_mag {
        Option::Some => {
            let number_felt: felt252 = unscaled_mag.unwrap().into();
            let mut number_i8: i8 = number_felt.try_into().unwrap();
            if x.sign {
                return Option::Some(number_i8 * -1_i8);
            }
            Option::Some(number_i8)
        },
        Option::None => Option::None(())
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16/helpers.cairo
use core::debug::PrintTrait;
use orion::numbers::fixed_point::implementations::fp16x16::core::{
    HALF, ONE, TWO, FP16x16, FP16x16Impl, FP16x16Sub, FP16x16Div, FixedTrait, FP16x16Print
};

// Default tolerance in raw FP16x16 units: 7 / 2^16 ≈ 1e-4.
const DEFAULT_PRECISION: u32 = 7; // 1e-4

// To use `DEFAULT_PRECISION`, final arg is: `Option::None(())`.
// To use `custom_precision` of 430_u32: `Option::Some(430_u32)`.
/// Absolute-error assertion: fails with `msg` unless |result - expected|
/// is within the tolerance (raw units). Prints the offending value first
/// so the failure is diagnosable.
fn assert_precise(result: FP16x16, expected: felt252, msg: felt252, custom_precision: Option<u32>) {
    let precision = match custom_precision {
        Option::Some(val) => val,
        Option::None => DEFAULT_PRECISION,
    };

    let diff = (result - FixedTrait::from_felt(expected)).mag;

    if (diff > precision) {
        // Print before asserting so the failing value appears in the output.
        result.print();
        assert(diff <= precision, msg);
    }
}

/// Relative-error assertion: checks |result - expected| / |result| against
/// the tolerance. NOTE(review): divides by `result`, so a zero result is
/// presumably never expected here — confirm against callers.
fn assert_relative(
    result: FP16x16, expected: felt252, msg: felt252, custom_precision: Option<u32>
) {
    let precision = match custom_precision {
        Option::Some(val) => val,
        Option::None => DEFAULT_PRECISION,
    };

    let diff = result - FixedTrait::from_felt(expected);
    let rel_diff = (diff / result).mag;

    if (rel_diff > precision) {
        result.print();
        assert(rel_diff <= precision, msg);
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16/math.cairo
// FP16x16 math submodules: elementary ops (core), comparisons/logic (comp),
// lookup tables (lut), trigonometry (trig), hyperbolics (hyp), and erf.
mod core;
mod comp;
mod lut;
mod trig;
mod hyp;
mod erf;
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo
use orion::numbers::fixed_point::implementations::fp16x16::core::{
    FP16x16, FixedTrait, FP16x16Impl, FP16x16PartialOrd, FP16x16PartialEq
};

/// Returns the larger of the two operands (the first one when equal).
fn max(a: FP16x16, b: FP16x16) -> FP16x16 {
    if a >= b {
        return a;
    }
    b
}

/// Returns the smaller of the two operands (the first one when equal).
fn min(a: FP16x16, b: FP16x16) -> FP16x16 {
    if a <= b {
        return a;
    }
    b
}

/// Logical XOR over truthiness: true when the operands differ and at least
/// one of them equals positive zero.
fn xor(a: FP16x16, b: FP16x16) -> bool {
    let zero = FixedTrait::new(0, false);
    (a == zero || b == zero) && a != b
}

/// Logical OR over truthiness: false only when both operands are zero.
fn or(a: FP16x16, b: FP16x16) -> bool {
    let zero = FixedTrait::new(0, false);
    !(a == zero && b == zero)
}

/// Logical AND over truthiness: true only when neither operand is zero.
fn and(a: FP16x16, b: FP16x16) -> bool {
    let zero = FixedTrait::new(0, false);
    !(a == zero || b == zero)
}

/// Select: yields `b` when the condition `a` is non-zero, otherwise `c`.
fn where(a: FP16x16, b: FP16x16, c: FP16x16) -> FP16x16 {
    if a == FixedTrait::new(0, false) {
        return c;
    }
    b
}

/// Bitwise AND applied independently to the magnitude bits and sign flag.
fn bitwise_and(a: FP16x16, b: FP16x16) -> FP16x16 {
    let mag = a.mag & b.mag;
    let sign = a.sign & b.sign;
    FixedTrait::new(mag, sign)
}

/// Bitwise XOR applied independently to the magnitude bits and sign flag.
fn bitwise_xor(a: FP16x16, b: FP16x16) -> FP16x16 {
    let mag = a.mag ^ b.mag;
    let sign = a.sign ^ b.sign;
    FixedTrait::new(mag, sign)
}

/// Bitwise OR applied independently to the magnitude bits and sign flag.
fn bitwise_or(a: FP16x16, b: FP16x16) -> FP16x16 {
    let mag = a.mag | b.mag;
    let sign = a.sign | b.sign;
    FixedTrait::new(mag, sign)
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::{FixedTrait, max, min, bitwise_and, bitwise_xor, bitwise_or};

    #[test]
    fn test_max() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::new_unscaled(1, true);

        assert(max(a, a) == a, 'max(a, a)');
        assert(max(a, b) == a, 'max(a, b)');
        assert(max(a, c) == a, 'max(a, c)');

        assert(max(b, a) == a, 'max(b, a)');
        assert(max(b, b) == b, 'max(b, b)');
        assert(max(b, c) == b, 'max(b, c)');

        assert(max(c, a) == a, 'max(c, a)');
        assert(max(c, b) == b, 'max(c, b)');
        assert(max(c, c) == c, 'max(c, c)');
    }

    #[test]
    fn test_min() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::new_unscaled(1, true);

        assert(min(a, a) == a, 'min(a, a)');
        assert(min(a, b) == b, 'min(a, b)');
        assert(min(a, c) == c, 'min(a, c)');

        assert(min(b, a) == b, 'min(b, a)');
        assert(min(b, b) == b, 'min(b, b)');
        assert(min(b, c) == c, 'min(b, c)');

        assert(min(c, a) == c, 'min(c, a)');
        assert(min(c, b) == c, 'min(c, b)');
        assert(min(c, c) == c, 'min(c, c)');
    }

    #[test]
    fn test_bitwise_and() {
        let a = FixedTrait::new(225280, false); // 3.4375
        let b = FixedTrait::new(4160843776, true); // -2046.5625
        let c = FixedTrait::new(94208, false); // 1.4375

        assert(bitwise_and(a, b) == c, 'bitwise_and(a,b)')
    }

    #[test]
    fn test_bitwise_xor() {
        let a = FixedTrait::new(225280, false); // 3.4375
        let b = FixedTrait::new(4160843776, true); // -2046.5625
        let c = FixedTrait::new(4160880640, true);

        assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)')
    }

    #[test]
    fn test_bitwise_or() {
        let a = FixedTrait::new(225280, false); // 3.4375
        let b = FixedTrait::new(4160843776, true); // -2046.5625
        let c = FixedTrait::new(4160974848, true);

        assert(bitwise_or(a, b) == c, 'bitwise_or(a,b)')
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16/math/core.cairo
use core::integer;

use orion::numbers::fixed_point::implementations::fp16x16::core::{
    HALF, ONE, MAX, FP16x16, FP16x16Impl, FP16x16Add, FP16x16AddEq, FP16x16Sub, FP16x16Mul,
    FP16x16MulEq, FP16x16TryIntoU128, FP16x16PartialEq, FP16x16PartialOrd, FP16x16SubEq,
    FP16x16Neg, FP16x16Div, FP16x16IntoFelt252, FixedTrait
};
use orion::numbers::fixed_point::implementations::fp16x16::math::lut;

// PUBLIC
//
// Core arithmetic for the Q16.16 sign/magnitude fixed-point type FP16x16
// (mag: u32 scaled by ONE = 2^16, sign: bool with true = negative).

// Absolute value: keeps the magnitude, clears the sign flag.
fn abs(a: FP16x16) -> FP16x16 {
    FixedTrait::new(a.mag, false)
}

// Sign-magnitude addition: equal signs add magnitudes; differing signs
// subtract the smaller magnitude from the larger and keep that operand's sign.
fn add(a: FP16x16, b: FP16x16) -> FP16x16 {
    if a.sign == b.sign {
        return FixedTrait::new(a.mag + b.mag, a.sign);
    }

    if a.mag == b.mag {
        return FixedTrait::ZERO();
    }

    if (a.mag > b.mag) {
        FixedTrait::new(a.mag - b.mag, a.sign)
    } else {
        FixedTrait::new(b.mag - a.mag, b.sign)
    }
}

// Rounds toward positive infinity to an integer value.
// For negatives, note the magnitude truncates toward zero, which is ceil.
fn ceil(a: FP16x16) -> FP16x16 {
    let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));

    if rem == 0 {
        a
    } else if !a.sign {
        FixedTrait::new_unscaled(div + 1, false)
    } else if div == 0 {
        // -0.x ceils to +0
        FixedTrait::new_unscaled(0, false)
    } else {
        FixedTrait::new_unscaled(div, true)
    }
}

// Division: widens the dividend by ONE (2^16) into u64 before the integer
// divide so the quotient stays in Q16.16. Panics when b.mag == 0.
fn div(a: FP16x16, b: FP16x16) -> FP16x16 {
    let a_u64 = integer::u32_wide_mul(a.mag, ONE);
    let res_u64 = a_u64 / b.mag.into();

    // Re-apply sign
    FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign)
}

// Field-wise equality on (mag, sign).
// NOTE(review): a +0/-0 pair compares unequal here — confirm that is intended.
fn eq(a: @FP16x16, b: @FP16x16) -> bool {
    (*a.mag == *b.mag) && (*a.sign == *b.sign)
}

// Calculates the natural exponent of x: e^x
fn exp(a: FP16x16) -> FP16x16 {
    exp2(FixedTrait::new(94548, false) * a) // log2(e) ≈ 1.4427; 1.4427 * 2^16 ≈ 94548
}

// Calculates the binary exponent of x: 2^x
// Splits x into integer + fractional parts: 2^int via lookup table,
// 2^frac via a degree-7 polynomial on [0, 1); 2^-x computed as 1 / 2^x.
fn exp2(a: FP16x16) -> FP16x16 {
    if (a.mag == 0) {
        return FixedTrait::ONE();
    }

    let (int_part, frac_part) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));
    let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false);
    let mut res_u = int_res;

    if frac_part != 0 {
        // Horner evaluation; coefficients are raw Q16.16 magnitudes.
        let frac = FixedTrait::new(frac_part, false);
        let r7 = FixedTrait::new(1, false) * frac;
        let r6 = (r7 + FixedTrait::new(10, false)) * frac;
        let r5 = (r6 + FixedTrait::new(87, false)) * frac;
        let r4 = (r5 + FixedTrait::new(630, false)) * frac;
        let r3 = (r4 + FixedTrait::new(3638, false)) * frac;
        let r2 = (r3 + FixedTrait::new(15743, false)) * frac;
        let r1 = (r2 + FixedTrait::new(45426, false)) * frac;
        res_u = res_u * (r1 + FixedTrait::ONE());
    }

    if a.sign {
        FixedTrait::ONE() / res_u
    } else {
        res_u
    }
}

// 2^exp for a small integer exponent, straight from the lookup table.
fn exp2_int(exp: u32) -> FP16x16 {
    FixedTrait::new_unscaled(lut::exp2(exp), false)
}

// Rounds toward negative infinity to an integer value.
fn floor(a: FP16x16) -> FP16x16 {
    let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));

    if rem == 0 {
        a
    } else if !a.sign {
        FixedTrait::new_unscaled(div, false)
    } else {
        FixedTrait::new_unscaled(div + 1, true)
    }
}

// a >= b; the XOR with the sign flips the magnitude comparison for negatives.
fn ge(a: FP16x16, b: FP16x16) -> bool {
    if a.sign != b.sign {
        !a.sign
    } else {
        (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign)
    }
}

// a > b (strict variant of `ge`).
fn gt(a: FP16x16, b: FP16x16) -> bool {
    if a.sign != b.sign {
        !a.sign
    } else {
        (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign)
    }
}

// a <= b; mirror of `ge`.
fn le(a: FP16x16, b: FP16x16) -> bool {
    if a.sign != b.sign {
        a.sign
    } else {
        (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign)
    }
}

// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(a: FP16x16) -> FP16x16 {
    FixedTrait::new(45426, false) * log2(a) // ln(2) = 0.693...
}

// Calculates the binary logarithm of x: log2(x)
// self must be greater than zero
// Normalizes by the largest power of two <= x, then applies a degree-8
// polynomial to the normalized value and adds the exponent back.
fn log2(a: FP16x16) -> FP16x16 {
    assert(a.sign == false, 'must be positive');

    if (a.mag == ONE) {
        return FixedTrait::ZERO();
    } else if (a.mag < ONE) {
        // Compute true inverse binary log if 0 < x < 1
        let div = FixedTrait::ONE() / a;
        return -log2(div);
    }

    let whole = a.mag / ONE;
    let (msb, div) = lut::msb(whole);

    if a.mag == div * ONE {
        // Exact power of two: the lookup already gives the answer.
        FixedTrait::new_unscaled(msb, false)
    } else {
        let norm = a / FixedTrait::new_unscaled(div, false);
        let r8 = FixedTrait::new(596, true) * norm;
        let r7 = (r8 + FixedTrait::new(8116, false)) * norm;
        let r6 = (r7 + FixedTrait::new(49044, true)) * norm;
        let r5 = (r6 + FixedTrait::new(172935, false)) * norm;
        let r4 = (r5 + FixedTrait::new(394096, true)) * norm;
        let r3 = (r4 + FixedTrait::new(608566, false)) * norm;
        let r2 = (r3 + FixedTrait::new(655828, true)) * norm;
        let r1 = (r2 + FixedTrait::new(534433, false)) * norm;
        r1 + FixedTrait::new(224487, true) + FixedTrait::new_unscaled(msb, false)
    }
}

// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(a: FP16x16) -> FP16x16 {
    FixedTrait::new(19728, false) * log2(a) // log10(2) = 0.301...
}

// a < b (strict variant of `le`).
fn lt(a: FP16x16, b: FP16x16) -> bool {
    if a.sign != b.sign {
        a.sign
    } else {
        (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign)
    }
}

// Multiplication: widens to u64, rescales by ONE, XORs the signs.
// NOTE(review): u32_wide_mul yields u64 — the `_u128` name looks historical.
fn mul(a: FP16x16, b: FP16x16) -> FP16x16 {
    let prod_u128 = integer::u32_wide_mul(a.mag, b.mag);

    // Re-apply sign
    FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign)
}

// Field-wise inequality; logical negation of `eq`.
fn ne(a: @FP16x16, b: @FP16x16) -> bool {
    (*a.mag != *b.mag) || (*a.sign != *b.sign)
}

// Negation; zero is left untouched so no -0 is produced here.
fn neg(a: FP16x16) -> FP16x16 {
    if a.mag == 0 {
        a
    } else if !a.sign {
        FixedTrait::new(a.mag, !a.sign)
    } else {
        FixedTrait::new(a.mag, false)
    }
}

// Calculates the value of x^y and checks for overflow before returning
// self is a FP16x16 point value
// b is a FP16x16 point value
fn pow(a: FP16x16, b: FP16x16) -> FP16x16 {
    let (_, rem) = integer::u32_safe_divmod(b.mag, integer::u32_as_non_zero(ONE));

    // use the more performant integer pow when y is an int
    if (rem == 0) {
        return pow_int(a, b.mag / ONE, b.sign);
    }

    // x^y = exp(y*ln(x)) for x > 0 will error for x < 0
    exp(b * ln(a))
}

// Calculates the value of a^b (integer exponent) by binary exponentiation;
// a negative exponent (`sign` true) inverts the base first.
fn pow_int(a: FP16x16, b: u32, sign: bool) -> FP16x16 {
    let mut x = a;
    let mut n = b;

    if sign {
        x = FixedTrait::ONE() / x;
    }

    if n == 0 {
        return FixedTrait::ONE();
    }

    let mut y = FixedTrait::ONE();
    let two = integer::u32_as_non_zero(2);

    while n > 1 {
        let (div, rem) = integer::u32_safe_divmod(n, two);

        if rem == 1 {
            y = x * y;
        }

        x = x * x;
        n = div;
    };

    x * y
}

// Floored-division remainder: a - floor(a / b) * b.
fn rem(a: FP16x16, b: FP16x16) -> FP16x16 {
    a - floor(a / b) * b
}

// Rounds half away from zero (magnitude-based, sign preserved).
fn round(a: FP16x16) -> FP16x16 {
    let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));

    if (HALF <= rem) {
        FixedTrait::new_unscaled(div + 1, a.sign)
    } else {
        FixedTrait::new_unscaled(div, a.sign)
    }
}

// Calculates the square root of a FP16x16 point value
// x must be positive
// Widening by ONE before u64_sqrt keeps the result in Q16.16.
fn sqrt(a: FP16x16) -> FP16x16 {
    assert(a.sign == false, 'must be positive');

    let root = integer::u64_sqrt(a.mag.into() * ONE.into());

    FixedTrait::new(root.into(), false)
}

// Subtraction implemented as addition of the negation.
fn sub(a: FP16x16, b: FP16x16) -> FP16x16 {
    add(a, -b)
}

// Signum: 0 for zero input, otherwise ±1 with the input's sign.
fn sign(a: FP16x16) -> FP16x16 {
    if a.mag == 0 {
        FixedTrait::new(0, false)
    } else {
        FixedTrait::new(ONE, a.sign)
    }
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use orion::numbers::fixed_point::implementations::fp16x16::helpers::{
        assert_precise, assert_relative
    };
    use orion::numbers::fixed_point::implementations::fp16x16::math::trig::{PI, HALF_PI};

    use super::{
        FixedTrait, ONE, FP16x16, ceil, floor, sqrt, round, lut, pow, exp, exp2, exp2_int, ln,
        log2, log10, eq, add, ne, HALF
    };

    #[test]
    fn test_into() {
        let a = FixedTrait::<FP16x16>::new_unscaled(5, false);
        assert(a.mag == 5 * ONE, 'invalid result');
    }

    #[test]
    fn test_try_into_u128() {
        // Positive unscaled
        let a = FixedTrait::<FP16x16>::new_unscaled(5, false);
        assert(a.try_into().unwrap() == 5_u128, 'invalid result');

        // Positive scaled
        let b = FixedTrait::<FP16x16>::new(5 * ONE, false);
        assert(b.try_into().unwrap() == 5_u128, 'invalid result');

        // Zero
        let d = FixedTrait::<FP16x16>::new_unscaled(0, false);
        assert(d.try_into().unwrap() == 0_u128, 'invalid result');
    }

    #[test]
    #[should_panic]
    fn test_negative_try_into_u128() {
        let a = FixedTrait::<FP16x16>::new_unscaled(1, true);
        let _a: u128 = a.try_into().unwrap();
    }

    #[test]
    #[available_gas(1000000)]
    fn test_acos() {
        let a = FixedTrait::<FP16x16>::ONE();
        assert(a.acos().into() == 0, 'invalid one');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_asin() {
        let a = FixedTrait::ONE();
        assert_precise(a.asin(), HALF_PI.into(), 'invalid one', Option::None(())); // PI / 2
    }

    #[test]
    #[available_gas(2000000)]
    fn test_atan() {
        let a = FixedTrait::new(2 * ONE, false);
        assert_relative(a.atan(), 72558, 'invalid two', Option::None(()));
    }

    #[test]
    fn test_ceil() {
        let a = FixedTrait::new(190054, false); // 2.9
        assert(ceil(a).mag == 3 * ONE, 'invalid pos decimal');
    }

    #[test]
    fn test_floor() {
        let a = FixedTrait::new(190054, false); // 2.9
        assert(floor(a).mag == 2 * ONE, 'invalid pos decimal');
    }

    #[test]
    fn test_round() {
        let a = FixedTrait::new(190054, false); // 2.9
        assert(round(a).mag == 3 * ONE, 'invalid pos decimal');
    }

    #[test]
    #[should_panic]
    fn test_sqrt_fail() {
        let a = FixedTrait::new_unscaled(25, true);
        sqrt(a);
    }

    #[test]
    fn test_sqrt() {
        let mut a = FixedTrait::new_unscaled(0, false);
        assert(sqrt(a).mag == 0, 'invalid zero root');
        a = FixedTrait::new_unscaled(25, false);
        assert(sqrt(a).mag == 5 * ONE, 'invalid pos root');
    }

    #[test]
    #[available_gas(100000)]
    fn test_msb() {
        let a = FixedTrait::<FP16x16>::new_unscaled(100, false);
        let (msb, div) = lut::msb(a.mag / ONE);
        assert(msb == 6, 'invalid msb');
        assert(div == 64, 'invalid msb ceil');
    }

    #[test]
    #[available_gas(600000)]
    fn test_pow() {
        let a = FixedTrait::new_unscaled(3, false);
        let b = FixedTrait::new_unscaled(4, false);
        assert(pow(a, b).mag == 81 * ONE, 'invalid pos base power');
    }

    #[test]
    #[available_gas(900000)]
    fn test_pow_frac() {
        let a = FixedTrait::new_unscaled(3, false);
        let b = FixedTrait::new(32768, false); // 0.5
        assert_relative(
            pow(a, b), 113512, 'invalid pos base power', Option::None(())
        ); // 1.7320508075688772
    }

    #[test]
    #[available_gas(1000000)]
    fn test_exp() {
        let a = FixedTrait::new_unscaled(2, false);
        assert_relative(exp(a), 484249, 'invalid exp of 2', Option::None(())); // 7.389056098793725
    }

    #[test]
    #[available_gas(400000)]
    fn test_exp2() {
        let a = FixedTrait::new_unscaled(5, false);
        assert(exp2(a).mag == 2097152, 'invalid exp2 of 2');
    }

    #[test]
    #[available_gas(20000)]
    fn test_exp2_int() {
        assert(exp2_int(5).into() == 2097152, 'invalid exp2 of 2');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_ln() {
        let mut a = FixedTrait::new_unscaled(1, false);
        assert(ln(a).mag == 0, 'invalid ln of 1');

        a = FixedTrait::new(178145, false);
        assert_relative(ln(a), ONE.into(), 'invalid ln of 2.7...', Option::None(()));
    }

    #[test]
    #[available_gas(1000000)]
    fn test_log2() {
        let mut a = FixedTrait::new_unscaled(32, false);
        assert(log2(a) == FixedTrait::new_unscaled(5, false), 'invalid log2 32');

        a = FixedTrait::new_unscaled(10, false);
        assert_relative(log2(a), 217706, 'invalid log2 10', Option::None(())); // 3.321928094887362
    }

    #[test]
    #[available_gas(1000000)]
    fn test_log10() {
        let a = FixedTrait::new_unscaled(100, false);
        assert_relative(log10(a), 2 * ONE.into(), 'invalid log10', Option::None(()));
    }

    #[test]
    fn test_eq() {
        let a = FixedTrait::new_unscaled(42, false);
        let b = FixedTrait::new_unscaled(42, false);
        let c = eq(@a, @b);
        assert(c, 'invalid result');
    }

    #[test]
    fn test_ne() {
        let a = FixedTrait::new_unscaled(42, false);
        let b = FixedTrait::new_unscaled(42, false);
        let c = ne(@a, @b);
        assert(!c, 'invalid result');
    }

    #[test]
    fn test_add() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(2, false);
        assert(add(a, b) == FixedTrait::new_unscaled(3, false), 'invalid result');
    }

    #[test]
    fn test_add_eq() {
        let mut a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(2, false);
        a += b;
        assert(a == FixedTrait::<FP16x16>::new_unscaled(3, false), 'invalid result');
    }

    #[test]
    fn test_sub() {
        let a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, false);
        let c = a - b;
        assert(c == FixedTrait::<FP16x16>::new_unscaled(3, false), 'false result invalid');
    }

    #[test]
    fn test_sub_eq() {
        let mut a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, false);
        a -= b;
        assert(a == FixedTrait::<FP16x16>::new_unscaled(3, false), 'invalid result');
    }

    #[test]
    #[available_gas(100000)]
    fn test_mul_pos() {
        let a = FP16x16 { mag: 190054, sign: false };
        let b = FP16x16 { mag: 190054, sign: false };
        let c = a * b;
        assert(c.mag == 551155, 'invalid result');
    }

    #[test]
    fn test_mul_neg() {
        let a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, true);
        let c = a * b;
        assert(c == FixedTrait::<FP16x16>::new_unscaled(10, true), 'invalid result');
    }

    #[test]
    fn test_mul_eq() {
        let mut a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, true);
        a *= b;
        assert(a == FixedTrait::<FP16x16>::new_unscaled(10, true), 'invalid result');
    }

    #[test]
    fn test_div() {
        let a = FixedTrait::new_unscaled(10, false);
        let b = FixedTrait::<FP16x16>::new(190054, false); // 2.9
        let c = a / b;
        assert(c.mag == 225986, 'invalid pos decimal'); // 3.4482758620689653
    }

    #[test]
    fn test_le() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP16x16>::new_unscaled(1, true);

        assert(a <= a, 'a <= a');
        assert(!(a <= b), 'a <= b');
        assert(!(a <= c), 'a <= c');

        assert(b <= a, 'b <= a');
        assert(b <= b, 'b <= b');
        assert(!(b <= c), 'b <= c');

        assert(c <= a, 'c <= a');
        assert(c <= b, 'c <= b');
        assert(c <= c, 'c <= c');
    }

    #[test]
    fn test_lt() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP16x16>::new_unscaled(1, true);

        assert(!(a < a), 'a < a');
        assert(!(a < b), 'a < b');
        assert(!(a < c), 'a < c');

        assert(b < a, 'b < a');
        assert(!(b < b), 'b < b');
        assert(!(b < c), 'b < c');

        assert(c < a, 'c < a');
        assert(c < b, 'c < b');
        assert(!(c < c), 'c < c');
    }

    #[test]
    fn test_ge() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP16x16>::new_unscaled(1, true);

        assert(a >= a, 'a >= a');
        assert(a >= b, 'a >= b');
        assert(a >= c, 'a >= c');

        assert(!(b >= a), 'b >= a');
        assert(b >= b, 'b >= b');
        assert(b >= c, 'b >= c');

        assert(!(c >= a), 'c >= a');
        assert(!(c >= b), 'c >= b');
        assert(c >= c, 'c >= c');
    }

    #[test]
    fn test_gt() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP16x16>::new_unscaled(1, true);

        assert(!(a > a), 'a > a');
        assert(a > b, 'a > b');
        assert(a > c, 'a > c');

        assert(!(b > a), 'b > a');
        assert(!(b > b), 'b > b');
        assert(b > c, 'b > c');

        assert(!(c > a), 'c > a');
        assert(!(c > b), 'c > b');
        assert(!(c > c), 'c > c');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_cos() {
        let a = FixedTrait::<FP16x16>::new(HALF_PI, false);
        assert(a.cos().into() == 0, 'invalid half pi');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_sin() {
        let a = FixedTrait::new(HALF_PI, false);
        assert_precise(a.sin(), ONE.into(), 'invalid half pi', Option::None(()));
    }

    #[test]
    #[available_gas(2000000)]
    fn test_tan() {
        let a = FixedTrait::<FP16x16>::new(HALF_PI / 2, false);
        assert(a.tan().mag == 65536, 'invalid quarter pi');
    }

    #[test]
    #[available_gas(2000000)]
    fn test_sign() {
        let a = FixedTrait::<FP16x16>::new(0, false);
        assert(a.sign().mag == 0 && !a.sign().sign, 'invalid sign (0, true)');

        let a = FixedTrait::<FP16x16>::new(HALF, true);
        assert(a.sign().mag == ONE && a.sign().sign, 'invalid sign (HALF, true)');

        let a = FixedTrait::<FP16x16>::new(HALF, false);
        assert(a.sign().mag == ONE && !a.sign().sign, 'invalid sign (HALF, false)');

        let a = FixedTrait::<FP16x16>::new(ONE, true);
        assert(a.sign().mag == ONE && a.sign().sign, 'invalid sign (ONE, true)');

        let a = FixedTrait::<FP16x16>::new(ONE, false);
        assert(a.sign().mag == ONE && !a.sign().sign, 'invalid sign (ONE, false)');
    }

    #[test]
    #[should_panic]
    #[available_gas(2000000)]
    fn test_sign_fail() {
        let a = FixedTrait::<FP16x16>::new(HALF, true);
        assert(a.sign().mag != ONE && !a.sign().sign, 'invalid sign (HALF, true)');
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo
use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE, FP16x16, FixedTrait};
use orion::numbers::fixed_point::implementations::fp16x16::math::lut::erf_lut;

const ERF_COMPUTATIONAL_ACCURACY: u32 = 100;
const ROUND_CHECK_NUMBER: u32 = 10;
// Values > MAX_ERF_NUMBER return 1
const MAX_ERF_NUMBER: u32 = 229376;
// Values <= ERF_TRUNCATION_NUMBER -> two decimal places, and values > ERF_TRUNCATION_NUMBER -> one decimal place
const ERF_TRUNCATION_NUMBER: u32 = 131072;

// Error function erf(x), evaluated on the magnitude via a lookup table.
// Magnitudes at or above MAX_ERF_NUMBER (3.5 in Q16.16) saturate to ONE;
// the input's sign flag is carried through unchanged.
fn erf(x: FP16x16) -> FP16x16 {
    let mag = if x.mag < MAX_ERF_NUMBER {
        erf_lut(x.mag)
    } else {
        ONE
    };

    FP16x16 { mag: mag, sign: x.sign }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo
use orion::numbers::fixed_point::implementations::fp16x16::core::{
    HALF, ONE, TWO, FP16x16, FP16x16Impl, FP16x16Add, FP16x16AddEq, FP16x16Sub, FP16x16Mul,
    FP16x16MulEq, FP16x16TryIntoU128, FP16x16PartialEq, FP16x16PartialOrd, FP16x16SubEq,
    FP16x16Neg, FP16x16Div, FP16x16IntoFelt252, FixedTrait
};

// Hyperbolic functions for FP16x16, all built from exp/ln/sqrt identities.

// Calculates hyperbolic cosine of a (fixed point)
// cosh(a) = (e^a + e^-a) / 2
fn cosh(a: FP16x16) -> FP16x16 {
    let ea = a.exp();

    (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false)
}

// Calculates hyperbolic sine of a (fixed point)
// sinh(a) = (e^a - e^-a) / 2
fn sinh(a: FP16x16) -> FP16x16 {
    let ea = a.exp();

    (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false)
}

// Calculates hyperbolic tangent of a (fixed point)
// tanh(a) = (e^a - e^-a) / (e^a + e^-a)
fn tanh(a: FP16x16) -> FP16x16 {
    let ea = a.exp();
    let ea_i = FixedTrait::ONE() / ea;

    (ea - ea_i) / (ea + ea_i)
}

// Calculates inverse hyperbolic cosine of a (fixed point)
// acosh(a) = ln(a + sqrt(a^2 - 1)); requires a >= 1 (sqrt panics otherwise)
fn acosh(a: FP16x16) -> FP16x16 {
    let root = (a * a - FixedTrait::ONE()).sqrt();

    (a + root).ln()
}

// Calculates inverse hyperbolic sine of a (fixed point)
// asinh(a) = ln(a + sqrt(a^2 + 1))
fn asinh(a: FP16x16) -> FP16x16 {
    let root = (a * a + FixedTrait::ONE()).sqrt();

    (a + root).ln()
}

// Calculates inverse hyperbolic tangent of a (fixed point)
// atanh(a) = ln((1 + a) / (1 - a)) / 2; requires |a| < 1
fn atanh(a: FP16x16) -> FP16x16 {
    let one = FixedTrait::ONE();
    let ln_arg = (one + a) / (one - a);

    ln_arg.ln() / FixedTrait::new(TWO, false)
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use orion::numbers::fixed_point::implementations::fp16x16::helpers::assert_precise;

    use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF};

    #[test]
    #[available_gas(10000000)]
    fn test_cosh() {
        let a = FixedTrait::new(TWO, false);
        assert_precise(cosh(a), 246550, 'invalid two', Option::None(())); // 3.5954653836066

        let a = FixedTrait::ONE();
        assert_precise(cosh(a), 101127, 'invalid one', Option::None(())); // 1.42428174592510

        let a = FixedTrait::ZERO();
        assert_precise(cosh(a), ONE.into(), 'invalid zero', Option::None(()));

        // NOTE(review): input sign is positive here; presumably
        // FixedTrait::new(ONE, true) was intended — cosh is even, so the
        // expected value still holds. Confirm and fix upstream.
        let a = FixedTrait::ONE();
        assert_precise(cosh(a), 101127, 'invalid neg one', Option::None(())); // 1.42428174592510

        let a = FixedTrait::new(TWO, true);
        assert_precise(cosh(a), 246568, 'invalid neg two', Option::None(())); // 3.5954653836066
    }

    #[test]
    #[available_gas(10000000)]
    fn test_sinh() {
        let a = FixedTrait::new(TWO, false);
        assert_precise(sinh(a), 237681, 'invalid two', Option::None(())); // 3.48973469357602

        let a = FixedTrait::ONE();
        assert_precise(sinh(a), 77018, 'invalid one', Option::None(())); // 1.13687593250230

        let a = FixedTrait::ZERO();
        assert(sinh(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE, true);
        assert_precise(sinh(a), -77018, 'invalid neg one', Option::None(())); // -1.13687593250230

        let a = FixedTrait::new(TWO, true);
        assert_precise(sinh(a), -237699, 'invalid neg two', Option::None(())); // -3.48973469357602
    }

    #[test]
    #[available_gas(10000000)]
    fn test_tanh() {
        let a = FixedTrait::new(TWO, false);
        assert_precise(tanh(a), 63179, 'invalid two', Option::None(())); // 0.75314654693321

        let a = FixedTrait::ONE();
        assert_precise(tanh(a), 49912, 'invalid one', Option::None(())); // 0.59499543433175

        let a = FixedTrait::ZERO();
        assert(tanh(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE, true);
        assert_precise(tanh(a), -49912, 'invalid neg one', Option::None(())); // -0.59499543433175

        let a = FixedTrait::new(TWO, true);
        assert_precise(tanh(a), -63179, 'invalid neg two', Option::None(())); // 0.75314654693321
    }

    #[test]
    #[available_gas(10000000)]
    fn test_acosh() {
        let a = FixedTrait::new(246559, false); // 3.5954653836066
        assert_precise(acosh(a), 131072, 'invalid two', Option::None(()));

        let a = FixedTrait::new(101127, false); // 1.42428174592510
        assert_precise(acosh(a), ONE.into(), 'invalid one', Option::None(()));

        let a = FixedTrait::ONE(); // 1
        assert(acosh(a).into() == 0, 'invalid zero');
    }

    #[test]
    #[available_gas(10000000)]
    fn test_asinh() {
        let a = FixedTrait::new(237690, false); // 3.48973469357602
        assert_precise(asinh(a), 131072, 'invalid two', Option::None(()));

        let a = FixedTrait::new(77018, false); // 1.13687593250230
        assert_precise(asinh(a), ONE.into(), 'invalid one', Option::None(()));

        let a = FixedTrait::ZERO();
        assert(asinh(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(77018, true); // -1.13687593250230
        assert_precise(asinh(a), -ONE.into(), 'invalid neg one', Option::None(()));

        let a = FixedTrait::new(237690, true); // -3.48973469357602
        assert_precise(asinh(a), -131017, 'invalid neg two', Option::None(()));
    }

    #[test]
    #[available_gas(10000000)]
    fn test_atanh() {
        let a = FixedTrait::new(58982, false); // 0.9
        assert_precise(atanh(a), 96483, 'invalid 0.9', Option::None(())); // 1.36892147623689

        let a = FixedTrait::new(HALF, false); // 0.5
        assert_precise(atanh(a), 35999, 'invalid half', Option::None(())); // 0.42914542526098

        let a = FixedTrait::ZERO();
        assert(atanh(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(HALF, true); // 0.5
        assert_precise(atanh(a), -35999, 'invalid neg half', Option::None(())); // 0.42914542526098

        let a = FixedTrait::new(58982, true); // 0.9
        assert_precise(atanh(a), -96483, 'invalid -0.9', Option::None(())); // 1.36892147623689
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo
use orion::numbers::fixed_point::implementations::fp16x16::core::ONE; // Calculates the most significant bit fn msb(whole: u32) -> (u32, u32) { if whole < 256 { if whole < 2 { return (0, 1); } if whole < 4 { return (1, 2); } if whole < 8 { return (2, 4); } if whole < 16 { return (3, 8); } if whole < 32 { return (4, 16); } if whole < 64 { return (5, 32); } if whole < 128 { return (6, 64); } if whole < 256 { return (7, 128); } } else if whole < 65536 { if whole < 512 { return (8, 256); } if whole < 1024 { return (9, 512); } if whole < 2048 { return (10, 1024); } if whole < 4096 { return (11, 2048); } if whole < 8192 { return (12, 4096); } if whole < 16384 { return (13, 8192); } if whole < 32768 { return (14, 16384); } if whole < 65536 { return (15, 32768); } } (16, 65536) } fn exp2(exp: u32) -> u32 { if exp <= 16 { if exp == 0 { return 1; } if exp == 1 { return 2; } if exp == 2 { return 4; } if exp == 3 { return 8; } if exp == 4 { return 16; } if exp == 5 { return 32; } if exp == 6 { return 64; } if exp == 7 { return 128; } if exp == 8 { return 256; } if exp == 9 { return 512; } if exp == 10 { return 1024; } if exp == 11 { return 2048; } if exp == 12 { return 4096; } if exp == 13 { return 8192; } if exp == 14 { return 16384; } if exp == 15 { return 32768; } if exp == 16 { return 65536; } } 65536 } fn sin(a: u32) -> (u32, u32, u32) { let slot = a / 402; if slot < 128 { if slot < 64 { if slot < 32 { if slot < 16 { if slot == 0 { return (0, 0, 402); } if slot == 1 { return (402, 402, 804); } if slot == 2 { return (804, 804, 1206); } if slot == 3 { return (1206, 1206, 1608); } if slot == 4 { return (1608, 1608, 2010); } if slot == 5 { return (2011, 2010, 2412); } if slot == 6 { return (2413, 2412, 2814); } if slot == 7 { return (2815, 2814, 3216); } if slot == 8 { return (3217, 3216, 3617); } if slot == 9 { return (3619, 3617, 4019); } if slot == 10 { return (4023, 4019, 4420); } if slot == 11 { return (4423, 4420, 4821); } if slot == 12 { return (4825, 4821, 5222); } if 
slot == 13 { return (5228, 5222, 5623); } if slot == 14 { return (5630, 5623, 6023); } if slot == 15 { return (6032, 6023, 6424); } } else { if slot == 16 { return (6434, 6424, 6824); } if slot == 17 { return (6836, 6824, 7224); } if slot == 18 { return (7238, 7224, 7623); } if slot == 19 { return (7640, 7623, 8022); } if slot == 20 { return (8042, 8022, 8421); } if slot == 21 { return (8445, 8421, 8820); } if slot == 22 { return (8847, 8820, 9218); } if slot == 23 { return (9249, 9218, 9616); } if slot == 24 { return (9651, 9616, 10014); } if slot == 25 { return (10053, 10014, 10411); } if slot == 26 { return (10455, 10411, 10808); } if slot == 27 { return (10857, 10808, 11204); } if slot == 28 { return (11259, 11204, 11600); } if slot == 29 { return (11662, 11600, 11996); } if slot == 30 { return (12064, 11996, 12391); } if slot == 31 { return (12466, 12391, 12785); } } } else { if slot < 48 { if slot == 32 { return (12868, 12785, 13180); } if slot == 33 { return (13270, 13180, 13573); } if slot == 34 { return (13672, 13573, 13966); } if slot == 35 { return (14074, 13966, 14359); } if slot == 36 { return (14476, 14359, 14751); } if slot == 37 { return (14879, 14751, 15143); } if slot == 38 { return (15281, 15143, 15534); } if slot == 39 { return (15683, 15534, 15924); } if slot == 40 { return (16081, 15924, 16314); } if slot == 41 { return (16487, 16314, 16703); } if slot == 42 { return (16889, 16703, 17091); } if slot == 43 { return (17291, 17091, 17479); } if slot == 44 { return (17693, 17479, 17867); } if slot == 45 { return (18096, 17867, 18253); } if slot == 46 { return (18498, 18253, 18639); } if slot == 47 { return (18900, 18639, 19024); } } else { if slot == 48 { return (19302, 19024, 19409); } if slot == 49 { return (19704, 19409, 19792); } if slot == 50 { return (20113, 19792, 20175); } if slot == 51 { return (20508, 20175, 20557); } if slot == 52 { return (20910, 20557, 20939); } if slot == 53 { return (21313, 20939, 21320); } if slot == 54 { return 
(21715, 21320, 21699); } if slot == 55 { return (22117, 21699, 22078); } if slot == 56 { return (22519, 22078, 22457); } if slot == 57 { return (22921, 22457, 22834); } if slot == 58 { return (23323, 22834, 23210); } if slot == 59 { return (23725, 23210, 23586); } if slot == 60 { return (24127, 23586, 23961); } if slot == 61 { return (24530, 23961, 24335); } if slot == 62 { return (24932, 24335, 24708); } if slot == 63 { return (25334, 24708, 25080); } } } } else { if slot < 96 { if slot < 80 { if slot == 64 { return (25736, 25080, 25451); } if slot == 65 { return (26138, 25451, 25821); } if slot == 66 { return (26540, 25821, 26190); } if slot == 67 { return (26942, 26190, 26558); } if slot == 68 { return (27344, 26558, 26925); } if slot == 69 { return (27747, 26925, 27291); } if slot == 70 { return (28149, 27291, 27656); } if slot == 71 { return (28551, 27656, 28020); } if slot == 72 { return (28953, 28020, 28383); } if slot == 73 { return (29355, 28383, 28745); } if slot == 74 { return (29757, 28745, 29106); } if slot == 75 { return (30159, 29106, 29466); } if slot == 76 { return (30561, 29466, 29824); } if slot == 77 { return (30964, 29824, 30182); } if slot == 78 { return (31366, 30182, 30538); } if slot == 79 { return (31768, 30538, 30893); } } else { if slot == 80 { return (32171, 30893, 31248); } if slot == 81 { return (32572, 31248, 31600); } if slot == 82 { return (32974, 31600, 31952); } if slot == 83 { return (33376, 31952, 32303); } if slot == 84 { return (33778, 32303, 32652); } if slot == 85 { return (34181, 32652, 33000); } if slot == 86 { return (34583, 33000, 33347); } if slot == 87 { return (34985, 33347, 33692); } if slot == 88 { return (35387, 33692, 34037); } if slot == 89 { return (35789, 34037, 34380); } if slot == 90 { return (36194, 34380, 34721); } if slot == 91 { return (36593, 34721, 35062); } if slot == 92 { return (36995, 35062, 35401); } if slot == 93 { return (37398, 35401, 35738); } if slot == 94 { return (37800, 35738, 36075); } if 
slot == 95 { return (38202, 36075, 36410); } } } else { if slot < 112 { if slot == 96 { return (38604, 36410, 36744); } if slot == 97 { return (39006, 36744, 37076); } if slot == 98 { return (39408, 37076, 37407); } if slot == 99 { return (39810, 37407, 37736); } if slot == 100 { return (40227, 37736, 38064); } if slot == 101 { return (40615, 38064, 38391); } if slot == 102 { return (41017, 38391, 38716); } if slot == 103 { return (41419, 38716, 39040); } if slot == 104 { return (41821, 39040, 39362); } if slot == 105 { return (42223, 39362, 39683); } if slot == 106 { return (42625, 39683, 40002); } if slot == 107 { return (43027, 40002, 40320); } if slot == 108 { return (43429, 40320, 40636); } if slot == 109 { return (43832, 40636, 40951); } if slot == 110 { return (44234, 40951, 41264); } if slot == 111 { return (44636, 41264, 41576); } } else { if slot == 112 { return (45038, 41576, 41886); } if slot == 113 { return (45440, 41886, 42194); } if slot == 114 { return (45842, 42194, 42501); } if slot == 115 { return (46244, 42501, 42806); } if slot == 116 { return (46646, 42806, 43110); } if slot == 117 { return (47048, 43110, 43412); } if slot == 118 { return (47451, 43412, 43713); } if slot == 119 { return (47853, 43713, 44011); } if slot == 120 { return (48252, 44011, 44308); } if slot == 121 { return (48657, 44308, 44604); } if slot == 122 { return (49059, 44604, 44898); } if slot == 123 { return (49461, 44898, 45190); } if slot == 124 { return (49863, 45190, 45480); } if slot == 125 { return (50265, 45480, 45769); } if slot == 126 { return (50668, 45769, 46056); } if slot == 127 { return (51070, 46056, 46341); } } } } } else { if slot < 192 { if slot < 160 { if slot < 144 { if slot == 128 { return (51472, 46341, 46624); } if slot == 129 { return (51874, 46624, 46906); } if slot == 130 { return (52285, 46906, 47186); } if slot == 131 { return (52678, 47186, 47464); } if slot == 132 { return (53080, 47464, 47741); } if slot == 133 { return (53482, 47741, 48015); 
} if slot == 134 { return (53885, 48015, 48288); } if slot == 135 { return (54287, 48288, 48559); } if slot == 136 { return (54689, 48559, 48828); } if slot == 137 { return (55091, 48828, 49095); } if slot == 138 { return (55493, 49095, 49361); } if slot == 139 { return (55895, 49361, 49624); } if slot == 140 { return (56297, 49624, 49886); } if slot == 141 { return (56699, 49886, 50146); } if slot == 142 { return (57102, 50146, 50404); } if slot == 143 { return (57504, 50404, 50660); } } else { if slot == 144 { return (57906, 50660, 50914); } if slot == 145 { return (58308, 50914, 51166); } if slot == 146 { return (58710, 51166, 51417); } if slot == 147 { return (59112, 51417, 51665); } if slot == 148 { return (59514, 51665, 51911); } if slot == 149 { return (59916, 51911, 52156); } if slot == 150 { return (60320, 52156, 52398); } if slot == 151 { return (60721, 52398, 52639); } if slot == 152 { return (61123, 52639, 52878); } if slot == 153 { return (61525, 52878, 53114); } if slot == 154 { return (61927, 53114, 53349); } if slot == 155 { return (62329, 53349, 53581); } if slot == 156 { return (62731, 53581, 53812); } if slot == 157 { return (63133, 53812, 54040); } if slot == 158 { return (63536, 54040, 54267); } if slot == 159 { return (63938, 54267, 54491); } if slot == 160 { return (64343, 54491, 54714); } } } else { if slot < 176 { if slot == 161 { return (64742, 54714, 54934); } if slot == 162 { return (65144, 54934, 55152); } if slot == 163 { return (65546, 55152, 55368); } if slot == 164 { return (65948, 55368, 55582); } if slot == 165 { return (66350, 55582, 55794); } if slot == 166 { return (66753, 55794, 56004); } if slot == 167 { return (67155, 56004, 56212); } if slot == 168 { return (67557, 56212, 56418); } if slot == 169 { return (67959, 56418, 56621); } if slot == 170 { return (68361, 56621, 56823); } if slot == 171 { return (68763, 56823, 57022); } if slot == 172 { return (69165, 57022, 57219); } if slot == 173 { return (69567, 57219, 57414); } 
if slot == 174 { return (69970, 57414, 57607); } if slot == 175 { return (70372, 57607, 57798); } } else { if slot == 176 { return (70774, 57798, 57986); } if slot == 177 { return (71176, 57986, 58172); } if slot == 178 { return (71578, 58172, 58356); } if slot == 179 { return (71980, 58356, 58538); } if slot == 180 { return (72382, 58538, 58718); } if slot == 181 { return (72784, 58718, 58896); } if slot == 182 { return (73187, 58896, 59071); } if slot == 183 { return (73589, 59071, 59244); } if slot == 184 { return (73991, 59244, 59415); } if slot == 185 { return (74393, 59415, 59583); } if slot == 186 { return (74795, 59583, 59750); } if slot == 187 { return (75197, 59750, 59914); } if slot == 188 { return (75599, 59914, 60075); } if slot == 189 { return (76001, 60075, 60235); } if slot == 190 { return (76401, 60235, 60392); } if slot == 191 { return (76806, 60392, 60547); } } } } else { if slot < 224 { if slot < 208 { if slot == 192 { return (77208, 60547, 60700); } if slot == 193 { return (77610, 60700, 60851); } if slot == 194 { return (78012, 60851, 60999); } if slot == 195 { return (78414, 60999, 61145); } if slot == 196 { return (78816, 61145, 61288); } if slot == 197 { return (79218, 61288, 61429); } if slot == 198 { return (79621, 61429, 61568); } if slot == 199 { return (80023, 61568, 61705); } if slot == 200 { return (80423, 61705, 61839); } if slot == 201 { return (80827, 61839, 61971); } if slot == 202 { return (81229, 61971, 62101); } if slot == 203 { return (81631, 62101, 62228); } if slot == 204 { return (82033, 62228, 62353); } if slot == 205 { return (82435, 62353, 62476); } if slot == 206 { return (82838, 62476, 62596); } if slot == 207 { return (83240, 62596, 62714); } } else { if slot == 208 { return (83642, 62714, 62830); } if slot == 209 { return (84044, 62830, 62943); } if slot == 210 { return (84446, 62943, 63054); } if slot == 211 { return (84848, 63054, 63162); } if slot == 212 { return (85250, 63162, 63268); } if slot == 213 { return 
(85652, 63268, 63372); } if slot == 214 { return (86055, 63372, 63473); } if slot == 215 { return (86457, 63473, 63572); } if slot == 216 { return (86859, 63572, 63668); } if slot == 217 { return (87261, 63668, 63763); } if slot == 218 { return (87663, 63763, 63854); } if slot == 219 { return (88065, 63854, 63944); } if slot == 220 { return (88467, 63944, 64031); } if slot == 221 { return (88869, 64031, 64115); } if slot == 222 { return (89271, 64115, 64197); } if slot == 223 { return (89674, 64197, 64277); } } } else { if slot < 240 { if slot == 224 { return (90076, 64277, 64354); } if slot == 225 { return (90478, 64354, 64429); } if slot == 226 { return (90880, 64429, 64501); } if slot == 227 { return (91282, 64501, 64571); } if slot == 228 { return (91684, 64571, 64639); } if slot == 229 { return (92086, 64639, 64704); } if slot == 230 { return (92491, 64704, 64766); } if slot == 231 { return (92891, 64766, 64827); } if slot == 232 { return (93293, 64827, 64884); } if slot == 233 { return (93695, 64884, 64940); } if slot == 234 { return (94097, 64940, 64993); } if slot == 235 { return (94499, 64993, 65043); } if slot == 236 { return (94901, 65043, 65091); } if slot == 237 { return (95303, 65091, 65137); } if slot == 238 { return (95705, 65137, 65180); } if slot == 239 { return (96108, 65180, 65220); } } else { if slot == 240 { return (96514, 65220, 65259); } if slot == 241 { return (96912, 65259, 65294); } if slot == 242 { return (97314, 65294, 65328); } if slot == 243 { return (97716, 65328, 65358); } if slot == 244 { return (98118, 65358, 65387); } if slot == 245 { return (98520, 65387, 65413); } if slot == 246 { return (98922, 65413, 65436); } if slot == 247 { return (99325, 65436, 65457); } if slot == 248 { return (99727, 65457, 65476); } if slot == 249 { return (100129, 65476, 65492); } if slot == 250 { return (100531, 65492, 65505); } if slot == 251 { return (100933, 65505, 65516); } if slot == 252 { return (101335, 65516, 65525); } if slot == 253 { return 
// NOTE(review): the tokens on the next two lines are the tail of the preceding
// lookup function, whose header lies above this chunk; reproduced verbatim.
(101737, 65525, 65531); } if slot == 254 { return (102139, 65531, 65535); } } } } }
(102542, 65535, 65536) }

// Piecewise-linear lookup table for arctangent, consumed by trig::atan_fast.
//
// `a` is a non-negative, range-reduced FP16x16 magnitude (2^16 scaling).
// Returns the tuple (x0, y0, y1) where
//   x0 — raw magnitude at the start of the 459-wide segment containing `a`,
//   y0 — tabulated atan at x0 (FP16x16),
//   y1 — tabulated atan at the end of the segment (FP16x16).
// The caller interpolates: atan(a) ~= y0 + (a - x0) / 459 * (y1 - y0).
fn atan(a: u32) -> (u32, u32, u32) {
    // Each table segment spans 459 raw units (~0.007 in real value).
    let slot = a / 459;

    if slot == 0 { return (0, 0, 459); }
    if slot == 1 { return (459, 459, 917); }
    if slot == 2 { return (918, 917, 1376); }
    if slot == 3 { return (1376, 1376, 1835); }
    if slot == 4 { return (1835, 1835, 2293); }
    if slot == 5 { return (2294, 2293, 2751); }
    if slot == 6 { return (2753, 2751, 3209); }
    if slot == 7 { return (3211, 3209, 3666); }
    if slot == 8 { return (3670, 3666, 4123); }
    if slot == 9 { return (4129, 4123, 4580); }
    if slot == 10 { return (4591, 4580, 5036); }
    if slot == 11 { return (5046, 5036, 5492); }
    if slot == 12 { return (5505, 5492, 5947); }
    if slot == 13 { return (5964, 5947, 6402); }
    if slot == 14 { return (6423, 6402, 6856); }
    if slot == 15 { return (6881, 6856, 7310); }
    if slot == 16 { return (7340, 7310, 7762); }
    if slot == 17 { return (7799, 7762, 8214); }
    if slot == 18 { return (8258, 8214, 8665); }
    if slot == 19 { return (8716, 8665, 9116); }
    if slot == 20 { return (9181, 9116, 9565); }
    if slot == 21 { return (9634, 9565, 10014); }
    if slot == 22 { return (10093, 10014, 10462); }
    if slot == 23 { return (10551, 10462, 10908); }
    if slot == 24 { return (11010, 10908, 11354); }
    if slot == 25 { return (11469, 11354, 11798); }
    if slot == 26 { return (11928, 11798, 12242); }
    if slot == 27 { return (12386, 12242, 12684); }
    if slot == 28 { return (12845, 12684, 13125); }
    if slot == 29 { return (13304, 13125, 13565); }
    if slot == 30 { return (13762, 13565, 14004); }
    if slot == 31 { return (14221, 14004, 14442); }
    if slot == 32 { return (14680, 14442, 14878); }
    if slot == 33 { return (15139, 14878, 15313); }
    if slot == 34 { return (15598, 15313, 15746); }
    if slot == 35 { return (16056, 15746, 16178); }
    if slot == 36 { return (16515, 16178, 16609); }
    if slot == 37 { return (16974, 16609, 17038); }
    if slot == 38 { return (17433, 17038, 17466); }
    if slot == 39 { return (17891, 17466, 17892); }
    if slot == 40 { return (18353, 17892, 18317); }
    if slot == 41 { return (18809, 18317, 18740); }
    if slot == 42 { return (19268, 18740, 19161); }
    if slot == 43 { return (19726, 19161, 19581); }
    if slot == 44 { return (20185, 19581, 19999); }
    if slot == 45 { return (20644, 19999, 20416); }
    if slot == 46 { return (21103, 20416, 20830); }
    if slot == 47 { return (21561, 20830, 21243); }
    if slot == 48 { return (22020, 21243, 21655); }
    if slot == 49 { return (22479, 21655, 22064); }
    if slot == 50 { return (22944, 22064, 22472); }
    if slot == 51 { return (23396, 22472, 22878); }
    if slot == 52 { return (23855, 22878, 23282); }
    if slot == 53 { return (24314, 23282, 23685); }
    if slot == 54 { return (24773, 23685, 24085); }
    if slot == 55 { return (25231, 24085, 24484); }
    if slot == 56 { return (25690, 24484, 24880); }
    if slot == 57 { return (26149, 24880, 25275); }
    if slot == 58 { return (26608, 25275, 25668); }
    if slot == 59 { return (27066, 25668, 26059); }
    if slot == 60 { return (27534, 26059, 26448); }
    if slot == 61 { return (27984, 26448, 26835); }
    if slot == 62 { return (28443, 26835, 27220); }
    if slot == 63 { return (28901, 27220, 27603); }
    if slot == 64 { return (29360, 27603, 27984); }
    if slot == 65 { return (29819, 27984, 28363); }
    if slot == 66 { return (30278, 28363, 28740); }
    if slot == 67 { return (30736, 28740, 29115); }
    if slot == 68 { return (31195, 29115, 29488); }
    if slot == 69 { return (31654, 29488, 29859); }
    if slot == 70 { return (32113, 29859, 30228); }
    if slot == 71 { return (32571, 30228, 30595); }
    if slot == 72 { return (33030, 30595, 30960); }
    if slot == 73 { return (33489, 30960, 31323); }
    if slot == 74 { return (33948, 31323, 31683); }
    if slot == 75 { return (34406, 31683, 32042); }
    if slot == 76 { return (34865, 32042, 32398); }
    if slot == 77 { return (35324, 32398, 32753); }
    if slot == 78 { return (35783, 32753, 33105); }
    if slot == 79 { return (36241, 33105, 33455); }
    if slot == 80 { return (36700, 33455, 33804); }
    if slot == 81 { return (37159, 33804, 34150); }
    if slot == 82 { return (37618, 34150, 34494); }
    if slot == 83 { return (38076, 34494, 34836); }
    if slot == 84 { return (38535, 34836, 35175); }
    if slot == 85 { return (38994, 35175, 35513); }
    if slot == 86 { return (39453, 35513, 35849); }
    if slot == 87 { return (39911, 35849, 36183); }
    if slot == 88 { return (40370, 36183, 36514); }
    if slot == 89 { return (40829, 36514, 36843); }
    if slot == 90 { return (41288, 36843, 37171); }
    if slot == 91 { return (41746, 37171, 37496); }
    if slot == 92 { return (42205, 37496, 37819); }
    if slot == 93 { return (42664, 37819, 38141); }
    if slot == 94 { return (43123, 38141, 38460); }
    if slot == 95 { return (43581, 38460, 38777); }
    if slot == 96 { return (44040, 38777, 39092); }
    if slot == 97 { return (44499, 39092, 39405); }
    if slot == 98 { return (44958, 39405, 39716); }

    // Fallback for slot >= 99: last tabulated segment. atan_fast range-reduces
    // its argument before calling, so larger magnitudes land here only at the
    // very top of the reduced range.
    (45416, 39716, 40025)
}

// Lookup table for erf(x), x given as a non-negative FP16x16 magnitude
// (2^16 scaling — presumably; TODO confirm against the enclosing module's ONE).
// Returns erf(x) in the same scaling. Fine steps of ~655 raw units (~0.01)
// up to x ~= 2.0, coarse steps of ~6553 (~0.1) up to x ~= 3.4, then saturates
// at ONE since erf approaches 1.
fn erf_lut(x: u32) -> u32 {
    // Construct the erf lookup table
    if x <= 5898 {
        if x <= 0 { return 0; }
        if x <= 655 { return 739; }
        if x <= 1310 { return 1478; }
        if x <= 1966 { return 2217; }
        if x <= 2621 { return 2956; }
        if x <= 3276 { return 3694; }
        if x <= 3932 { return 4431; }
        if x <= 4587 { return 5168; }
        if x <= 5242 { return 5903; }
        if x <= 5898 { return 6637; }
    }
    if x <= 12451 {
        if x <= 6553 { return 7370; }
        if x <= 7208 { return 8101; }
        if x <= 7864 { return 8831; }
        if x <= 8519 { return 9559; }
        if x <= 9175 { return 10285; }
        if x <= 9830 { return 11009; }
        if x <= 10485 { return 11731; }
        if x <= 11141 { return 12451; }
        if x <= 11796 { return 13168; }
        if x <= 12451 { return 13883; }
    }
    if x <= 19005 {
        if x <= 13107 { return 14595; }
        if x <= 13762 { return 15304; }
        if x <= 14417 { return 16010; }
        if x <= 15073 { return 16713; }
        if x <= 15728 { return 17412; }
        if x <= 16384 { return 18109; }
        if x <= 17039 { return 18802; }
        if x <= 17694 { return 19491; }
        if x <= 18350 { return 20177; }
        if x <= 19005 { return 20859; }
    }
    if x <= 25559 {
        if x <= 19660 { return 21536; }
        if x <= 20316 { return 22210; }
        if x <= 20971 { return 22880; }
        if x <= 21626 { return 23545; }
        if x <= 22282 { return 24206; }
        if x <= 22937 { return 24863; }
        if x <= 23592 { return 25515; }
        if x <= 24248 { return 26162; }
        if x <= 24903 { return 26804; }
        if x <= 25559 { return 27442; }
    }
    if x <= 32112 {
        if x <= 26214 { return 28075; }
        if x <= 26869 { return 28702; }
        if x <= 27525 { return 29325; }
        if x <= 28180 { return 29942; }
        if x <= 28835 { return 30554; }
        if x <= 29491 { return 31161; }
        if x <= 30146 { return 31762; }
        if x <= 30801 { return 32358; }
        if x <= 31457 { return 32948; }
        if x <= 32112 { return 33532; }
    }
    if x <= 38666 {
        if x <= 32768 { return 34111; }
        if x <= 33423 { return 34684; }
        if x <= 34078 { return 35251; }
        if x <= 34734 { return 35813; }
        if x <= 35389 { return 36368; }
        if x <= 36044 { return 36917; }
        if x <= 36700 { return 37461; }
        if x <= 37355 { return 37998; }
        if x <= 38010 { return 38530; }
        if x <= 38666 { return 39055; }
    }
    if x <= 45219 {
        if x <= 39321 { return 39574; }
        if x <= 39976 { return 40087; }
        if x <= 40632 { return 40593; }
        if x <= 41287 { return 41094; }
        if x <= 41943 { return 41588; }
        if x <= 42598 { return 42076; }
        if x <= 43253 { return 42557; }
        if x <= 43909 { return 43032; }
        if x <= 44564 { return 43501; }
        if x <= 45219 { return 43964; }
    }
    if x <= 51773 {
        if x <= 45875 { return 44420; }
        if x <= 46530 { return 44870; }
        if x <= 47185 { return 45313; }
        if x <= 47841 { return 45750; }
        if x <= 48496 { return 46181; }
        if x <= 49152 { return 46606; }
        if x <= 49807 { return 47024; }
        if x <= 50462 { return 47436; }
        if x <= 51118 { return 47841; }
        if x <= 51773 { return 48241; }
    }
    if x <= 58327 {
        if x <= 52428 { return 48634; }
        if x <= 53084 { return 49021; }
        if x <= 53739 { return 49401; }
        if x <= 54394 { return 49776; }
        if x <= 55050 { return 50144; }
        if x <= 55705 { return 50506; }
        if x <= 56360 { return 50862; }
        if x <= 57016 { return 51212; }
        if x <= 57671 { return 51556; }
        if x <= 58327 { return 51894; }
    }
    if x <= 64880 {
        if x <= 58982 { return 52226; }
        if x <= 59637 { return 52552; }
        if x <= 60293 { return 52872; }
        if x <= 60948 { return 53186; }
        if x <= 61603 { return 53495; }
        if x <= 62259 { return 53797; }
        if x <= 62914 { return 54094; }
        if x <= 63569 { return 54386; }
        if x <= 64225 { return 54672; }
        if x <= 64880 { return 54952; }
    }
    if x <= 71434 {
        if x <= 65536 { return 55227; }
        if x <= 66191 { return 55496; }
        if x <= 66846 { return 55760; }
        if x <= 67502 { return 56019; }
        if x <= 68157 { return 56272; }
        if x <= 68812 { return 56520; }
        if x <= 69468 { return 56763; }
        if x <= 70123 { return 57001; }
        if x <= 70778 { return 57234; }
        if x <= 71434 { return 57462; }
    }
    if x <= 77987 {
        if x <= 72089 { return 57685; }
        if x <= 72744 { return 57903; }
        if x <= 73400 { return 58116; }
        if x <= 74055 { return 58325; }
        if x <= 74711 { return 58529; }
        if x <= 75366 { return 58728; }
        if x <= 76021 { return 58923; }
        if x <= 76677 { return 59113; }
        if x <= 77332 { return 59299; }
        if x <= 77987 { return 59481; }
    }
    if x <= 84541 {
        if x <= 78643 { return 59658; }
        if x <= 79298 { return 59831; }
        if x <= 79953 { return 60000; }
        if x <= 80609 { return 60165; }
        if x <= 81264 { return 60326; }
        if x <= 81920 { return 60483; }
        if x <= 82575 { return 60636; }
        if x <= 83230 { return 60785; }
        if x <= 83886 { return 60931; }
        if x <= 84541 { return 61072; }
    }
    if x <= 91095 {
        if x <= 85196 { return 61211; }
        if x <= 85852 { return 61345; }
        if x <= 86507 { return 61477; }
        if x <= 87162 { return 61604; }
        if x <= 87818 { return 61729; }
        if x <= 88473 { return 61850; }
        if x <= 89128 { return 61968; }
        if x <= 89784 { return 62083; }
        if x <= 90439 { return 62194; }
        if x <= 91095 { return 62303; }
    }
    if x <= 97648 {
        if x <= 91750 { return 62408; }
        if x <= 92405 { return 62511; }
        if x <= 93061 { return 62611; }
        if x <= 93716 { return 62708; }
        if x <= 94371 { return 62802; }
        if x <= 95027 { return 62894; }
        if x <= 95682 { return 62983; }
        if x <= 96337 { return 63070; }
        if x <= 96993 { return 63154; }
        if x <= 97648 { return 63235; }
    }
    if x <= 104202 {
        if x <= 98304 { return 63314; }
        if x <= 98959 { return 63391; }
        if x <= 99614 { return 63465; }
        if x <= 100270 { return 63538; }
        if x <= 100925 { return 63608; }
        if x <= 101580 { return 63676; }
        if x <= 102236 { return 63742; }
        if x <= 102891 { return 63806; }
        if x <= 103546 { return 63867; }
        if x <= 104202 { return 63927; }
    }
    if x <= 110755 {
        if x <= 104857 { return 63985; }
        if x <= 105512 { return 64042; }
        if x <= 106168 { return 64096; }
        if x <= 106823 { return 64149; }
        if x <= 107479 { return 64200; }
        if x <= 108134 { return 64249; }
        if x <= 108789 { return 64297; }
        if x <= 109445 { return 64343; }
        if x <= 110100 { return 64388; }
        if x <= 110755 { return 64431; }
    }
    if x <= 117309 {
        if x <= 111411 { return 64473; }
        if x <= 112066 { return 64514; }
        if x <= 112721 { return 64553; }
        if x <= 113377 { return 64590; }
        if x <= 114032 { return 64627; }
        if x <= 114688 { return 64662; }
        if x <= 115343 { return 64696; }
        if x <= 115998 { return 64729; }
        if x <= 116654 { return 64760; }
        if x <= 117309 { return 64791; }
    }
    if x <= 123863 {
        if x <= 117964 { return 64821; }
        if x <= 118620 { return 64849; }
        if x <= 119275 { return 64876; }
        if x <= 119930 { return 64903; }
        if x <= 120586 { return 64928; }
        if x <= 121241 { return 64953; }
        if x <= 121896 { return 64977; }
        if x <= 122552 { return 64999; }
        if x <= 123207 { return 65021; }
        if x <= 123863 { return 65043; }
    }
    if x <= 130416 {
        if x <= 124518 { return 65063; }
        if x <= 125173 { return 65083; }
        if x <= 125829 { return 65102; }
        if x <= 126484 { return 65120; }
        if x <= 127139 { return 65137; }
        if x <= 127795 { return 65154; }
        if x <= 128450 { return 65170; }
        if x <= 129105 { return 65186; }
        if x <= 129761 { return 65201; }
        if x <= 130416 { return 65215; }
    }
    // Coarse tail: erf flattens toward 1, so ~0.1-wide steps suffice here.
    if x <= 222822 {
        if x <= 131072 { return 65229; }
        if x <= 137625 { return 65340; }
        if x <= 144179 { return 65413; }
        if x <= 150732 { return 65461; }
        if x <= 157286 { return 65490; }
        if x <= 163840 { return 65509; }
        if x <= 170393 { return 65520; }
        if x <= 176947 { return 65527; }
        if x <= 183500 { return 65531; }
        if x <= 190054 { return 65533; }
        if x <= 196608 { return 65534; }
        if x <= 203161 { return 65535; }
        if x <= 209715 { return 65535; }
        if x <= 216268 { return 65535; }
        if x <= 222822 { return 65535; }
    }
    // Saturate: erf(x) ~ 1 for all larger inputs.
    ONE
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo
use core::integer;

use orion::numbers::fixed_point::implementations::fp16x16::math::lut;
use orion::numbers::fixed_point::implementations::fp16x16::core::{
    HALF, ONE, TWO, FP16x16, FP16x16Impl, FP16x16Add, FP16x16Sub, FP16x16Mul, FP16x16Div,
    FP16x16IntoFelt252, FixedTrait
};

// CONSTANTS (FP16x16, i.e. real value scaled by 2^16)

const TWO_PI: u32 = 411775;
const PI: u32 = 205887;
const HALF_PI: u32 = 102944;

// PUBLIC

// Calculates arccos(a) for -1 <= a <= 1 (fixed point)
// arccos(a) = arcsin(sqrt(1 - a^2)) - arctan identity has discontinuity at zero
fn acos(a: FP16x16) -> FP16x16 {
    let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    let asin_res = asin(asin_arg);

    if a.sign {
        // arccos(-x) = pi - arccos(x)
        FixedTrait::new(PI, false) - asin_res
    } else {
        asin_res
    }
}

// Lookup-table variant of acos; same contract, delegates to asin_fast.
fn acos_fast(a: FP16x16) -> FP16x16 {
    let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    let asin_res = asin_fast(asin_arg);

    if a.sign {
        FixedTrait::new(PI, false) - asin_res
    } else {
        asin_res
    }
}

// Calculates arcsin(a) for -1 <= a <= 1 (fixed point)
// arcsin(a) = arctan(a / sqrt(1 - a^2))
fn asin(a: FP16x16) -> FP16x16 {
    if (a.mag == ONE) {
        // The arctan identity divides by zero at |a| == 1; handle explicitly.
        return FixedTrait::new(HALF_PI, a.sign);
    }

    let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    atan(a / div)
}

// Lookup-table variant of asin; same contract, delegates to atan_fast.
fn asin_fast(a: FP16x16) -> FP16x16 {
    if (a.mag == ONE) {
        return FixedTrait::new(HALF_PI, a.sign);
    }

    let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    atan_fast(a / div)
}

// Calculates arctan(a) (fixed point) via a degree-10 polynomial after range
// reduction. See https://stackoverflow.com/a/50894477 for range adjustments.
fn atan(a: FP16x16) -> FP16x16 {
    let mut at = a.abs();
    let mut shift = false;
    let mut invert = false;

    // Invert value when a > 1: atan(x) = pi/2 - atan(1/x)
    if (at.mag > ONE) {
        at = FixedTrait::ONE() / at;
        invert = true;
    }

    // Account for lack of precision in polynomial when a > 0.7:
    // atan(x) = pi/6 + atan((x - sqrt(3)/3) / (1 + x * sqrt(3)/3))
    if (at.mag > 45875) {
        let sqrt3_3 = FixedTrait::new(37837, false); // sqrt(3) / 3
        at = (at - sqrt3_3) / (FixedTrait::ONE() + at * sqrt3_3);
        shift = true;
    }

    // Horner evaluation of the minimax polynomial (sign=true coefficients
    // are negative).
    let r10 = FixedTrait::new(120, true) * at;
    let r9 = (r10 + FixedTrait::new(3066, true)) * at;
    let r8 = (r9 + FixedTrait::new(12727, false)) * at;
    let r7 = (r8 + FixedTrait::new(17170, true)) * at;
    let r6 = (r7 + FixedTrait::new(2865, false)) * at;
    let r5 = (r6 + FixedTrait::new(12456, false)) * at;
    let r4 = (r5 + FixedTrait::new(90, false)) * at;
    let r3 = (r4 + FixedTrait::new(21852, true)) * at;
    let r2 = r3 * at;
    let mut res = (r2 + FixedTrait::new(65536, false)) * at;

    // Adjust for sign change, inversion, and shift
    if (shift) {
        res = res + FixedTrait::new(34315, false); // pi / 6
    }

    if (invert) {
        // res - pi/2 is negative here; only its magnitude is used below.
        res = res - FixedTrait::new(HALF_PI, false);
    }

    FixedTrait::new(res.mag, a.sign)
}

// Lookup-table variant of atan: same range reduction as atan, then linear
// interpolation over lut::atan's 459-wide segments.
fn atan_fast(a: FP16x16) -> FP16x16 {
    let mut at = a.abs();
    let mut shift = false;
    let mut invert = false;

    // Invert value when a > 1
    if (at.mag > ONE) {
        at = FixedTrait::ONE() / at;
        invert = true;
    }

    // Account for lack of precision in polynomial when a > 0.7
    if (at.mag > 45875) {
        let sqrt3_3 = FixedTrait::new(37837, false); // sqrt(3) / 3
        at = (at - sqrt3_3) / (FixedTrait::ONE() + at * sqrt3_3);
        shift = true;
    }

    let (start, low, high) = lut::atan(at.mag);
    let partial_step = FixedTrait::new(at.mag - start, false) / FixedTrait::new(459, false);
    let mut res = partial_step * FixedTrait::new(high - low, false) + FixedTrait::new(low, false);

    // Adjust for sign change, inversion, and shift
    if (shift) {
        res = res + FixedTrait::new(34315, false); // pi / 6
    }

    if (invert) {
        res = res - FixedTrait::<FP16x16>::new(HALF_PI, false);
    }

    FixedTrait::new(res.mag, a.sign)
}

// Calculates cos(a) with a in radians (fixed point): cos(a) = sin(pi/2 - a)
fn cos(a: FP16x16) -> FP16x16 {
    sin(FixedTrait::new(HALF_PI, false) - a)
}

fn cos_fast(a: FP16x16) -> FP16x16 {
    sin_fast(FixedTrait::new(HALF_PI, false) - a)
}

// Calculates sin(a) with a in radians (fixed point) via a Taylor series,
// after reducing the argument to [0, pi) and tracking the half-period sign.
fn sin(a: FP16x16) -> FP16x16 {
    let a1 = a.mag % TWO_PI;
    let (whole_rem, partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI));
    let a2 = FixedTrait::new(partial_rem, false);
    // Odd half-period (a1 in [pi, 2*pi)) flips the sign.
    let partial_sign = whole_rem == 1;

    let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE());
    // Keep zero unsigned (mag == 0 never carries a sign).
    FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0)
}

// Lookup-table variant of sin: reduce to [0, pi/2] using sin(pi - x) = sin(x),
// then interpolate over lut::sin's 402-wide segments.
fn sin_fast(a: FP16x16) -> FP16x16 {
    let a1 = a.mag % TWO_PI;
    let (whole_rem, mut partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI));
    let partial_sign = whole_rem == 1;

    if partial_rem >= HALF_PI {
        partial_rem = PI - partial_rem;
    }

    let (start, low, high) = lut::sin(partial_rem);
    let partial_step = FixedTrait::new(partial_rem - start, false) / FixedTrait::new(402, false);
    let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false))
        + FixedTrait::<FP16x16>::new(low, false);

    FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0)
}

// Calculates tan(a) with a in radians (fixed point); panics when cos(a) == 0.
fn tan(a: FP16x16) -> FP16x16 {
    let sinx = sin(a);
    let cosx = cos(a);
    assert(cosx.mag != 0, 'tan undefined');
    sinx / cosx
}

fn tan_fast(a: FP16x16) -> FP16x16 {
    let sinx = sin_fast(a);
    let cosx = cos_fast(a);
    assert(cosx.mag != 0, 'tan undefined');
    sinx / cosx
}

// Helper function to calculate Taylor series for sin.
// Evaluates the alternating series in Horner form from the highest term
// (i counts down from the caller's 7); sin(a) = a * _sin_loop(a, 7, 1).
fn _sin_loop(a: FP16x16, i: u32, acc: FP16x16) -> FP16x16 {
    let div = (2 * i + 2) * (2 * i + 3);
    let term = a * a * acc / FixedTrait::new_unscaled(div, false);
    let new_acc = FixedTrait::ONE() - term;

    if (i == 0) {
        return new_acc;
    }

    _sin_loop(a, i - 1, new_acc)
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use orion::numbers::fixed_point::implementations::fp16x16::helpers::{
        assert_precise, assert_relative
    };
    use orion::numbers::fixed_point::implementations::fp16x16::core::{
        FP16x16PartialEq, FP16x16Print
    };

    use super::{
        FixedTrait, acos, HALF_PI, ONE, acos_fast, PI, atan_fast, atan, asin, cos, cos_fast, sin,
        sin_fast, tan
    };

    #[test]
    #[available_gas(8000000)]
    fn test_acos() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::ONE();
        assert(acos(a).into() == 0, 'invalid one');

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(acos(a), 68629, 'invalid half', error); // 1.04719755 (pi / 3)

        let a = FixedTrait::ZERO();
        assert_relative(acos(a), HALF_PI.into(), 'invalid zero', Option::None(())); // PI / 2

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(acos(a), 137258, 'invalid neg half', error); // 2.09439510 (2 * pi / 3)

        let a = FixedTrait::new(ONE, true);
        assert_relative(acos(a), PI.into(), 'invalid neg one', Option::None(())); // PI
    }

    #[test]
    #[available_gas(8000000)]
    fn test_acos_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::ONE();
        assert(acos_fast(a).into() == 0, 'invalid one');

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(acos_fast(a), 68629, 'invalid half', error); // 1.04719755 (pi / 3)

        let a = FixedTrait::ZERO();
        assert_relative(acos_fast(a), HALF_PI.into(), 'invalid zero', Option::None(())); // PI / 2

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(acos_fast(a), 137258, 'invalid neg half', error); // 2.09439510

        let a = FixedTrait::new(ONE, true);
        assert_relative(acos_fast(a), PI.into(), 'invalid neg one', Option::None(())); // PI
    }

    #[test]
    #[should_panic]
    #[available_gas(8000000)]
    fn test_acos_fail() {
        let a = FixedTrait::new(2 * ONE, true);
        acos(a);
    }

    #[test]
    #[available_gas(8000000)]
    fn test_atan_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::new(2 * ONE, false);
        assert_relative(atan_fast(a), 72558, 'invalid two', error);

        let a = FixedTrait::ONE();
        assert_relative(atan_fast(a), 51472, 'invalid one', error);

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(atan_fast(a), 30386, 'invalid half', error);

        let a = FixedTrait::ZERO();
        assert(atan_fast(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(atan_fast(a), -30386, 'invalid neg half', error);

        let a = FixedTrait::new(ONE, true);
        assert_relative(atan_fast(a), -51472, 'invalid neg one', error);

        let a = FixedTrait::new(2 * ONE, true);
        assert_relative(atan_fast(a), -72558, 'invalid neg two', error);
    }

    #[test]
    #[available_gas(8000000)]
    fn test_atan() {
        let a = FixedTrait::new(2 * ONE, false);
        assert_relative(atan(a), 72558, 'invalid two', Option::None(()));

        let a = FixedTrait::ONE();
        assert_relative(atan(a), 51472, 'invalid one', Option::None(()));

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(atan(a), 30386, 'invalid half', Option::None(()));

        let a = FixedTrait::ZERO();
        assert(atan(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(atan(a), -30386, 'invalid neg half', Option::None(()));

        let a = FixedTrait::new(ONE, true);
        assert_relative(atan(a), -51472, 'invalid neg one', Option::None(()));

        let a = FixedTrait::new(2 * ONE, true);
        assert_relative(atan(a), -72558, 'invalid neg two', Option::None(()));
    }

    #[test]
    #[available_gas(8000000)]
    fn test_asin() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::ONE();
        assert_relative(asin(a), HALF_PI.into(), 'invalid one', Option::None(())); // PI / 2

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(asin(a), 34315, 'invalid half', error); // 0.52359878 (pi / 6)

        let a = FixedTrait::ZERO();
        assert_precise(asin(a), 0, 'invalid zero', Option::None(()));

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(asin(a), -34315, 'invalid neg half', error);

        let a = FixedTrait::new(ONE, true);
        assert_relative(asin(a), -HALF_PI.into(), 'invalid neg one', Option::None(())); // -PI / 2
    }

    #[test]
    #[should_panic]
    #[available_gas(8000000)]
    fn test_asin_fail() {
        let a = FixedTrait::new(2 * ONE, false);
        asin(a);
    }

    #[test]
    #[available_gas(8000000)]
    fn test_cos() {
        let a = FixedTrait::new(HALF_PI, false);
        assert(cos(a).into() == 0, 'invalid half pi');

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_relative(cos(a), 46341, 'invalid quarter pi', Option::None(())); // 0.70710678

        let a = FixedTrait::new(PI, false);
        assert_relative(cos(a), -1 * ONE.into(), 'invalid pi', Option::None(()));

        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(cos(a), 0, 'invalid neg half pi', Option::None(()));

        let a = FixedTrait::new_unscaled(17, false);
        assert_relative(cos(a), -18033, 'invalid 17', Option::None(())); // -0.27516334

        let a = FixedTrait::new_unscaled(17, true);
        assert_relative(cos(a), -18033, 'invalid -17', Option::None(())); // -0.27516334
    }

    #[test]
    #[available_gas(8000000)]
    fn test_cos_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::new(HALF_PI, false);
        assert(cos_fast(a).into() == 0, 'invalid half pi');

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(cos_fast(a), 46341, 'invalid quarter pi', error); // 0.70710678

        let a = FixedTrait::new(PI, false);
        assert_precise(cos_fast(a), -1 * ONE.into(), 'invalid pi', error);

        let a = FixedTrait::new(HALF_PI, true);
        // BUGFIX: this assertion previously called cos(); the test targets cos_fast().
        assert_precise(cos_fast(a), 0, 'invalid neg half pi', Option::None(()));

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(cos_fast(a), -18033, 'invalid 17', error); // -0.27516334
    }

    #[test]
    #[available_gas(8000000)]
    fn test_sin() {
        let a = FixedTrait::new(HALF_PI, false);
        assert_precise(sin(a), ONE.into(), 'invalid half pi', Option::None(()));

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(sin(a), 46341, 'invalid quarter pi', Option::None(())); // 0.70710678

        let a = FixedTrait::new(PI, false);
        assert(sin(a).into() == 0, 'invalid pi');

        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(
            sin(a), -ONE.into(), 'invalid neg half pi', Option::None(())
        ); // -1

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(sin(a), -63006, 'invalid 17', Option::None(())); // -0.96139749

        let a = FixedTrait::new_unscaled(17, true);
        assert_precise(sin(a), 63006, 'invalid -17', Option::None(())); // 0.96139749
    }

    #[test]
    #[available_gas(8000000)]
    fn test_sin_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::new(HALF_PI, false);
        assert_precise(sin_fast(a), ONE.into(), 'invalid half pi', error);

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(sin_fast(a), 46341, 'invalid quarter pi', error); // 0.70710678

        let a = FixedTrait::new(PI, false);
        assert(sin_fast(a).into() == 0, 'invalid pi');

        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(sin_fast(a), -ONE.into(), 'invalid neg half pi', error); // -1

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(sin_fast(a), -63006, 'invalid 17', error); // -0.96139749

        let a = FixedTrait::new_unscaled(17, true);
        assert_precise(sin_fast(a), 63006, 'invalid -17', error); // 0.96139749
    }

    #[test]
    #[available_gas(8000000)]
    fn test_tan() {
        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(tan(a), ONE.into(), 'invalid quarter pi', Option::None(()));

        let a = FixedTrait::new(PI, false);
        assert_precise(tan(a), 0, 'invalid pi', Option::None(()));

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(tan(a), 228990, 'invalid 17', Option::None(())); // 3.49405570

        let a = FixedTrait::new_unscaled(17, true);
        // NOTE(review): expected magnitude differs slightly from the +17 case
        // (228952 vs 228990); both lie within assert_precise tolerance.
        assert_precise(tan(a), -228952, 'invalid -17', Option::None(()));
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16wide.cairo
mod core; mod math; mod helpers;
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16wide/core.cairo
use core::debug::PrintTrait;
use orion::numbers::{fixed_point::core::FixedTrait, FP16x16};
use orion::numbers::fixed_point::implementations::fp16x16wide::math::{
    core as core_math, trig, hyp, erf
};
use orion::numbers::fixed_point::utils;

/// A struct representing a fixed point number.
/// `mag` is the absolute value scaled by 2^16; `sign` is true for negative values.
/// NOTE(review): `{ mag: 0, sign: true }` is used as the NaN encoding (see `NaN`
/// and `is_nan` below), so it is not a representable negative zero.
#[derive(Serde, Copy, Drop)]
struct FP16x16W {
    mag: u64,
    sign: bool
}

// CONSTANTS (raw magnitudes, already scaled by 2^16)
const TWO: u64 = 131072; // 2 ** 17
const ONE: u64 = 65536; // 2 ** 16
const HALF: u64 = 32768; // 2 ** 15
// Kept at 2^31 even though u64 could hold more — presumably to stay
// range-compatible with the narrow FP16x16 type; TODO confirm.
const MAX: u64 = 2147483648; // 2 ** 31

impl FP16x16WImpl of FixedTrait<FP16x16W, u64> {
    fn ZERO() -> FP16x16W {
        FP16x16W { mag: 0, sign: false }
    }

    fn HALF() -> FP16x16W {
        FP16x16W { mag: HALF, sign: false }
    }

    fn ONE() -> FP16x16W {
        FP16x16W { mag: ONE, sign: false }
    }

    fn MAX() -> FP16x16W {
        FP16x16W { mag: MAX, sign: false }
    }

    // Builds a value from an already-scaled magnitude.
    fn new(mag: u64, sign: bool) -> FP16x16W {
        FP16x16W { mag: mag, sign: sign }
    }

    // Builds a value from an integer magnitude (scales by 2^16).
    fn new_unscaled(mag: u64, sign: bool) -> FP16x16W {
        FP16x16W { mag: mag * ONE, sign: sign }
    }

    // Reinterprets a felt252 as a raw (already-scaled) magnitude plus sign.
    // Panics if the absolute value does not fit in u64.
    fn from_felt(val: felt252) -> FP16x16W {
        let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap();
        FixedTrait::new(mag, utils::felt_sign(val))
    }

    fn abs(self: FP16x16W) -> FP16x16W {
        core_math::abs(self)
    }

    // NOTE(review): the plain trig entry points delegate to the *_fast
    // lookup-table variants, so e.g. `acos` and `acos_fast` are identical here.
    fn acos(self: FP16x16W) -> FP16x16W {
        trig::acos_fast(self)
    }

    fn acos_fast(self: FP16x16W) -> FP16x16W {
        trig::acos_fast(self)
    }

    fn acosh(self: FP16x16W) -> FP16x16W {
        hyp::acosh(self)
    }

    fn asin(self: FP16x16W) -> FP16x16W {
        trig::asin_fast(self)
    }

    fn asin_fast(self: FP16x16W) -> FP16x16W {
        trig::asin_fast(self)
    }

    fn asinh(self: FP16x16W) -> FP16x16W {
        hyp::asinh(self)
    }

    fn atan(self: FP16x16W) -> FP16x16W {
        trig::atan_fast(self)
    }

    fn atan_fast(self: FP16x16W) -> FP16x16W {
        trig::atan_fast(self)
    }

    fn atanh(self: FP16x16W) -> FP16x16W {
        hyp::atanh(self)
    }

    fn ceil(self: FP16x16W) -> FP16x16W {
        core_math::ceil(self)
    }

    fn cos(self: FP16x16W) -> FP16x16W {
        trig::cos_fast(self)
    }

    fn cos_fast(self: FP16x16W) -> FP16x16W {
        trig::cos_fast(self)
    }

    fn cosh(self: FP16x16W) -> FP16x16W {
        hyp::cosh(self)
    }

    fn floor(self: FP16x16W) -> FP16x16W {
        core_math::floor(self)
    }

    // Calculates the natural exponent of x: e^x
    fn exp(self: FP16x16W) -> FP16x16W {
        core_math::exp(self)
    }

    // Calculates the binary exponent of x: 2^x
    fn exp2(self: FP16x16W) -> FP16x16W {
        core_math::exp2(self)
    }

    // Calculates the natural logarithm of x: ln(x)
    // self must be greater than zero
    fn ln(self: FP16x16W) -> FP16x16W {
        core_math::ln(self)
    }

    // Calculates the binary logarithm of x: log2(x)
    // self must be greater than zero
    fn log2(self: FP16x16W) -> FP16x16W {
        core_math::log2(self)
    }

    // Calculates the base 10 log of x: log10(x)
    // self must be greater than zero
    fn log10(self: FP16x16W) -> FP16x16W {
        core_math::log10(self)
    }

    // Calculates the value of x^y and checks for overflow before returning
    // self is a fixed point value
    // b is a fixed point value
    fn pow(self: FP16x16W, b: FP16x16W) -> FP16x16W {
        core_math::pow(self, b)
    }

    fn round(self: FP16x16W) -> FP16x16W {
        core_math::round(self)
    }

    fn sin(self: FP16x16W) -> FP16x16W {
        trig::sin_fast(self)
    }

    fn sin_fast(self: FP16x16W) -> FP16x16W {
        trig::sin_fast(self)
    }

    fn sinh(self: FP16x16W) -> FP16x16W {
        hyp::sinh(self)
    }

    // Calculates the square root of a fixed point value
    // x must be positive
    fn sqrt(self: FP16x16W) -> FP16x16W {
        core_math::sqrt(self)
    }

    fn tan(self: FP16x16W) -> FP16x16W {
        trig::tan_fast(self)
    }

    fn tan_fast(self: FP16x16W) -> FP16x16W {
        trig::tan_fast(self)
    }

    fn tanh(self: FP16x16W) -> FP16x16W {
        hyp::tanh(self)
    }

    fn sign(self: FP16x16W) -> FP16x16W {
        core_math::sign(self)
    }

    // NaN is encoded as negative zero; `is_nan` checks for exactly that encoding.
    fn NaN() -> FP16x16W {
        FP16x16W { mag: 0, sign: true }
    }

    fn is_nan(self: FP16x16W) -> bool {
        self == FP16x16W { mag: 0, sign: true }
    }

    // Infinity is encoded with the sentinel magnitude 2^32 - 1.
    fn INF() -> FP16x16W {
        FP16x16W { mag: 4294967295, sign: false }
    }

    fn POS_INF() -> FP16x16W {
        FP16x16W { mag: 4294967295, sign: false }
    }

    fn NEG_INF() -> FP16x16W {
        FP16x16W { mag: 4294967295, sign: true }
    }

    fn is_inf(self: FP16x16W) -> bool {
        self.mag == 4294967295
    }

    fn is_pos_inf(self: FP16x16W) -> bool {
        self.is_inf() && !self.sign
    }

    fn is_neg_inf(self: FP16x16W) -> bool {
        self.is_inf() && self.sign
    }

    fn erf(self: FP16x16W) -> FP16x16W {
        erf::erf(self)
    }
}

impl FP16x16WPrint of PrintTrait<FP16x16W> {
    // Prints sign then raw magnitude (no unscaling).
    fn print(self: FP16x16W) {
        self.sign.print();
        self.mag.print();
    }
}

// Into a raw felt without unscaling
impl FP16x16WIntoFelt252 of Into<FP16x16W, felt252> {
    fn into(self: FP16x16W) -> felt252 {
        let mag_felt = self.mag.into();

        if self.sign {
            mag_felt * -1
        } else {
            mag_felt * 1
        }
    }
}

impl FP16x16WIntoI32 of Into<FP16x16W, i32> {
    fn into(self: FP16x16W) -> i32 {
        _i32_into_fp(self)
    }
}

// Widening conversion from the narrow FP16x16 (u32 magnitude) — always succeeds.
impl FP16x16IntoFP16x16W of Into<FP16x16, FP16x16W> {
    fn into(self: FP16x16) -> FP16x16W {
        FP16x16W { mag: self.mag.into(), sign: self.sign }
    }
}

// Narrowing conversion back to FP16x16 — None if the magnitude exceeds u32.
impl FP16x16WTryIntoFP16x16 of TryInto<FP16x16W, FP16x16> {
    fn try_into(self: FP16x16W) -> Option<FP16x16> {
        match self.mag.try_into() {
            Option::Some(val) => { Option::Some(FP16x16 { mag: val, sign: self.sign }) },
            Option::None => { Option::None(()) }
        }
    }
}

impl FP16x16WTryIntoI8 of TryInto<FP16x16W, i8> {
    fn try_into(self: FP16x16W) -> Option<i8> {
        _i8_try_from_fp(self)
    }
}

// The unsigned conversions below all return None for negative values.
impl FP16x16WTryIntoU128 of TryInto<FP16x16W, u128> {
    fn try_into(self: FP16x16W) -> Option<u128> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            Option::Some((self.mag / ONE).into())
        }
    }
}

impl FP16x16WTryIntoU64 of TryInto<FP16x16W, u64> {
    fn try_into(self: FP16x16W) -> Option<u64> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            Option::Some((self.mag / ONE).into())
        }
    }
}

impl FP16x16WTryIntoU32 of TryInto<FP16x16W, u32> {
    fn try_into(self: FP16x16W) -> Option<u32> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP16x16WTryIntoU16 of TryInto<FP16x16W, u16> {
    fn try_into(self: FP16x16W) -> Option<u16> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP16x16WTryIntoU8 of TryInto<FP16x16W, u8> {
    fn try_into(self: FP16x16W) -> Option<u8> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

// Operator impls all delegate to the functions in math::core.
impl FP16x16WPartialEq of PartialEq<FP16x16W> {
    #[inline(always)]
    fn eq(lhs: @FP16x16W, rhs: @FP16x16W) -> bool {
        core_math::eq(lhs, rhs)
    }

    #[inline(always)]
    fn ne(lhs: @FP16x16W, rhs: @FP16x16W) -> bool {
        core_math::ne(lhs, rhs)
    }
}

impl FP16x16WAdd of Add<FP16x16W> {
    fn add(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
        core_math::add(lhs, rhs)
    }
}

impl FP16x16WAddEq of AddEq<FP16x16W> {
    #[inline(always)]
    fn add_eq(ref self: FP16x16W, other: FP16x16W) {
        self = Add::add(self, other);
    }
}

impl FP16x16WSub of Sub<FP16x16W> {
    fn sub(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
        core_math::sub(lhs, rhs)
    }
}

impl FP16x16WSubEq of SubEq<FP16x16W> {
    #[inline(always)]
    fn sub_eq(ref self: FP16x16W, other: FP16x16W) {
        self = Sub::sub(self, other);
    }
}

impl FP16x16WMul of Mul<FP16x16W> {
    fn mul(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
        core_math::mul(lhs, rhs)
    }
}

impl FP16x16WMulEq of MulEq<FP16x16W> {
    #[inline(always)]
    fn mul_eq(ref self: FP16x16W, other: FP16x16W) {
        self = Mul::mul(self, other);
    }
}

impl FP16x16WDiv of Div<FP16x16W> {
    fn div(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
        core_math::div(lhs, rhs)
    }
}

impl FP16x16WDivEq of DivEq<FP16x16W> {
    #[inline(always)]
    fn div_eq(ref self: FP16x16W, other: FP16x16W) {
        self = Div::div(self, other);
    }
}

impl FP16x16WPartialOrd of PartialOrd<FP16x16W> {
    #[inline(always)]
    fn ge(lhs: FP16x16W, rhs: FP16x16W) -> bool {
        core_math::ge(lhs, rhs)
    }

    #[inline(always)]
    fn gt(lhs: FP16x16W, rhs: FP16x16W) -> bool {
        core_math::gt(lhs, rhs)
    }

    #[inline(always)]
    fn le(lhs: FP16x16W, rhs: FP16x16W) -> bool {
        core_math::le(lhs, rhs)
    }

    #[inline(always)]
    fn lt(lhs: FP16x16W, rhs: FP16x16W) -> bool {
        core_math::lt(lhs, rhs)
    }
}

impl FP16x16WNeg of Neg<FP16x16W> {
    #[inline(always)]
    fn neg(a: FP16x16W) -> FP16x16W {
        core_math::neg(a)
    }
}

impl FP16x16WRem of Rem<FP16x16W> {
    #[inline(always)]
    fn rem(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
        core_math::rem(lhs, rhs)
    }
}

/// INTERNAL

// Truncating conversion to i32: unscales (rounds toward zero) then re-applies the sign.
fn _i32_into_fp(x: FP16x16W) -> i32 {
    let number_felt: felt252 = (x.mag / ONE).into();
    let number_i32: i32 = number_felt.try_into().unwrap();
    if x.sign {
        return number_i32 * -1_i32;
    }
    number_i32
}

// Truncating conversion to i8: None if the unscaled magnitude exceeds u8.
// NOTE(review): magnitudes in (127, 255] pass the u8 check but panic at the
// felt -> i8 unwrap rather than returning None — confirm this is intended.
fn _i8_try_from_fp(x: FP16x16W) -> Option<i8> {
    let unscaled_mag: Option<u8> = (x.mag / ONE).try_into();

    match unscaled_mag {
        Option::Some => {
            let number_felt: felt252 = unscaled_mag.unwrap().into();
            let mut number_i8: i8 = number_felt.try_into().unwrap();
            if x.sign {
                return Option::Some(number_i8 * -1_i8);
            }
            Option::Some(number_i8)
        },
        Option::None => Option::None(())
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo
use core::debug::PrintTrait;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
    HALF, ONE, TWO, FP16x16W, FP16x16WImpl, FP16x16WSub, FP16x16WDiv, FixedTrait, FP16x16WPrint
};

// Tolerance applied when the caller passes `Option::None(())`: 7 / 2^16 ≈ 1e-4.
const DEFAULT_PRECISION: u64 = 7; // 1e-4

// To use `DEFAULT_PRECISION`, final arg is: `Option::None(())`.
// To use `custom_precision` of 430_u32: `Option::Some(430_u32)`.

/// Asserts that `result` matches `expected` (a raw felt encoding of a fixed
/// point value) within an *absolute* tolerance. On failure the offending value
/// is printed before the assert fires with `msg`.
fn assert_precise(
    result: FP16x16W, expected: felt252, msg: felt252, custom_precision: Option<u64>
) {
    let tolerance = match custom_precision {
        Option::Some(p) => p,
        Option::None => DEFAULT_PRECISION,
    };

    let abs_diff = (result - FixedTrait::from_felt(expected)).mag;
    if abs_diff > tolerance {
        result.print();
        assert(abs_diff <= tolerance, msg);
    }
}

/// Same contract as `assert_precise`, but the tolerance is applied to the
/// *relative* error `|(result - expected) / result|`.
fn assert_relative(
    result: FP16x16W, expected: felt252, msg: felt252, custom_precision: Option<u64>
) {
    let tolerance = match custom_precision {
        Option::Some(p) => p,
        Option::None => DEFAULT_PRECISION,
    };

    let delta = result - FixedTrait::from_felt(expected);
    let rel_err = (delta / result).mag;
    if rel_err > tolerance {
        result.print();
        assert(rel_err <= tolerance, msg);
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16wide/math.cairo
// Submodules of the FP16x16W math implementation.
mod core; // basic arithmetic, comparison helpers, exp/log/pow/sqrt
mod comp; // min/max, logical and bitwise element ops
mod lut; // lookup tables (msb, exp2, trig, erf)
mod trig; // trigonometric functions
mod hyp; // hyperbolic functions
mod erf; // error function
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
    FP16x16W, FixedTrait, FP16x16WImpl, FP16x16WPartialOrd, FP16x16WPartialEq
};

// Returns the larger of `a` and `b` (ties return `a`).
fn max(a: FP16x16W, b: FP16x16W) -> FP16x16W {
    if a >= b {
        a
    } else {
        b
    }
}

// Returns the smaller of `a` and `b` (ties return `a`).
fn min(a: FP16x16W, b: FP16x16W) -> FP16x16W {
    if a <= b {
        a
    } else {
        b
    }
}

// Logical XOR over "is nonzero": true iff exactly one operand equals +0.
// (If neither is zero, or both are zero, the first clause / the `a != b`
// clause respectively makes this false.)
fn xor(a: FP16x16W, b: FP16x16W) -> bool {
    if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) {
        true
    } else {
        false
    }
}

// Logical OR over "is nonzero".
fn or(a: FP16x16W, b: FP16x16W) -> bool {
    let zero = FixedTrait::new(0, false);
    if a == zero && b == zero {
        false
    } else {
        true
    }
}

// Logical AND over "is nonzero".
fn and(a: FP16x16W, b: FP16x16W) -> bool {
    let zero = FixedTrait::new(0, false);
    if a == zero || b == zero {
        false
    } else {
        true
    }
}

// Ternary select: `b` if `a` is nonzero, else `c`.
fn where(a: FP16x16W, b: FP16x16W, c: FP16x16W) -> FP16x16W {
    if a == FixedTrait::new(0, false) {
        c
    } else {
        b
    }
}

// Bitwise ops operate on the raw magnitude; the sign bits are combined with
// the same boolean operator (so e.g. AND of mixed signs yields positive).
fn bitwise_and(a: FP16x16W, b: FP16x16W) -> FP16x16W {
    FixedTrait::new(a.mag & b.mag, a.sign & b.sign)
}

fn bitwise_xor(a: FP16x16W, b: FP16x16W) -> FP16x16W {
    FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign)
}

fn bitwise_or(a: FP16x16W, b: FP16x16W) -> FP16x16W {
    FixedTrait::new(a.mag | b.mag, a.sign | b.sign)
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::{FixedTrait, max, min, bitwise_and, bitwise_xor, bitwise_or};

    #[test]
    fn test_max() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::new_unscaled(1, true);

        assert(max(a, a) == a, 'max(a, a)');
        assert(max(a, b) == a, 'max(a, b)');
        assert(max(a, c) == a, 'max(a, c)');
        assert(max(b, a) == a, 'max(b, a)');
        assert(max(b, b) == b, 'max(b, b)');
        assert(max(b, c) == b, 'max(b, c)');
        assert(max(c, a) == a, 'max(c, a)');
        assert(max(c, b) == b, 'max(c, b)');
        assert(max(c, c) == c, 'max(c, c)');
    }

    #[test]
    fn test_min() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::new_unscaled(1, true);

        assert(min(a, a) == a, 'min(a, a)');
        assert(min(a, b) == b, 'min(a, b)');
        assert(min(a, c) == c, 'min(a, c)');
        assert(min(b, a) == b, 'min(b, a)');
        assert(min(b, b) == b, 'min(b, b)');
        assert(min(b, c) == c, 'min(b, c)');
        assert(min(c, a) == c, 'min(c, a)');
        assert(min(c, b) == c, 'min(c, b)');
        assert(min(c, c) == c, 'min(c, c)');
    }

    #[test]
    fn test_bitwise_and() {
        let a = FixedTrait::new(225280, false); // 3.4375
        let b = FixedTrait::new(4160843776, true); // -2046.5625
        let c = FixedTrait::new(94208, false); // 1.4375

        assert(bitwise_and(a, b) == c, 'bitwise_and(a,b)')
    }

    #[test]
    fn test_bitwise_xor() {
        let a = FixedTrait::new(225280, false); // 3.4375
        let b = FixedTrait::new(4160843776, true); // -2046.5625
        let c = FixedTrait::new(4160880640, true);

        assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)')
    }

    #[test]
    fn test_bitwise_or() {
        let a = FixedTrait::new(225280, false); // 3.4375
        let b = FixedTrait::new(4160843776, true); // -2046.5625
        let c = FixedTrait::new(4160974848, true);

        assert(bitwise_or(a, b) == c, 'bitwise_or(a,b)')
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo
use core::integer;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
    HALF, ONE, MAX, FP16x16W, FP16x16WImpl, FP16x16WAdd, FP16x16WAddEq, FP16x16WSub, FP16x16WMul,
    FP16x16WMulEq, FP16x16WTryIntoU128, FP16x16WPartialEq, FP16x16WPartialOrd, FP16x16WSubEq,
    FP16x16WNeg, FP16x16WDiv, FP16x16WIntoFelt252, FixedTrait
};
use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut;

// PUBLIC

fn abs(a: FP16x16W) -> FP16x16W {
    FixedTrait::new(a.mag, false)
}

// Sign-magnitude addition: add magnitudes when signs agree, otherwise subtract
// the smaller magnitude from the larger and keep the larger operand's sign.
fn add(a: FP16x16W, b: FP16x16W) -> FP16x16W {
    if a.sign == b.sign {
        return FixedTrait::new(a.mag + b.mag, a.sign);
    }

    if a.mag == b.mag {
        return FixedTrait::ZERO();
    }

    if (a.mag > b.mag) {
        FixedTrait::new(a.mag - b.mag, a.sign)
    } else {
        FixedTrait::new(b.mag - a.mag, b.sign)
    }
}

// Rounds toward +infinity (note -0.x ceils to +0, handled by the `div == 0` arm).
fn ceil(a: FP16x16W) -> FP16x16W {
    let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE));

    if rem == 0 {
        a
    } else if !a.sign {
        FixedTrait::new_unscaled(div + 1, false)
    } else if div == 0 {
        FixedTrait::new_unscaled(0, false)
    } else {
        FixedTrait::new_unscaled(div, true)
    }
}

// Widens to u128 before pre-scaling by ONE so the intermediate cannot overflow.
// NOTE(review): `a_u64` actually holds a u128 (u64_wide_mul result) — the name
// is misleading. Division by zero `b` panics.
fn div(a: FP16x16W, b: FP16x16W) -> FP16x16W {
    let a_u64 = integer::u64_wide_mul(a.mag, ONE);
    let res_u64 = a_u64 / b.mag.into();

    // Re-apply sign
    FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign)
}

fn eq(a: @FP16x16W, b: @FP16x16W) -> bool {
    (*a.mag == *b.mag) && (*a.sign == *b.sign)
}

// Calculates the natural exponent of x: e^x
fn exp(a: FP16x16W) -> FP16x16W {
    exp2(FixedTrait::new(94548, false) * a) // log2(e) * 2^16 ≈ 94548
}

// Calculates the binary exponent of x: 2^x
// Integer part via lookup table; fractional part via a degree-7 polynomial
// approximation of 2^frac on [0, 1). Negative exponents use 2^-x = 1 / 2^x.
fn exp2(a: FP16x16W) -> FP16x16W {
    if (a.mag == 0) {
        return FixedTrait::ONE();
    }

    let (int_part, frac_part) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE));
    let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false);
    let mut res_u = int_res;

    if frac_part != 0 {
        let frac = FixedTrait::new(frac_part, false);
        let r7 = FixedTrait::new(1, false) * frac;
        let r6 = (r7 + FixedTrait::new(10, false)) * frac;
        let r5 = (r6 + FixedTrait::new(87, false)) * frac;
        let r4 = (r5 + FixedTrait::new(630, false)) * frac;
        let r3 = (r4 + FixedTrait::new(3638, false)) * frac;
        let r2 = (r3 + FixedTrait::new(15743, false)) * frac;
        let r1 = (r2 + FixedTrait::new(45426, false)) * frac;
        res_u = res_u * (r1 + FixedTrait::ONE());
    }

    if a.sign {
        FixedTrait::ONE() / res_u
    } else {
        res_u
    }
}

// 2^exp for a non-negative integer exponent, straight from the lookup table.
fn exp2_int(exp: u64) -> FP16x16W {
    FixedTrait::new_unscaled(lut::exp2(exp), false)
}

// Rounds toward -infinity.
fn floor(a: FP16x16W) -> FP16x16W {
    let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE));

    if rem == 0 {
        a
    } else if !a.sign {
        FixedTrait::new_unscaled(div, false)
    } else {
        FixedTrait::new_unscaled(div + 1, true)
    }
}

// Sign-magnitude comparisons: when signs differ the positive operand wins;
// otherwise compare magnitudes, flipped for negatives (XOR with the sign).
fn ge(a: FP16x16W, b: FP16x16W) -> bool {
    if a.sign != b.sign {
        !a.sign
    } else {
        (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign)
    }
}

fn gt(a: FP16x16W, b: FP16x16W) -> bool {
    if a.sign != b.sign {
        !a.sign
    } else {
        (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign)
    }
}

fn le(a: FP16x16W, b: FP16x16W) -> bool {
    if a.sign != b.sign {
        a.sign
    } else {
        (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign)
    }
}

// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(a: FP16x16W) -> FP16x16W {
    FixedTrait::new(45426, false) * log2(a) // ln(2) = 0.693...
}

// Calculates the binary logarithm of x: log2(x)
// self must be greater than zero
// Splits off the most significant bit via lookup table, then refines the
// normalized remainder in [1, 2) with a degree-8 polynomial.
fn log2(a: FP16x16W) -> FP16x16W {
    assert(a.sign == false, 'must be positive');

    if (a.mag == ONE) {
        return FixedTrait::ZERO();
    } else if (a.mag < ONE) {
        // Compute true inverse binary log if 0 < x < 1
        let div = FixedTrait::ONE() / a;
        return -log2(div);
    }

    let whole = a.mag / ONE;
    let (msb, div) = lut::msb(whole);

    if a.mag == div * ONE {
        FixedTrait::new_unscaled(msb, false)
    } else {
        let norm = a / FixedTrait::new_unscaled(div, false);
        let r8 = FixedTrait::new(596, true) * norm;
        let r7 = (r8 + FixedTrait::new(8116, false)) * norm;
        let r6 = (r7 + FixedTrait::new(49044, true)) * norm;
        let r5 = (r6 + FixedTrait::new(172935, false)) * norm;
        let r4 = (r5 + FixedTrait::new(394096, true)) * norm;
        let r3 = (r4 + FixedTrait::new(608566, false)) * norm;
        let r2 = (r3 + FixedTrait::new(655828, true)) * norm;
        let r1 = (r2 + FixedTrait::new(534433, false)) * norm;
        r1 + FixedTrait::new(224487, true) + FixedTrait::new_unscaled(msb, false)
    }
}

// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(a: FP16x16W) -> FP16x16W {
    FixedTrait::new(19728, false) * log2(a) // log10(2) = 0.301...
}

fn lt(a: FP16x16W, b: FP16x16W) -> bool {
    if a.sign != b.sign {
        a.sign
    } else {
        (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign)
    }
}

// Widening multiply, then unscale; panics on try_into if the product
// magnitude overflows u64.
fn mul(a: FP16x16W, b: FP16x16W) -> FP16x16W {
    let prod_u128 = integer::u64_wide_mul(a.mag, b.mag);

    // Re-apply sign
    FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign)
}

fn ne(a: @FP16x16W, b: @FP16x16W) -> bool {
    (*a.mag != *b.mag) || (*a.sign != *b.sign)
}

// Negation keeps zero sign-less (so -0 stays distinct from the NaN encoding).
fn neg(a: FP16x16W) -> FP16x16W {
    if a.mag == 0 {
        a
    } else if !a.sign {
        FixedTrait::new(a.mag, !a.sign)
    } else {
        FixedTrait::new(a.mag, false)
    }
}

// Calculates the value of x^y and checks for overflow before returning
// self is a FP16x16W point value
// b is a FP16x16W point value
fn pow(a: FP16x16W, b: FP16x16W) -> FP16x16W {
    let (_, rem) = integer::u64_safe_divmod(b.mag, integer::u64_as_non_zero(ONE));

    // use the more performant integer pow when y is an int
    if (rem == 0) {
        return pow_int(a, b.mag / ONE, b.sign);
    }

    // x^y = exp(y*ln(x)) for x > 0 will error for x < 0
    exp(b * ln(a))
}

// Calculates the value of a^b and checks for overflow before returning
// Binary exponentiation (square-and-multiply); a negative exponent inverts
// the base first.
fn pow_int(a: FP16x16W, b: u64, sign: bool) -> FP16x16W {
    let mut x = a;
    let mut n = b;

    if sign {
        x = FixedTrait::ONE() / x;
    }

    if n == 0 {
        return FixedTrait::ONE();
    }

    let mut y = FixedTrait::ONE();
    let two = integer::u64_as_non_zero(2);

    while n > 1 {
        let (div, rem) = integer::u64_safe_divmod(n, two);

        if rem == 1 {
            y = x * y;
        }

        x = x * x;
        n = div;
    };

    x * y
}

// Floored-division remainder: a - floor(a/b) * b.
fn rem(a: FP16x16W, b: FP16x16W) -> FP16x16W {
    a - floor(a / b) * b
}

// Rounds half away from zero (fraction >= 0.5 bumps the magnitude).
fn round(a: FP16x16W) -> FP16x16W {
    let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE));

    if (HALF <= rem) {
        FixedTrait::new_unscaled(div + 1, a.sign)
    } else {
        FixedTrait::new_unscaled(div, a.sign)
    }
}

// Calculates the square root of a FP16x16W point value
// x must be positive
// sqrt(mag * 2^16) keeps the result in 16.16 scale.
fn sqrt(a: FP16x16W) -> FP16x16W {
    assert(a.sign == false, 'must be positive');

    let root = integer::u64_sqrt(a.mag.into() * ONE.into());

    FixedTrait::new(root.into(), false)
}

fn sub(a: FP16x16W, b: FP16x16W) -> FP16x16W {
    add(a, -b)
}

// Returns -1, 0 or +1 (in fixed point) according to the sign of `a`.
fn sign(a: FP16x16W) -> FP16x16W {
    if a.mag == 0 {
        FixedTrait::new(0, false)
    } else {
        FixedTrait::new(ONE, a.sign)
    }
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use orion::numbers::fixed_point::implementations::fp16x16wide::helpers::{
        assert_precise, assert_relative
    };
    use orion::numbers::fixed_point::implementations::fp16x16wide::math::trig::{PI, HALF_PI};
    use super::{
        FixedTrait, ONE, FP16x16W, ceil, floor, sqrt, round, lut, pow, exp, exp2, exp2_int, ln,
        log2, log10, eq, add, ne, HALF
    };

    #[test]
    fn test_into() {
        let a = FixedTrait::<FP16x16W>::new_unscaled(5, false);
        assert(a.mag == 5 * ONE, 'invalid result');
    }

    #[test]
    fn test_try_into_u128() {
        // Positive unscaled
        let a = FixedTrait::<FP16x16W>::new_unscaled(5, false);
        assert(a.try_into().unwrap() == 5_u128, 'invalid result');

        // Positive scaled
        let b = FixedTrait::<FP16x16W>::new(5 * ONE, false);
        assert(b.try_into().unwrap() == 5_u128, 'invalid result');

        // Zero
        let d = FixedTrait::<FP16x16W>::new_unscaled(0, false);
        assert(d.try_into().unwrap() == 0_u128, 'invalid result');
    }

    #[test]
    #[should_panic]
    fn test_negative_try_into_u128() {
        let a = FixedTrait::<FP16x16W>::new_unscaled(1, true);
        let _a: u128 = a.try_into().unwrap();
    }

    #[test]
    #[available_gas(1000000)]
    fn test_acos() {
        let a = FixedTrait::<FP16x16W>::ONE();
        assert(a.acos().into() == 0, 'invalid one');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_asin() {
        let a = FixedTrait::ONE();
        assert_precise(a.asin(), HALF_PI.into(), 'invalid one', Option::None(())); // PI / 2
    }

    #[test]
    #[available_gas(2000000)]
    fn test_atan() {
        let a = FixedTrait::new(2 * ONE, false);
        assert_relative(a.atan(), 72558, 'invalid two', Option::None(()));
    }

    #[test]
    fn test_ceil() {
        let a = FixedTrait::new(190054, false); // 2.9
        assert(ceil(a).mag == 3 * ONE, 'invalid pos decimal');
    }

    #[test]
    fn test_floor() {
        let a = FixedTrait::new(190054, false); // 2.9
        assert(floor(a).mag == 2 * ONE, 'invalid pos decimal');
    }

    #[test]
    fn test_round() {
        let a = FixedTrait::new(190054, false); // 2.9
        assert(round(a).mag == 3 * ONE, 'invalid pos decimal');
    }

    #[test]
    #[should_panic]
    fn test_sqrt_fail() {
        let a = FixedTrait::new_unscaled(25, true);
        sqrt(a);
    }

    #[test]
    fn test_sqrt() {
        let mut a = FixedTrait::new_unscaled(0, false);
        assert(sqrt(a).mag == 0, 'invalid zero root');
        a = FixedTrait::new_unscaled(25, false);
        assert(sqrt(a).mag == 5 * ONE, 'invalid pos root');
    }

    #[test]
    #[available_gas(100000)]
    fn test_msb() {
        let a = FixedTrait::<FP16x16W>::new_unscaled(100, false);
        let (msb, div) = lut::msb(a.mag / ONE);
        assert(msb == 6, 'invalid msb');
        assert(div == 64, 'invalid msb ceil');
    }

    #[test]
    #[available_gas(600000)]
    fn test_pow() {
        let a = FixedTrait::new_unscaled(3, false);
        let b = FixedTrait::new_unscaled(4, false);
        assert(pow(a, b).mag == 81 * ONE, 'invalid pos base power');
    }

    #[test]
    #[available_gas(900000)]
    fn test_pow_frac() {
        let a = FixedTrait::new_unscaled(3, false);
        let b = FixedTrait::new(32768, false); // 0.5
        assert_relative(
            pow(a, b), 113512, 'invalid pos base power', Option::None(())
        ); // 1.7320508075688772
    }

    #[test]
    #[available_gas(1000000)]
    fn test_exp() {
        let a = FixedTrait::new_unscaled(2, false);
        assert_relative(exp(a), 484249, 'invalid exp of 2', Option::None(())); // 7.389056098793725
    }

    #[test]
    #[available_gas(400000)]
    fn test_exp2() {
        let a = FixedTrait::new_unscaled(5, false);
        assert(exp2(a).mag == 2097152, 'invalid exp2 of 2');
    }

    #[test]
    #[available_gas(20000)]
    fn test_exp2_int() {
        assert(exp2_int(5).into() == 2097152, 'invalid exp2 of 2');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_ln() {
        let mut a = FixedTrait::new_unscaled(1, false);
        assert(ln(a).mag == 0, 'invalid ln of 1');
        a = FixedTrait::new(178145, false);
        assert_relative(ln(a), ONE.into(), 'invalid ln of 2.7...', Option::None(()));
    }

    #[test]
    #[available_gas(1000000)]
    fn test_log2() {
        let mut a = FixedTrait::new_unscaled(32, false);
        assert(log2(a) == FixedTrait::new_unscaled(5, false), 'invalid log2 32');

        a = FixedTrait::new_unscaled(10, false);
        assert_relative(log2(a), 217706, 'invalid log2 10', Option::None(())); // 3.321928094887362
    }

    #[test]
    #[available_gas(1000000)]
    fn test_log10() {
        let a = FixedTrait::new_unscaled(100, false);
        assert_relative(log10(a), 2 * ONE.into(), 'invalid log10', Option::None(()));
    }

    #[test]
    fn test_eq() {
        let a = FixedTrait::new_unscaled(42, false);
        let b = FixedTrait::new_unscaled(42, false);
        let c = eq(@a, @b);
        assert(c, 'invalid result');
    }

    #[test]
    fn test_ne() {
        let a = FixedTrait::new_unscaled(42, false);
        let b = FixedTrait::new_unscaled(42, false);
        let c = ne(@a, @b);
        assert(!c, 'invalid result');
    }

    #[test]
    fn test_add() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(2, false);
        assert(add(a, b) == FixedTrait::new_unscaled(3, false), 'invalid result');
    }

    #[test]
    fn test_add_eq() {
        let mut a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(2, false);
        a += b;
        assert(a == FixedTrait::<FP16x16W>::new_unscaled(3, false), 'invalid result');
    }

    #[test]
    fn test_sub() {
        let a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, false);
        let c = a - b;
        assert(c == FixedTrait::<FP16x16W>::new_unscaled(3, false), 'false result invalid');
    }

    #[test]
    fn test_sub_eq() {
        let mut a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, false);
        a -= b;
        assert(a == FixedTrait::<FP16x16W>::new_unscaled(3, false), 'invalid result');
    }

    #[test]
    #[available_gas(100000)]
    fn test_mul_pos() {
        let a = FP16x16W { mag: 190054, sign: false };
        let b = FP16x16W { mag: 190054, sign: false };
        let c = a * b;
        assert(c.mag == 551155, 'invalid result');
    }

    #[test]
    fn test_mul_neg() {
        let a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, true);
        let c = a * b;
        assert(c == FixedTrait::<FP16x16W>::new_unscaled(10, true), 'invalid result');
    }

    #[test]
    fn test_mul_eq() {
        let mut a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, true);
        a *= b;
        assert(a == FixedTrait::<FP16x16W>::new_unscaled(10, true), 'invalid result');
    }

    #[test]
    fn test_div() {
        let a = FixedTrait::new_unscaled(10, false);
        let b = FixedTrait::<FP16x16W>::new(190054, false); // 2.9
        let c = a / b;
        assert(c.mag == 225986, 'invalid pos decimal'); // 3.4482758620689653
    }

    #[test]
    fn test_le() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP16x16W>::new_unscaled(1, true);

        assert(a <= a, 'a <= a');
        assert(!(a <= b), 'a <= b');
        assert(!(a <= c), 'a <= c');
        assert(b <= a, 'b <= a');
        assert(b <= b, 'b <= b');
        assert(!(b <= c), 'b <= c');
        assert(c <= a, 'c <= a');
        assert(c <= b, 'c <= b');
        assert(c <= c, 'c <= c');
    }

    #[test]
    fn test_lt() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP16x16W>::new_unscaled(1, true);

        assert(!(a < a), 'a < a');
        assert(!(a < b), 'a < b');
        assert(!(a < c), 'a < c');
        assert(b < a, 'b < a');
        assert(!(b < b), 'b < b');
        assert(!(b < c), 'b < c');
        assert(c < a, 'c < a');
        assert(c < b, 'c < b');
        assert(!(c < c), 'c < c');
    }

    #[test]
    fn test_ge() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP16x16W>::new_unscaled(1, true);

        assert(a >= a, 'a >= a');
        assert(a >= b, 'a >= b');
        assert(a >= c, 'a >= c');
        assert(!(b >= a), 'b >= a');
        assert(b >= b, 'b >= b');
        assert(b >= c, 'b >= c');
        assert(!(c >= a), 'c >= a');
        assert(!(c >= b), 'c >= b');
        assert(c >= c, 'c >= c');
    }

    #[test]
    fn test_gt() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP16x16W>::new_unscaled(1, true);

        assert(!(a > a), 'a > a');
        assert(a > b, 'a > b');
        assert(a > c, 'a > c');
        assert(!(b > a), 'b > a');
        assert(!(b > b), 'b > b');
        assert(b > c, 'b > c');
        assert(!(c > a), 'c > a');
        assert(!(c > b), 'c > b');
        assert(!(c > c), 'c > c');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_cos() {
        let a = FixedTrait::<FP16x16W>::new(HALF_PI, false);
        assert(a.cos().into() == 0, 'invalid half pi');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_sin() {
        let a = FixedTrait::new(HALF_PI, false);
        assert_precise(a.sin(), ONE.into(), 'invalid half pi', Option::None(()));
    }

    #[test]
    #[available_gas(2000000)]
    fn test_tan() {
        let a = FixedTrait::<FP16x16W>::new(HALF_PI / 2, false);
        assert(a.tan().mag == 65536, 'invalid quarter pi');
    }

    #[test]
    #[available_gas(2000000)]
    fn test_sign() {
        let a = FixedTrait::<FP16x16W>::new(0, false);
        assert(a.sign().mag == 0 && !a.sign().sign, 'invalid sign (0, true)');

        let a = FixedTrait::<FP16x16W>::new(HALF, true);
        assert(a.sign().mag == ONE && a.sign().sign, 'invalid sign (HALF, true)');

        let a = FixedTrait::<FP16x16W>::new(HALF, false);
        assert(a.sign().mag == ONE && !a.sign().sign, 'invalid sign (HALF, false)');

        let a = FixedTrait::<FP16x16W>::new(ONE, true);
        assert(a.sign().mag == ONE && a.sign().sign, 'invalid sign (ONE, true)');

        let a = FixedTrait::<FP16x16W>::new(ONE, false);
        assert(a.sign().mag == ONE && !a.sign().sign, 'invalid sign (ONE, false)');
    }

    #[test]
    #[should_panic]
    #[available_gas(2000000)]
    fn test_sign_fail() {
        let a = FixedTrait::<FP16x16W>::new(HALF, true);
        assert(a.sign().mag != ONE && !a.sign().sign, 'invalid sign (HALF, true)');
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ONE, FP16x16W, FixedTrait};
use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut::erf_lut;

// NOTE(review): these two constants are not referenced in this file —
// presumably consumed by the lookup-table generator or kept for parity with
// other fixed-point widths; verify before removing.
const ERF_COMPUTATIONAL_ACCURACY: u64 = 100;
const ROUND_CHECK_NUMBER: u64 = 10;
// Values > MAX_ERF_NUMBER return 1 (3.5 in 16.16 fixed point; erf(3.5) ≈ 0.999999)
const MAX_ERF_NUMBER: u64 = 229376;
// Values <= ERF_TRUNCATION_NUMBER -> two decimal places, and values > ERF_TRUNCATION_NUMBER -> one decimal place
// (2.0 in 16.16 fixed point; describes the LUT's input rounding — TODO confirm against lut::erf_lut)
const ERF_TRUNCATION_NUMBER: u64 = 131072;

// Error function erf(x). Uses the lookup table for |x| < 3.5 and saturates to
// ±1 beyond that; erf is odd, so the input's sign is passed through unchanged.
fn erf(x: FP16x16W) -> FP16x16W {
    // Lookup
    // 1. if x.mag < 3.5 { lookup table }
    // 2. else{ return 1}
    let mut erf_value: u64 = 0;

    if x.mag < MAX_ERF_NUMBER {
        erf_value = erf_lut(x.mag);
    } else {
        erf_value = ONE;
    }

    FP16x16W { mag: erf_value, sign: x.sign }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ HALF, ONE, TWO, FP16x16W, FP16x16WImpl, FP16x16WAdd, FP16x16WAddEq, FP16x16WSub, FP16x16WMul, FP16x16WMulEq, FP16x16WTryIntoU128, FP16x16WPartialEq, FP16x16WPartialOrd, FP16x16WSubEq, FP16x16WNeg, FP16x16WDiv, FP16x16WIntoFelt252, FixedTrait }; // Calculates hyperbolic cosine of a (fixed point) fn cosh(a: FP16x16W) -> FP16x16W { let ea = a.exp(); (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic sine of a (fixed point) fn sinh(a: FP16x16W) -> FP16x16W { let ea = a.exp(); (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic tangent of a (fixed point) fn tanh(a: FP16x16W) -> FP16x16W { let ea = a.exp(); let ea_i = FixedTrait::ONE() / ea; (ea - ea_i) / (ea + ea_i) } // Calculates inverse hyperbolic cosine of a (fixed point) fn acosh(a: FP16x16W) -> FP16x16W { let root = (a * a - FixedTrait::ONE()).sqrt(); (a + root).ln() } // Calculates inverse hyperbolic sine of a (fixed point) fn asinh(a: FP16x16W) -> FP16x16W { let root = (a * a + FixedTrait::ONE()).sqrt(); (a + root).ln() } // Calculates inverse hyperbolic tangent of a (fixed point) fn atanh(a: FP16x16W) -> FP16x16W { let one = FixedTrait::ONE(); let ln_arg = (one + a) / (one - a); ln_arg.ln() / FixedTrait::new(TWO, false) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { use orion::numbers::fixed_point::implementations::fp16x16wide::helpers::assert_precise; use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF}; #[test] #[available_gas(10000000)] fn test_cosh() { let a = FixedTrait::new(TWO, false); assert_precise(cosh(a), 246550, 'invalid two', Option::None(())); // 3.5954653836066 let a = FixedTrait::ONE(); assert_precise(cosh(a), 101127, 'invalid one', Option::None(())); // 1.42428174592510 let a = FixedTrait::ZERO(); assert_precise(cosh(a), 
ONE.into(), 'invalid zero', Option::None(())); let a = FixedTrait::ONE(); assert_precise(cosh(a), 101127, 'invalid neg one', Option::None(())); // 1.42428174592510 let a = FixedTrait::new(TWO, true); assert_precise(cosh(a), 246568, 'invalid neg two', Option::None(())); // 3.5954653836066 } #[test] #[available_gas(10000000)] fn test_sinh() { let a = FixedTrait::new(TWO, false); assert_precise(sinh(a), 237681, 'invalid two', Option::None(())); // 3.48973469357602 let a = FixedTrait::ONE(); assert_precise(sinh(a), 77018, 'invalid one', Option::None(())); // 1.13687593250230 let a = FixedTrait::ZERO(); assert(sinh(a).into() == 0, 'invalid zero'); let a = FixedTrait::new(ONE, true); assert_precise(sinh(a), -77018, 'invalid neg one', Option::None(())); // -1.13687593250230 let a = FixedTrait::new(TWO, true); assert_precise(sinh(a), -237699, 'invalid neg two', Option::None(())); // -3.48973469357602 } #[test] #[available_gas(10000000)] fn test_tanh() { let a = FixedTrait::new(TWO, false); assert_precise(tanh(a), 63179, 'invalid two', Option::None(())); // 0.75314654693321 let a = FixedTrait::ONE(); assert_precise(tanh(a), 49912, 'invalid one', Option::None(())); // 0.59499543433175 let a = FixedTrait::ZERO(); assert(tanh(a).into() == 0, 'invalid zero'); let a = FixedTrait::new(ONE, true); assert_precise(tanh(a), -49912, 'invalid neg one', Option::None(())); // -0.59499543433175 let a = FixedTrait::new(TWO, true); assert_precise(tanh(a), -63179, 'invalid neg two', Option::None(())); // 0.75314654693321 } #[test] #[available_gas(10000000)] fn test_acosh() { let a = FixedTrait::new(246559, false); // 3.5954653836066 assert_precise(acosh(a), 131072, 'invalid two', Option::None(())); let a = FixedTrait::new(101127, false); // 1.42428174592510 assert_precise(acosh(a), ONE.into(), 'invalid one', Option::None(())); let a = FixedTrait::ONE(); // 1 assert(acosh(a).into() == 0, 'invalid zero'); } #[test] #[available_gas(10000000)] fn test_asinh() { let a = FixedTrait::new(237690, 
false); // 3.48973469357602 assert_precise(asinh(a), 131072, 'invalid two', Option::None(())); let a = FixedTrait::new(77018, false); // 1.13687593250230 assert_precise(asinh(a), ONE.into(), 'invalid one', Option::None(())); let a = FixedTrait::ZERO(); assert(asinh(a).into() == 0, 'invalid zero'); let a = FixedTrait::new(77018, true); // -1.13687593250230 assert_precise(asinh(a), -ONE.into(), 'invalid neg one', Option::None(())); let a = FixedTrait::new(237690, true); // -3.48973469357602 assert_precise(asinh(a), -131017, 'invalid neg two', Option::None(())); } #[test] #[available_gas(10000000)] fn test_atanh() { let a = FixedTrait::new(58982, false); // 0.9 assert_precise(atanh(a), 96483, 'invalid 0.9', Option::None(())); // 1.36892147623689 let a = FixedTrait::new(HALF, false); // 0.5 assert_precise(atanh(a), 35999, 'invalid half', Option::None(())); // 0.42914542526098 let a = FixedTrait::ZERO(); assert(atanh(a).into() == 0, 'invalid zero'); let a = FixedTrait::new(HALF, true); // 0.5 assert_precise(atanh(a), -35999, 'invalid neg half', Option::None(())); // 0.42914542526098 let a = FixedTrait::new(58982, true); // 0.9 assert_precise(atanh(a), -96483, 'invalid -0.9', Option::None(())); // 1.36892147623689 } }
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo
use orion::numbers::fixed_point::implementations::fp8x23wide::core::ONE; // Calculates the most significant bit fn msb(whole: u64) -> (u64, u64) { if whole < 256 { if whole < 2 { return (0, 1); } if whole < 4 { return (1, 2); } if whole < 8 { return (2, 4); } if whole < 16 { return (3, 8); } if whole < 32 { return (4, 16); } if whole < 64 { return (5, 32); } if whole < 128 { return (6, 64); } if whole < 256 { return (7, 128); } } else if whole < 65536 { if whole < 512 { return (8, 256); } if whole < 1024 { return (9, 512); } if whole < 2048 { return (10, 1024); } if whole < 4096 { return (11, 2048); } if whole < 8192 { return (12, 4096); } if whole < 16384 { return (13, 8192); } if whole < 32768 { return (14, 16384); } if whole < 65536 { return (15, 32768); } } (16, 65536) } fn exp2(exp: u64) -> u64 { if exp <= 16 { if exp == 0 { return 1; } if exp == 1 { return 2; } if exp == 2 { return 4; } if exp == 3 { return 8; } if exp == 4 { return 16; } if exp == 5 { return 32; } if exp == 6 { return 64; } if exp == 7 { return 128; } if exp == 8 { return 256; } if exp == 9 { return 512; } if exp == 10 { return 1024; } if exp == 11 { return 2048; } if exp == 12 { return 4096; } if exp == 13 { return 8192; } if exp == 14 { return 16384; } if exp == 15 { return 32768; } if exp == 16 { return 65536; } } 65536 } fn sin(a: u64) -> (u64, u64, u64) { let slot = a / 402; if slot < 128 { if slot < 64 { if slot < 32 { if slot < 16 { if slot == 0 { return (0, 0, 402); } if slot == 1 { return (402, 402, 804); } if slot == 2 { return (804, 804, 1206); } if slot == 3 { return (1206, 1206, 1608); } if slot == 4 { return (1608, 1608, 2010); } if slot == 5 { return (2011, 2010, 2412); } if slot == 6 { return (2413, 2412, 2814); } if slot == 7 { return (2815, 2814, 3216); } if slot == 8 { return (3217, 3216, 3617); } if slot == 9 { return (3619, 3617, 4019); } if slot == 10 { return (4023, 4019, 4420); } if slot == 11 { return (4423, 4420, 4821); } if slot == 12 { return (4825, 4821, 5222); } 
if slot == 13 { return (5228, 5222, 5623); } if slot == 14 { return (5630, 5623, 6023); } if slot == 15 { return (6032, 6023, 6424); } } else { if slot == 16 { return (6434, 6424, 6824); } if slot == 17 { return (6836, 6824, 7224); } if slot == 18 { return (7238, 7224, 7623); } if slot == 19 { return (7640, 7623, 8022); } if slot == 20 { return (8042, 8022, 8421); } if slot == 21 { return (8445, 8421, 8820); } if slot == 22 { return (8847, 8820, 9218); } if slot == 23 { return (9249, 9218, 9616); } if slot == 24 { return (9651, 9616, 10014); } if slot == 25 { return (10053, 10014, 10411); } if slot == 26 { return (10455, 10411, 10808); } if slot == 27 { return (10857, 10808, 11204); } if slot == 28 { return (11259, 11204, 11600); } if slot == 29 { return (11662, 11600, 11996); } if slot == 30 { return (12064, 11996, 12391); } if slot == 31 { return (12466, 12391, 12785); } } } else { if slot < 48 { if slot == 32 { return (12868, 12785, 13180); } if slot == 33 { return (13270, 13180, 13573); } if slot == 34 { return (13672, 13573, 13966); } if slot == 35 { return (14074, 13966, 14359); } if slot == 36 { return (14476, 14359, 14751); } if slot == 37 { return (14879, 14751, 15143); } if slot == 38 { return (15281, 15143, 15534); } if slot == 39 { return (15683, 15534, 15924); } if slot == 40 { return (16081, 15924, 16314); } if slot == 41 { return (16487, 16314, 16703); } if slot == 42 { return (16889, 16703, 17091); } if slot == 43 { return (17291, 17091, 17479); } if slot == 44 { return (17693, 17479, 17867); } if slot == 45 { return (18096, 17867, 18253); } if slot == 46 { return (18498, 18253, 18639); } if slot == 47 { return (18900, 18639, 19024); } } else { if slot == 48 { return (19302, 19024, 19409); } if slot == 49 { return (19704, 19409, 19792); } if slot == 50 { return (20113, 19792, 20175); } if slot == 51 { return (20508, 20175, 20557); } if slot == 52 { return (20910, 20557, 20939); } if slot == 53 { return (21313, 20939, 21320); } if slot == 54 { return 
(21715, 21320, 21699); } if slot == 55 { return (22117, 21699, 22078); } if slot == 56 { return (22519, 22078, 22457); } if slot == 57 { return (22921, 22457, 22834); } if slot == 58 { return (23323, 22834, 23210); } if slot == 59 { return (23725, 23210, 23586); } if slot == 60 { return (24127, 23586, 23961); } if slot == 61 { return (24530, 23961, 24335); } if slot == 62 { return (24932, 24335, 24708); } if slot == 63 { return (25334, 24708, 25080); } } } } else { if slot < 96 { if slot < 80 { if slot == 64 { return (25736, 25080, 25451); } if slot == 65 { return (26138, 25451, 25821); } if slot == 66 { return (26540, 25821, 26190); } if slot == 67 { return (26942, 26190, 26558); } if slot == 68 { return (27344, 26558, 26925); } if slot == 69 { return (27747, 26925, 27291); } if slot == 70 { return (28149, 27291, 27656); } if slot == 71 { return (28551, 27656, 28020); } if slot == 72 { return (28953, 28020, 28383); } if slot == 73 { return (29355, 28383, 28745); } if slot == 74 { return (29757, 28745, 29106); } if slot == 75 { return (30159, 29106, 29466); } if slot == 76 { return (30561, 29466, 29824); } if slot == 77 { return (30964, 29824, 30182); } if slot == 78 { return (31366, 30182, 30538); } if slot == 79 { return (31768, 30538, 30893); } } else { if slot == 80 { return (32171, 30893, 31248); } if slot == 81 { return (32572, 31248, 31600); } if slot == 82 { return (32974, 31600, 31952); } if slot == 83 { return (33376, 31952, 32303); } if slot == 84 { return (33778, 32303, 32652); } if slot == 85 { return (34181, 32652, 33000); } if slot == 86 { return (34583, 33000, 33347); } if slot == 87 { return (34985, 33347, 33692); } if slot == 88 { return (35387, 33692, 34037); } if slot == 89 { return (35789, 34037, 34380); } if slot == 90 { return (36194, 34380, 34721); } if slot == 91 { return (36593, 34721, 35062); } if slot == 92 { return (36995, 35062, 35401); } if slot == 93 { return (37398, 35401, 35738); } if slot == 94 { return (37800, 35738, 36075); } if 
slot == 95 { return (38202, 36075, 36410); } } } else { if slot < 112 { if slot == 96 { return (38604, 36410, 36744); } if slot == 97 { return (39006, 36744, 37076); } if slot == 98 { return (39408, 37076, 37407); } if slot == 99 { return (39810, 37407, 37736); } if slot == 100 { return (40227, 37736, 38064); } if slot == 101 { return (40615, 38064, 38391); } if slot == 102 { return (41017, 38391, 38716); } if slot == 103 { return (41419, 38716, 39040); } if slot == 104 { return (41821, 39040, 39362); } if slot == 105 { return (42223, 39362, 39683); } if slot == 106 { return (42625, 39683, 40002); } if slot == 107 { return (43027, 40002, 40320); } if slot == 108 { return (43429, 40320, 40636); } if slot == 109 { return (43832, 40636, 40951); } if slot == 110 { return (44234, 40951, 41264); } if slot == 111 { return (44636, 41264, 41576); } } else { if slot == 112 { return (45038, 41576, 41886); } if slot == 113 { return (45440, 41886, 42194); } if slot == 114 { return (45842, 42194, 42501); } if slot == 115 { return (46244, 42501, 42806); } if slot == 116 { return (46646, 42806, 43110); } if slot == 117 { return (47048, 43110, 43412); } if slot == 118 { return (47451, 43412, 43713); } if slot == 119 { return (47853, 43713, 44011); } if slot == 120 { return (48252, 44011, 44308); } if slot == 121 { return (48657, 44308, 44604); } if slot == 122 { return (49059, 44604, 44898); } if slot == 123 { return (49461, 44898, 45190); } if slot == 124 { return (49863, 45190, 45480); } if slot == 125 { return (50265, 45480, 45769); } if slot == 126 { return (50668, 45769, 46056); } if slot == 127 { return (51070, 46056, 46341); } } } } } else { if slot < 192 { if slot < 160 { if slot < 144 { if slot == 128 { return (51472, 46341, 46624); } if slot == 129 { return (51874, 46624, 46906); } if slot == 130 { return (52285, 46906, 47186); } if slot == 131 { return (52678, 47186, 47464); } if slot == 132 { return (53080, 47464, 47741); } if slot == 133 { return (53482, 47741, 48015); 
} if slot == 134 { return (53885, 48015, 48288); } if slot == 135 { return (54287, 48288, 48559); } if slot == 136 { return (54689, 48559, 48828); } if slot == 137 { return (55091, 48828, 49095); } if slot == 138 { return (55493, 49095, 49361); } if slot == 139 { return (55895, 49361, 49624); } if slot == 140 { return (56297, 49624, 49886); } if slot == 141 { return (56699, 49886, 50146); } if slot == 142 { return (57102, 50146, 50404); } if slot == 143 { return (57504, 50404, 50660); } } else { if slot == 144 { return (57906, 50660, 50914); } if slot == 145 { return (58308, 50914, 51166); } if slot == 146 { return (58710, 51166, 51417); } if slot == 147 { return (59112, 51417, 51665); } if slot == 148 { return (59514, 51665, 51911); } if slot == 149 { return (59916, 51911, 52156); } if slot == 150 { return (60320, 52156, 52398); } if slot == 151 { return (60721, 52398, 52639); } if slot == 152 { return (61123, 52639, 52878); } if slot == 153 { return (61525, 52878, 53114); } if slot == 154 { return (61927, 53114, 53349); } if slot == 155 { return (62329, 53349, 53581); } if slot == 156 { return (62731, 53581, 53812); } if slot == 157 { return (63133, 53812, 54040); } if slot == 158 { return (63536, 54040, 54267); } if slot == 159 { return (63938, 54267, 54491); } if slot == 160 { return (64343, 54491, 54714); } } } else { if slot < 176 { if slot == 161 { return (64742, 54714, 54934); } if slot == 162 { return (65144, 54934, 55152); } if slot == 163 { return (65546, 55152, 55368); } if slot == 164 { return (65948, 55368, 55582); } if slot == 165 { return (66350, 55582, 55794); } if slot == 166 { return (66753, 55794, 56004); } if slot == 167 { return (67155, 56004, 56212); } if slot == 168 { return (67557, 56212, 56418); } if slot == 169 { return (67959, 56418, 56621); } if slot == 170 { return (68361, 56621, 56823); } if slot == 171 { return (68763, 56823, 57022); } if slot == 172 { return (69165, 57022, 57219); } if slot == 173 { return (69567, 57219, 57414); } 
if slot == 174 { return (69970, 57414, 57607); } if slot == 175 { return (70372, 57607, 57798); } } else { if slot == 176 { return (70774, 57798, 57986); } if slot == 177 { return (71176, 57986, 58172); } if slot == 178 { return (71578, 58172, 58356); } if slot == 179 { return (71980, 58356, 58538); } if slot == 180 { return (72382, 58538, 58718); } if slot == 181 { return (72784, 58718, 58896); } if slot == 182 { return (73187, 58896, 59071); } if slot == 183 { return (73589, 59071, 59244); } if slot == 184 { return (73991, 59244, 59415); } if slot == 185 { return (74393, 59415, 59583); } if slot == 186 { return (74795, 59583, 59750); } if slot == 187 { return (75197, 59750, 59914); } if slot == 188 { return (75599, 59914, 60075); } if slot == 189 { return (76001, 60075, 60235); } if slot == 190 { return (76401, 60235, 60392); } if slot == 191 { return (76806, 60392, 60547); } } } } else { if slot < 224 { if slot < 208 { if slot == 192 { return (77208, 60547, 60700); } if slot == 193 { return (77610, 60700, 60851); } if slot == 194 { return (78012, 60851, 60999); } if slot == 195 { return (78414, 60999, 61145); } if slot == 196 { return (78816, 61145, 61288); } if slot == 197 { return (79218, 61288, 61429); } if slot == 198 { return (79621, 61429, 61568); } if slot == 199 { return (80023, 61568, 61705); } if slot == 200 { return (80423, 61705, 61839); } if slot == 201 { return (80827, 61839, 61971); } if slot == 202 { return (81229, 61971, 62101); } if slot == 203 { return (81631, 62101, 62228); } if slot == 204 { return (82033, 62228, 62353); } if slot == 205 { return (82435, 62353, 62476); } if slot == 206 { return (82838, 62476, 62596); } if slot == 207 { return (83240, 62596, 62714); } } else { if slot == 208 { return (83642, 62714, 62830); } if slot == 209 { return (84044, 62830, 62943); } if slot == 210 { return (84446, 62943, 63054); } if slot == 211 { return (84848, 63054, 63162); } if slot == 212 { return (85250, 63162, 63268); } if slot == 213 { return 
(85652, 63268, 63372); } if slot == 214 { return (86055, 63372, 63473); } if slot == 215 { return (86457, 63473, 63572); } if slot == 216 { return (86859, 63572, 63668); } if slot == 217 { return (87261, 63668, 63763); } if slot == 218 { return (87663, 63763, 63854); } if slot == 219 { return (88065, 63854, 63944); } if slot == 220 { return (88467, 63944, 64031); } if slot == 221 { return (88869, 64031, 64115); } if slot == 222 { return (89271, 64115, 64197); } if slot == 223 { return (89674, 64197, 64277); } } } else { if slot < 240 { if slot == 224 { return (90076, 64277, 64354); } if slot == 225 { return (90478, 64354, 64429); } if slot == 226 { return (90880, 64429, 64501); } if slot == 227 { return (91282, 64501, 64571); } if slot == 228 { return (91684, 64571, 64639); } if slot == 229 { return (92086, 64639, 64704); } if slot == 230 { return (92491, 64704, 64766); } if slot == 231 { return (92891, 64766, 64827); } if slot == 232 { return (93293, 64827, 64884); } if slot == 233 { return (93695, 64884, 64940); } if slot == 234 { return (94097, 64940, 64993); } if slot == 235 { return (94499, 64993, 65043); } if slot == 236 { return (94901, 65043, 65091); } if slot == 237 { return (95303, 65091, 65137); } if slot == 238 { return (95705, 65137, 65180); } if slot == 239 { return (96108, 65180, 65220); } } else { if slot == 240 { return (96514, 65220, 65259); } if slot == 241 { return (96912, 65259, 65294); } if slot == 242 { return (97314, 65294, 65328); } if slot == 243 { return (97716, 65328, 65358); } if slot == 244 { return (98118, 65358, 65387); } if slot == 245 { return (98520, 65387, 65413); } if slot == 246 { return (98922, 65413, 65436); } if slot == 247 { return (99325, 65436, 65457); } if slot == 248 { return (99727, 65457, 65476); } if slot == 249 { return (100129, 65476, 65492); } if slot == 250 { return (100531, 65492, 65505); } if slot == 251 { return (100933, 65505, 65516); } if slot == 252 { return (101335, 65516, 65525); } if slot == 253 { return 
(101737, 65525, 65531); } if slot == 254 { return (102139, 65531, 65535); } } } } } (102542, 65535, 65536) } fn atan(a: u64) -> (u64, u64, u64) { let slot = a / 459; if slot == 0 { return (0, 0, 459); } if slot == 1 { return (459, 459, 917); } if slot == 2 { return (918, 917, 1376); } if slot == 3 { return (1376, 1376, 1835); } if slot == 4 { return (1835, 1835, 2293); } if slot == 5 { return (2294, 2293, 2751); } if slot == 6 { return (2753, 2751, 3209); } if slot == 7 { return (3211, 3209, 3666); } if slot == 8 { return (3670, 3666, 4123); } if slot == 9 { return (4129, 4123, 4580); } if slot == 10 { return (4591, 4580, 5036); } if slot == 11 { return (5046, 5036, 5492); } if slot == 12 { return (5505, 5492, 5947); } if slot == 13 { return (5964, 5947, 6402); } if slot == 14 { return (6423, 6402, 6856); } if slot == 15 { return (6881, 6856, 7310); } if slot == 16 { return (7340, 7310, 7762); } if slot == 17 { return (7799, 7762, 8214); } if slot == 18 { return (8258, 8214, 8665); } if slot == 19 { return (8716, 8665, 9116); } if slot == 20 { return (9181, 9116, 9565); } if slot == 21 { return (9634, 9565, 10014); } if slot == 22 { return (10093, 10014, 10462); } if slot == 23 { return (10551, 10462, 10908); } if slot == 24 { return (11010, 10908, 11354); } if slot == 25 { return (11469, 11354, 11798); } if slot == 26 { return (11928, 11798, 12242); } if slot == 27 { return (12386, 12242, 12684); } if slot == 28 { return (12845, 12684, 13125); } if slot == 29 { return (13304, 13125, 13565); } if slot == 30 { return (13762, 13565, 14004); } if slot == 31 { return (14221, 14004, 14442); } if slot == 32 { return (14680, 14442, 14878); } if slot == 33 { return (15139, 14878, 15313); } if slot == 34 { return (15598, 15313, 15746); } if slot == 35 { return (16056, 15746, 16178); } if slot == 36 { return (16515, 16178, 16609); } if slot == 37 { return (16974, 16609, 17038); } if slot == 38 { return (17433, 17038, 17466); } if slot == 39 { return (17891, 17466, 17892); } 
if slot == 40 { return (18353, 17892, 18317); } if slot == 41 { return (18809, 18317, 18740); } if slot == 42 { return (19268, 18740, 19161); } if slot == 43 { return (19726, 19161, 19581); } if slot == 44 { return (20185, 19581, 19999); } if slot == 45 { return (20644, 19999, 20416); } if slot == 46 { return (21103, 20416, 20830); } if slot == 47 { return (21561, 20830, 21243); } if slot == 48 { return (22020, 21243, 21655); } if slot == 49 { return (22479, 21655, 22064); } if slot == 50 { return (22944, 22064, 22472); } if slot == 51 { return (23396, 22472, 22878); } if slot == 52 { return (23855, 22878, 23282); } if slot == 53 { return (24314, 23282, 23685); } if slot == 54 { return (24773, 23685, 24085); } if slot == 55 { return (25231, 24085, 24484); } if slot == 56 { return (25690, 24484, 24880); } if slot == 57 { return (26149, 24880, 25275); } if slot == 58 { return (26608, 25275, 25668); } if slot == 59 { return (27066, 25668, 26059); } if slot == 60 { return (27534, 26059, 26448); } if slot == 61 { return (27984, 26448, 26835); } if slot == 62 { return (28443, 26835, 27220); } if slot == 63 { return (28901, 27220, 27603); } if slot == 64 { return (29360, 27603, 27984); } if slot == 65 { return (29819, 27984, 28363); } if slot == 66 { return (30278, 28363, 28740); } if slot == 67 { return (30736, 28740, 29115); } if slot == 68 { return (31195, 29115, 29488); } if slot == 69 { return (31654, 29488, 29859); } if slot == 70 { return (32113, 29859, 30228); } if slot == 71 { return (32571, 30228, 30595); } if slot == 72 { return (33030, 30595, 30960); } if slot == 73 { return (33489, 30960, 31323); } if slot == 74 { return (33948, 31323, 31683); } if slot == 75 { return (34406, 31683, 32042); } if slot == 76 { return (34865, 32042, 32398); } if slot == 77 { return (35324, 32398, 32753); } if slot == 78 { return (35783, 32753, 33105); } if slot == 79 { return (36241, 33105, 33455); } if slot == 80 { return (36700, 33455, 33804); } if slot == 81 { return (37159, 
33804, 34150); } if slot == 82 { return (37618, 34150, 34494); } if slot == 83 { return (38076, 34494, 34836); } if slot == 84 { return (38535, 34836, 35175); } if slot == 85 { return (38994, 35175, 35513); } if slot == 86 { return (39453, 35513, 35849); } if slot == 87 { return (39911, 35849, 36183); } if slot == 88 { return (40370, 36183, 36514); } if slot == 89 { return (40829, 36514, 36843); } if slot == 90 { return (41288, 36843, 37171); } if slot == 91 { return (41746, 37171, 37496); } if slot == 92 { return (42205, 37496, 37819); } if slot == 93 { return (42664, 37819, 38141); } if slot == 94 { return (43123, 38141, 38460); } if slot == 95 { return (43581, 38460, 38777); } if slot == 96 { return (44040, 38777, 39092); } if slot == 97 { return (44499, 39092, 39405); } if slot == 98 { return (44958, 39405, 39716); } (45416, 39716, 40025) } fn erf_lut(x: u64) -> u64 { // Construct the erf lookup table if x <= 5898 { if x <= 0 { return 0; } if x <= 655 { return 739; } if x <= 1310 { return 1478; } if x <= 1966 { return 2217; } if x <= 2621 { return 2956; } if x <= 3276 { return 3694; } if x <= 3932 { return 4431; } if x <= 4587 { return 5168; } if x <= 5242 { return 5903; } if x <= 5898 { return 6637; } } if x <= 12451 { if x <= 6553 { return 7370; } if x <= 7208 { return 8101; } if x <= 7864 { return 8831; } if x <= 8519 { return 9559; } if x <= 9175 { return 10285; } if x <= 9830 { return 11009; } if x <= 10485 { return 11731; } if x <= 11141 { return 12451; } if x <= 11796 { return 13168; } if x <= 12451 { return 13883; } } if x <= 19005 { if x <= 13107 { return 14595; } if x <= 13762 { return 15304; } if x <= 14417 { return 16010; } if x <= 15073 { return 16713; } if x <= 15728 { return 17412; } if x <= 16384 { return 18109; } if x <= 17039 { return 18802; } if x <= 17694 { return 19491; } if x <= 18350 { return 20177; } if x <= 19005 { return 20859; } } if x <= 25559 { if x <= 19660 { return 21536; } if x <= 20316 { return 22210; } if x <= 20971 { return 
22880; } if x <= 21626 { return 23545; } if x <= 22282 { return 24206; } if x <= 22937 { return 24863; } if x <= 23592 { return 25515; } if x <= 24248 { return 26162; } if x <= 24903 { return 26804; } if x <= 25559 { return 27442; } } if x <= 32112 { if x <= 26214 { return 28075; } if x <= 26869 { return 28702; } if x <= 27525 { return 29325; } if x <= 28180 { return 29942; } if x <= 28835 { return 30554; } if x <= 29491 { return 31161; } if x <= 30146 { return 31762; } if x <= 30801 { return 32358; } if x <= 31457 { return 32948; } if x <= 32112 { return 33532; } } if x <= 38666 { if x <= 32768 { return 34111; } if x <= 33423 { return 34684; } if x <= 34078 { return 35251; } if x <= 34734 { return 35813; } if x <= 35389 { return 36368; } if x <= 36044 { return 36917; } if x <= 36700 { return 37461; } if x <= 37355 { return 37998; } if x <= 38010 { return 38530; } if x <= 38666 { return 39055; } } if x <= 45219 { if x <= 39321 { return 39574; } if x <= 39976 { return 40087; } if x <= 40632 { return 40593; } if x <= 41287 { return 41094; } if x <= 41943 { return 41588; } if x <= 42598 { return 42076; } if x <= 43253 { return 42557; } if x <= 43909 { return 43032; } if x <= 44564 { return 43501; } if x <= 45219 { return 43964; } } if x <= 51773 { if x <= 45875 { return 44420; } if x <= 46530 { return 44870; } if x <= 47185 { return 45313; } if x <= 47841 { return 45750; } if x <= 48496 { return 46181; } if x <= 49152 { return 46606; } if x <= 49807 { return 47024; } if x <= 50462 { return 47436; } if x <= 51118 { return 47841; } if x <= 51773 { return 48241; } } if x <= 58327 { if x <= 52428 { return 48634; } if x <= 53084 { return 49021; } if x <= 53739 { return 49401; } if x <= 54394 { return 49776; } if x <= 55050 { return 50144; } if x <= 55705 { return 50506; } if x <= 56360 { return 50862; } if x <= 57016 { return 51212; } if x <= 57671 { return 51556; } if x <= 58327 { return 51894; } } if x <= 64880 { if x <= 58982 { return 52226; } if x <= 59637 { return 
52552; } if x <= 60293 { return 52872; } if x <= 60948 { return 53186; } if x <= 61603 { return 53495; } if x <= 62259 { return 53797; } if x <= 62914 { return 54094; } if x <= 63569 { return 54386; } if x <= 64225 { return 54672; } if x <= 64880 { return 54952; } } if x <= 71434 { if x <= 65536 { return 55227; } if x <= 66191 { return 55496; } if x <= 66846 { return 55760; } if x <= 67502 { return 56019; } if x <= 68157 { return 56272; } if x <= 68812 { return 56520; } if x <= 69468 { return 56763; } if x <= 70123 { return 57001; } if x <= 70778 { return 57234; } if x <= 71434 { return 57462; } } if x <= 77987 { if x <= 72089 { return 57685; } if x <= 72744 { return 57903; } if x <= 73400 { return 58116; } if x <= 74055 { return 58325; } if x <= 74711 { return 58529; } if x <= 75366 { return 58728; } if x <= 76021 { return 58923; } if x <= 76677 { return 59113; } if x <= 77332 { return 59299; } if x <= 77987 { return 59481; } } if x <= 84541 { if x <= 78643 { return 59658; } if x <= 79298 { return 59831; } if x <= 79953 { return 60000; } if x <= 80609 { return 60165; } if x <= 81264 { return 60326; } if x <= 81920 { return 60483; } if x <= 82575 { return 60636; } if x <= 83230 { return 60785; } if x <= 83886 { return 60931; } if x <= 84541 { return 61072; } } if x <= 91095 { if x <= 85196 { return 61211; } if x <= 85852 { return 61345; } if x <= 86507 { return 61477; } if x <= 87162 { return 61604; } if x <= 87818 { return 61729; } if x <= 88473 { return 61850; } if x <= 89128 { return 61968; } if x <= 89784 { return 62083; } if x <= 90439 { return 62194; } if x <= 91095 { return 62303; } } if x <= 97648 { if x <= 91750 { return 62408; } if x <= 92405 { return 62511; } if x <= 93061 { return 62611; } if x <= 93716 { return 62708; } if x <= 94371 { return 62802; } if x <= 95027 { return 62894; } if x <= 95682 { return 62983; } if x <= 96337 { return 63070; } if x <= 96993 { return 63154; } if x <= 97648 { return 63235; } } if x <= 104202 { if x <= 98304 { return 
63314; } if x <= 98959 { return 63391; } if x <= 99614 { return 63465; } if x <= 100270 { return 63538; } if x <= 100925 { return 63608; } if x <= 101580 { return 63676; } if x <= 102236 { return 63742; } if x <= 102891 { return 63806; } if x <= 103546 { return 63867; } if x <= 104202 { return 63927; } } if x <= 110755 { if x <= 104857 { return 63985; } if x <= 105512 { return 64042; } if x <= 106168 { return 64096; } if x <= 106823 { return 64149; } if x <= 107479 { return 64200; } if x <= 108134 { return 64249; } if x <= 108789 { return 64297; } if x <= 109445 { return 64343; } if x <= 110100 { return 64388; } if x <= 110755 { return 64431; } } if x <= 117309 { if x <= 111411 { return 64473; } if x <= 112066 { return 64514; } if x <= 112721 { return 64553; } if x <= 113377 { return 64590; } if x <= 114032 { return 64627; } if x <= 114688 { return 64662; } if x <= 115343 { return 64696; } if x <= 115998 { return 64729; } if x <= 116654 { return 64760; } if x <= 117309 { return 64791; } } if x <= 123863 { if x <= 117964 { return 64821; } if x <= 118620 { return 64849; } if x <= 119275 { return 64876; } if x <= 119930 { return 64903; } if x <= 120586 { return 64928; } if x <= 121241 { return 64953; } if x <= 121896 { return 64977; } if x <= 122552 { return 64999; } if x <= 123207 { return 65021; } if x <= 123863 { return 65043; } } if x <= 130416 { if x <= 124518 { return 65063; } if x <= 125173 { return 65083; } if x <= 125829 { return 65102; } if x <= 126484 { return 65120; } if x <= 127139 { return 65137; } if x <= 127795 { return 65154; } if x <= 128450 { return 65170; } if x <= 129105 { return 65186; } if x <= 129761 { return 65201; } if x <= 130416 { return 65215; } } if x <= 222822 { if x <= 131072 { return 65229; } if x <= 137625 { return 65340; } if x <= 144179 { return 65413; } if x <= 150732 { return 65461; } if x <= 157286 { return 65490; } if x <= 163840 { return 65509; } if x <= 170393 { return 65520; } if x <= 176947 { return 65527; } if x <= 183500 { 
return 65531; } if x <= 190054 { return 65533; } if x <= 196608 { return 65534; } if x <= 203161 { return 65535; } if x <= 209715 { return 65535; } if x <= 216268 { return 65535; } if x <= 222822 { return 65535; } } ONE }
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo
use core::integer;

use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
    HALF, ONE, TWO, FP16x16W, FP16x16WImpl, FP16x16WAdd, FP16x16WSub, FP16x16WMul, FP16x16WDiv,
    FP16x16WIntoFelt252, FixedTrait
};

// CONSTANTS
// Angles in radians, scaled by 2^16 (the FP16x16W fixed-point unit).
const TWO_PI: u64 = 411775;
const PI: u64 = 205887;
const HALF_PI: u64 = 102944;

// PUBLIC

// Calculates arccos(a) for -1 <= a <= 1 (fixed point)
// arccos(a) = arcsin(sqrt(1 - a^2)) - arctan identity has discontinuity at zero
fn acos(a: FP16x16W) -> FP16x16W {
    let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    let asin_res = asin(asin_arg);

    // asin of the sqrt lands in [0, pi/2]; mirror into [pi/2, pi] for negative a.
    if a.sign {
        FixedTrait::new(PI, false) - asin_res
    } else {
        asin_res
    }
}

// Same as acos but uses the lookup-table-based arctan under the hood.
fn acos_fast(a: FP16x16W) -> FP16x16W {
    let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    let asin_res = asin_fast(asin_arg);

    if a.sign {
        FixedTrait::new(PI, false) - asin_res
    } else {
        asin_res
    }
}

// Calculates arcsin(a) for -1 <= a <= 1 (fixed point)
// arcsin(a) = arctan(a / sqrt(1 - a^2))
fn asin(a: FP16x16W) -> FP16x16W {
    // |a| == 1 would make the divisor zero; return +/- pi/2 directly.
    if (a.mag == ONE) {
        return FixedTrait::new(HALF_PI, a.sign);
    }

    let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    atan(a / div)
}

// Same as asin but uses the lookup-table-based arctan under the hood.
fn asin_fast(a: FP16x16W) -> FP16x16W {
    if (a.mag == ONE) {
        return FixedTrait::new(HALF_PI, a.sign);
    }

    let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    atan_fast(a / div)
}

// Calculates arctan(a) (fixed point)
// See https://stackoverflow.com/a/50894477 for range adjustments
fn atan(a: FP16x16W) -> FP16x16W {
    let mut at = a.abs();
    let mut shift = false;
    let mut invert = false;

    // Invert value when a > 1
    if (at.mag > ONE) {
        at = FixedTrait::ONE() / at;
        invert = true;
    }

    // Account for lack of precision in polynomial when a > 0.7
    if (at.mag > 45875) {
        let sqrt3_3 = FixedTrait::new(37837, false); // sqrt(3) / 3
        at = (at - sqrt3_3) / (FixedTrait::ONE() + at * sqrt3_3);
        shift = true;
    }

    // Degree-11 odd polynomial approximation, evaluated Horner-style.
    let r10 = FixedTrait::new(120, true) * at;
    let r9 = (r10 + FixedTrait::new(3066, true)) * at;
    let r8 = (r9 + FixedTrait::new(12727, false)) * at;
    let r7 = (r8 + FixedTrait::new(17170, true)) * at;
    let r6 = (r7 + FixedTrait::new(2865, false)) * at;
    let r5 = (r6 + FixedTrait::new(12456, false)) * at;
    let r4 = (r5 + FixedTrait::new(90, false)) * at;
    let r3 = (r4 + FixedTrait::new(21852, true)) * at;
    let r2 = r3 * at;
    let mut res = (r2 + FixedTrait::new(65536, false)) * at;

    // Adjust for sign change, inversion, and shift
    if (shift) {
        res = res + FixedTrait::new(34315, false); // pi / 6
    }

    if (invert) {
        res = res - FixedTrait::new(HALF_PI, false);
    }

    FixedTrait::new(res.mag, a.sign)
}

// Lookup-table variant of atan: linearly interpolates between lut::atan entries.
fn atan_fast(a: FP16x16W) -> FP16x16W {
    let mut at = a.abs();
    let mut shift = false;
    let mut invert = false;

    // Invert value when a > 1
    if (at.mag > ONE) {
        at = FixedTrait::ONE() / at;
        invert = true;
    }

    // Account for lack of precision in polynomial when a > 0.7
    if (at.mag > 45875) {
        let sqrt3_3 = FixedTrait::new(37837, false); // sqrt(3) / 3
        at = (at - sqrt3_3) / (FixedTrait::ONE() + at * sqrt3_3);
        shift = true;
    }

    // Interpolate between the two surrounding table entries; 459 is the table step.
    let (start, low, high) = lut::atan(at.mag);
    let partial_step = FixedTrait::new(at.mag - start, false) / FixedTrait::new(459, false);
    let mut res = partial_step * FixedTrait::new(high - low, false) + FixedTrait::new(low, false);

    // Adjust for sign change, inversion, and shift
    if (shift) {
        res = res + FixedTrait::new(34315, false); // pi / 6
    }

    if (invert) {
        res = res - FixedTrait::<FP16x16W>::new(HALF_PI, false);
    }

    FixedTrait::new(res.mag, a.sign)
}

// Calculates cos(a) with a in radians (fixed point)
// cos(a) = sin(pi/2 - a)
fn cos(a: FP16x16W) -> FP16x16W {
    sin(FixedTrait::new(HALF_PI, false) - a)
}

fn cos_fast(a: FP16x16W) -> FP16x16W {
    sin_fast(FixedTrait::new(HALF_PI, false) - a)
}

// Calculates sin(a) with a in radians via a Taylor series (see _sin_loop).
fn sin(a: FP16x16W) -> FP16x16W {
    // Reduce to [0, 2*pi), then split into a half-turn count and remainder;
    // an odd half-turn count flips the sign (sin(x + pi) = -sin(x)).
    let a1 = a.mag % TWO_PI;
    let (whole_rem, partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI));
    let a2 = FixedTrait::new(partial_rem, false);
    let partial_sign = whole_rem == 1;

    let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE());
    FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0)
}

// Lookup-table variant of sin: linearly interpolates between lut::sin entries.
fn sin_fast(a: FP16x16W) -> FP16x16W {
    let a1 = a.mag % TWO_PI;
    let (whole_rem, mut partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI));
    let partial_sign = whole_rem == 1;

    // Fold [pi/2, pi) onto [0, pi/2) using sin(pi - x) = sin(x).
    if partial_rem >= HALF_PI {
        partial_rem = PI - partial_rem;
    }

    // Interpolate between the two surrounding table entries; 402 is the table step.
    let (start, low, high) = lut::sin(partial_rem);
    let partial_step = FixedTrait::new(partial_rem - start, false) / FixedTrait::new(402, false);
    let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false))
        + FixedTrait::<FP16x16W>::new(low, false);

    FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0)
}

// Calculates tan(a) with a in radians (fixed point)
// Panics when cos(a) == 0 (tan undefined there).
fn tan(a: FP16x16W) -> FP16x16W {
    let sinx = sin(a);
    let cosx = cos(a);
    assert(cosx.mag != 0, 'tan undefined');
    sinx / cosx
}

fn tan_fast(a: FP16x16W) -> FP16x16W {
    let sinx = sin_fast(a);
    let cosx = cos_fast(a);
    assert(cosx.mag != 0, 'tan undefined');
    sinx / cosx
}

// Helper function to calculate Taylor series for sin.
// Evaluates the alternating series 1 - x^2/(2*3) * (1 - x^2/(4*5) * (...))
// from the innermost term (i = starting depth) outward to i = 0.
fn _sin_loop(a: FP16x16W, i: u64, acc: FP16x16W) -> FP16x16W {
    let div = (2 * i + 2) * (2 * i + 3);
    let term = a * a * acc / FixedTrait::new_unscaled(div, false);
    let new_acc = FixedTrait::ONE() - term;

    if (i == 0) {
        return new_acc;
    }

    _sin_loop(a, i - 1, new_acc)
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use orion::numbers::fixed_point::implementations::fp16x16wide::helpers::{
        assert_precise, assert_relative
    };
    use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
        FP16x16WPartialEq, FP16x16WPrint
    };

    use super::{
        FixedTrait, acos, HALF_PI, ONE, acos_fast, PI, atan_fast, atan, asin, cos, cos_fast, sin,
        sin_fast, tan
    };

    #[test]
    #[available_gas(8000000)]
    fn test_acos() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::ONE();
        assert(acos(a).into() == 0, 'invalid one');

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(acos(a), 68629, 'invalid half', error); // pi / 3 = 1.04719755

        let a = FixedTrait::ZERO();
        assert_relative(acos(a), HALF_PI.into(), 'invalid zero', Option::None(())); // PI / 2

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(acos(a), 137258, 'invalid neg half', error); // 2 * pi / 3 = 2.09439510

        let a = FixedTrait::new(ONE, true);
        assert_relative(acos(a), PI.into(), 'invalid neg one', Option::None(())); // PI
    }

    #[test]
    #[available_gas(8000000)]
    fn test_acos_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::ONE();
        assert(acos_fast(a).into() == 0, 'invalid one');

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(acos_fast(a), 68629, 'invalid half', error); // pi / 3 = 1.04719755

        let a = FixedTrait::ZERO();
        assert_relative(acos_fast(a), HALF_PI.into(), 'invalid zero', Option::None(())); // PI / 2

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(acos_fast(a), 137258, 'invalid neg half', error); // 2 * pi / 3 = 2.09439510

        let a = FixedTrait::new(ONE, true);
        assert_relative(acos_fast(a), PI.into(), 'invalid neg one', Option::None(())); // PI
    }

    #[test]
    #[should_panic]
    #[available_gas(8000000)]
    fn test_acos_fail() {
        let a = FixedTrait::new(2 * ONE, true);
        acos(a);
    }

    #[test]
    #[available_gas(8000000)]
    fn test_atan_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::new(2 * ONE, false);
        assert_relative(atan_fast(a), 72558, 'invalid two', error);

        let a = FixedTrait::ONE();
        assert_relative(atan_fast(a), 51472, 'invalid one', error);

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(atan_fast(a), 30386, 'invalid half', error);

        let a = FixedTrait::ZERO();
        assert(atan_fast(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(atan_fast(a), -30386, 'invalid neg half', error);

        let a = FixedTrait::new(ONE, true);
        assert_relative(atan_fast(a), -51472, 'invalid neg one', error);

        let a = FixedTrait::new(2 * ONE, true);
        assert_relative(atan_fast(a), -72558, 'invalid neg two', error);
    }

    #[test]
    #[available_gas(8000000)]
    fn test_atan() {
        let a = FixedTrait::new(2 * ONE, false);
        assert_relative(atan(a), 72558, 'invalid two', Option::None(()));

        let a = FixedTrait::ONE();
        assert_relative(atan(a), 51472, 'invalid one', Option::None(()));

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(atan(a), 30386, 'invalid half', Option::None(()));

        let a = FixedTrait::ZERO();
        assert(atan(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(atan(a), -30386, 'invalid neg half', Option::None(()));

        let a = FixedTrait::new(ONE, true);
        assert_relative(atan(a), -51472, 'invalid neg one', Option::None(()));

        let a = FixedTrait::new(2 * ONE, true);
        assert_relative(atan(a), -72558, 'invalid neg two', Option::None(()));
    }

    #[test]
    #[available_gas(8000000)]
    fn test_asin() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::ONE();
        assert_relative(asin(a), HALF_PI.into(), 'invalid one', Option::None(())); // PI / 2

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(asin(a), 34315, 'invalid half', error); // pi / 6

        let a = FixedTrait::ZERO();
        assert_precise(asin(a), 0, 'invalid zero', Option::None(()));

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(asin(a), -34315, 'invalid neg half', error); // -pi / 6

        let a = FixedTrait::new(ONE, true);
        assert_relative(asin(a), -HALF_PI.into(), 'invalid neg one', Option::None(())); // -PI / 2
    }

    #[test]
    #[should_panic]
    #[available_gas(8000000)]
    fn test_asin_fail() {
        let a = FixedTrait::new(2 * ONE, false);
        asin(a);
    }

    #[test]
    #[available_gas(8000000)]
    fn test_cos() {
        let a = FixedTrait::new(HALF_PI, false);
        assert(cos(a).into() == 0, 'invalid half pi');

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_relative(cos(a), 46341, 'invalid quarter pi', Option::None(())); // 0.70710678

        let a = FixedTrait::new(PI, false);
        assert_relative(cos(a), -1 * ONE.into(), 'invalid pi', Option::None(()));

        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(cos(a), 0, 'invalid neg half pi', Option::None(()));

        let a = FixedTrait::new_unscaled(17, false);
        assert_relative(cos(a), -18033, 'invalid 17', Option::None(())); // cos(17) = -0.2752

        let a = FixedTrait::new_unscaled(17, true);
        assert_relative(cos(a), -18033, 'invalid -17', Option::None(())); // cos(-17) = -0.2752
    }

    #[test]
    #[available_gas(8000000)]
    fn test_cos_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::new(HALF_PI, false);
        assert(cos_fast(a).into() == 0, 'invalid half pi');

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(cos_fast(a), 46341, 'invalid quarter pi', error); // 0.70710678

        let a = FixedTrait::new(PI, false);
        assert_precise(cos_fast(a), -1 * ONE.into(), 'invalid pi', error);

        // FIX: previously called cos(a) here, so the fast path was never tested
        // for the negative half-pi case.
        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(cos_fast(a), 0, 'invalid neg half pi', Option::None(()));

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(cos_fast(a), -18033, 'invalid 17', error); // cos(17) = -0.2752

        // Mirror of test_cos: cosine is even, cos(-17) == cos(17).
        let a = FixedTrait::new_unscaled(17, true);
        assert_precise(cos_fast(a), -18033, 'invalid -17', error); // cos(-17) = -0.2752
    }

    #[test]
    #[available_gas(8000000)]
    fn test_sin() {
        let a = FixedTrait::new(HALF_PI, false);
        assert_precise(sin(a), ONE.into(), 'invalid half pi', Option::None(()));

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(sin(a), 46341, 'invalid quarter pi', Option::None(())); // 0.70710678

        let a = FixedTrait::new(PI, false);
        assert(sin(a).into() == 0, 'invalid pi');

        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(
            sin(a), -ONE.into(), 'invalid neg half pi', Option::None(())
        ); // sin(-pi/2) = -1

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(sin(a), -63006, 'invalid 17', Option::None(())); // sin(17) = -0.9614

        let a = FixedTrait::new_unscaled(17, true);
        assert_precise(sin(a), 63006, 'invalid -17', Option::None(())); // sin(-17) = 0.9614
    }

    #[test]
    #[available_gas(8000000)]
    fn test_sin_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::new(HALF_PI, false);
        assert_precise(sin_fast(a), ONE.into(), 'invalid half pi', error);

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(sin_fast(a), 46341, 'invalid quarter pi', error); // 0.70710678

        let a = FixedTrait::new(PI, false);
        assert(sin_fast(a).into() == 0, 'invalid pi');

        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(sin_fast(a), -ONE.into(), 'invalid neg half pi', error); // sin(-pi/2) = -1

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(sin_fast(a), -63006, 'invalid 17', error); // sin(17) = -0.9614

        let a = FixedTrait::new_unscaled(17, true);
        assert_precise(sin_fast(a), 63006, 'invalid -17', error); // sin(-17) = 0.9614
    }

    #[test]
    #[available_gas(8000000)]
    fn test_tan() {
        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(tan(a), ONE.into(), 'invalid quarter pi', Option::None(()));

        let a = FixedTrait::new(PI, false);
        assert_precise(tan(a), 0, 'invalid pi', Option::None(()));

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(tan(a), 228990, 'invalid 17', Option::None(())); // tan(17) = 3.494

        let a = FixedTrait::new_unscaled(17, true);
        assert_precise(tan(a), -228952, 'invalid -17', Option::None(())); // tan(-17) = -3.494
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp32x32.cairo
mod core; mod comp; mod erf; mod lut;
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp32x32/comp.cairo
use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait}; fn xor(a: FP32x32, b: FP32x32) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { true } else { false } } fn or(a: FP32x32, b: FP32x32) -> bool { let zero = FixedTrait::new(0, false); if a == zero && b == zero { false } else { true } } fn and(a: FP32x32, b: FP32x32) -> bool { let zero = FixedTrait::new(0, false); if a == zero || b == zero { false } else { true } } fn where(a: FP32x32, b: FP32x32, c: FP32x32) -> FP32x32 { if a == FixedTrait::new(0, false) { c } else { b } } fn bitwise_and(a: FP32x32, b: FP32x32) -> FP32x32 { FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP32x32, b: FP32x32) -> FP32x32 { FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP32x32, b: FP32x32) -> FP32x32 { FixedTrait::new(a.mag | b.mag, a.sign | b.sign) }
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp32x32/core.cairo
// FP32x32: signed fixed-point number with a u64 magnitude (32 integer /
// 32 fractional bits, ONE = 2^32), backed by the cubit f64 library.
// Orion's FixedTrait is implemented almost entirely by delegating to
// cubit's ops/trig/hyp functions.
use core::debug::PrintTrait;

use cubit::f64 as fp32x32;
use cubit::f64::Fixed as FP32x32;
use cubit::f64::{ONE, HALF};
use cubit::f64::types::fixed;

use orion::numbers::fixed_point::core::{FixedTrait};
use orion::numbers::fixed_point::implementations::fp32x32::erf;
use orion::numbers::fixed_point::utils;

// 2^63 — the magnitude reported by MAX().
const MAX: u64 = 9223372036854775808;

impl FP32x32Impl of FixedTrait<FP32x32, u64> {
    fn ZERO() -> FP32x32 {
        FP32x32 { mag: 0, sign: false }
    }

    fn HALF() -> FP32x32 {
        FP32x32 { mag: HALF, sign: false }
    }

    fn ONE() -> FP32x32 {
        FP32x32 { mag: ONE, sign: false }
    }

    fn MAX() -> FP32x32 {
        FP32x32 { mag: MAX, sign: false }
    }

    // Construct from a raw (already scaled) magnitude and sign.
    fn new(mag: u64, sign: bool) -> FP32x32 {
        FP32x32 { mag: mag, sign: sign }
    }

    // Construct from an integer value; scales by ONE (may overflow for
    // very large mag — caller's responsibility).
    fn new_unscaled(mag: u64, sign: bool) -> FP32x32 {
        FP32x32 { mag: mag * ONE, sign: sign }
    }

    // Construct from a raw felt252 whose absolute value fits in u64;
    // the felt's sign becomes the fixed-point sign.
    fn from_felt(val: felt252) -> FP32x32 {
        let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap();
        FixedTrait::new(mag, utils::felt_sign(val))
    }

    fn abs(self: FP32x32) -> FP32x32 {
        fp32x32::ops::abs(self)
    }

    // NOTE(review): the non-fast trig entry points below intentionally (?)
    // delegate to cubit's *_fast lut-based variants — confirm this matches
    // the precision expected of the non-fast API.
    fn acos(self: FP32x32) -> FP32x32 {
        fp32x32::trig::acos_fast(self)
    }

    fn acos_fast(self: FP32x32) -> FP32x32 {
        fp32x32::trig::acos_fast(self)
    }

    fn acosh(self: FP32x32) -> FP32x32 {
        fp32x32::hyp::acosh(self)
    }

    fn asin(self: FP32x32) -> FP32x32 {
        fp32x32::trig::asin_fast(self)
    }

    fn asin_fast(self: FP32x32) -> FP32x32 {
        fp32x32::trig::asin_fast(self)
    }

    fn asinh(self: FP32x32) -> FP32x32 {
        fp32x32::hyp::asinh(self)
    }

    fn atan(self: FP32x32) -> FP32x32 {
        fp32x32::trig::atan_fast(self)
    }

    fn atan_fast(self: FP32x32) -> FP32x32 {
        fp32x32::trig::atan_fast(self)
    }

    fn atanh(self: FP32x32) -> FP32x32 {
        fp32x32::hyp::atanh(self)
    }

    fn ceil(self: FP32x32) -> FP32x32 {
        fp32x32::ops::ceil(self)
    }

    fn cos(self: FP32x32) -> FP32x32 {
        fp32x32::trig::cos_fast(self)
    }

    fn cos_fast(self: FP32x32) -> FP32x32 {
        fp32x32::trig::cos_fast(self)
    }

    fn cosh(self: FP32x32) -> FP32x32 {
        fp32x32::hyp::cosh(self)
    }

    fn floor(self: FP32x32) -> FP32x32 {
        fp32x32::ops::floor(self)
    }

    // Calculates the natural exponent of x: e^x
    fn exp(self: FP32x32) -> FP32x32 {
        fp32x32::ops::exp(self)
    }

    // Calculates the binary exponent of x: 2^x
    fn exp2(self: FP32x32) -> FP32x32 {
        fp32x32::ops::exp2(self)
    }

    // Calculates the natural logarithm of x: ln(x)
    // self must be greater than zero
    fn ln(self: FP32x32) -> FP32x32 {
        fp32x32::ops::ln(self)
    }

    // Calculates the binary logarithm of x: log2(x)
    // self must be greater than zero
    fn log2(self: FP32x32) -> FP32x32 {
        fp32x32::ops::log2(self)
    }

    // Calculates the base 10 log of x: log10(x)
    // self must be greater than zero
    fn log10(self: FP32x32) -> FP32x32 {
        fp32x32::ops::log10(self)
    }

    // Calculates the value of x^y and checks for overflow before returning
    // self is a fixed point value
    // b is a fixed point value
    fn pow(self: FP32x32, b: FP32x32) -> FP32x32 {
        fp32x32::ops::pow(self, b)
    }

    fn round(self: FP32x32) -> FP32x32 {
        fp32x32::ops::round(self)
    }

    fn sin(self: FP32x32) -> FP32x32 {
        fp32x32::trig::sin_fast(self)
    }

    fn sin_fast(self: FP32x32) -> FP32x32 {
        fp32x32::trig::sin_fast(self)
    }

    fn sinh(self: FP32x32) -> FP32x32 {
        fp32x32::hyp::sinh(self)
    }

    // Calculates the square root of a fixed point value
    // x must be positive
    fn sqrt(self: FP32x32) -> FP32x32 {
        fp32x32::ops::sqrt(self)
    }

    fn tan(self: FP32x32) -> FP32x32 {
        fp32x32::trig::tan_fast(self)
    }

    fn tan_fast(self: FP32x32) -> FP32x32 {
        fp32x32::trig::tan_fast(self)
    }

    fn tanh(self: FP32x32) -> FP32x32 {
        fp32x32::hyp::tanh(self)
    }

    // sign() is not implemented for this type; always panics.
    fn sign(self: FP32x32) -> FP32x32 {
        panic(array!['not supported!'])
    }

    // NaN is encoded as the otherwise-unused value { mag: 0, sign: true }
    // ("negative zero" sentinel).
    fn NaN() -> FP32x32 {
        FP32x32 { mag: 0, sign: true }
    }

    fn is_nan(self: FP32x32) -> bool {
        self == FP32x32 { mag: 0, sign: true }
    }

    // Infinity is encoded with the sentinel magnitude 4294967295.
    // NOTE(review): 4294967295 is 2^32 - 1, which is *below* ONE (2^32) —
    // confirm this sentinel choice is intentional for the u64-magnitude type.
    fn INF() -> FP32x32 {
        FP32x32 { mag: 4294967295, sign: false }
    }

    fn POS_INF() -> FP32x32 {
        FP32x32 { mag: 4294967295, sign: false }
    }

    fn NEG_INF() -> FP32x32 {
        FP32x32 { mag: 4294967295, sign: true }
    }

    fn is_inf(self: FP32x32) -> bool {
        self.mag == 4294967295
    }

    fn is_pos_inf(self: FP32x32) -> bool {
        self.is_inf() && !self.sign
    }

    fn is_neg_inf(self: FP32x32) -> bool {
        self.is_inf() && self.sign
    }

    // Gauss error function, via this package's lookup-table implementation.
    fn erf(self: FP32x32) -> FP32x32 {
        erf::erf(self)
    }
}

impl FP32x32Print of PrintTrait<FP32x32> {
    // Debug-prints sign then magnitude (two separate lines).
    fn print(self: FP32x32) {
        self.sign.print();
        self.mag.print();
    }
}

// Into a raw felt without unscaling
impl FP32x32IntoFelt252 of Into<FP32x32, felt252> {
    fn into(self: FP32x32) -> felt252 {
        let mag_felt = self.mag.into();

        if self.sign {
            mag_felt * -1
        } else {
            mag_felt * 1
        }
    }
}

impl FP32x32TryIntoU64 of TryInto<FP32x32, u64> {
    // Fails (None) for negative values.
    fn try_into(self: FP32x32) -> Option<u64> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            Option::Some((self.mag / ONE).into())
        }
    }
}

impl FP32x32TryIntoU16 of TryInto<FP32x32, u16> {
    // Fails (None) for negative values or values that overflow u16.
    fn try_into(self: FP32x32) -> Option<u16> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP32x32TryIntoU32 of TryInto<FP32x32, u32> {
    // Fails (None) for negative values or values that overflow u32.
    fn try_into(self: FP32x32) -> Option<u32> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP32x32TryIntoU8 of TryInto<FP32x32, u8> {
    // Fails (None) for negative values or values that overflow u8.
    fn try_into(self: FP32x32) -> Option<u8> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP32x32TryIntoI8 of TryInto<FP32x32, i8> {
    fn try_into(self: FP32x32) -> Option<i8> {
        _i8_try_from_fp(self)
    }
}

// impl FP32x32PartialEq of PartialEq<FP32x32> {
//     #[inline(always)]
//     fn eq(lhs: @FP32x32, rhs: @FP32x32) -> bool {
//         return fp32x32::ops::eq(lhs, rhs);
//     }

//     #[inline(always)]
//     fn ne(lhs: @FP32x32, rhs: @FP32x32) -> bool {
//         return fp32x32::ops::ne(lhs, rhs);
//     }
// }

// Arithmetic operator impls: all delegate to cubit's ops module.
impl FP32x32Add of Add<FP32x32> {
    fn add(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
        fp32x32::ops::add(lhs, rhs)
    }
}

impl FP32x32AddEq of AddEq<FP32x32> {
    #[inline(always)]
    fn add_eq(ref self: FP32x32, other: FP32x32) {
        self = fp32x32::ops::add(self, other);
    }
}

impl FP32x32Sub of Sub<FP32x32> {
    fn sub(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
        fp32x32::ops::sub(lhs, rhs)
    }
}

impl FP32x32SubEq of SubEq<FP32x32> {
    #[inline(always)]
    fn sub_eq(ref self: FP32x32, other: FP32x32) {
        self = fp32x32::ops::sub(self, other);
    }
}

impl FP32x32Mul of Mul<FP32x32> {
    fn mul(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
        fp32x32::ops::mul(lhs, rhs)
    }
}

impl FP32x32MulEq of MulEq<FP32x32> {
    #[inline(always)]
    fn mul_eq(ref self: FP32x32, other: FP32x32) {
        self = fp32x32::ops::mul(self, other);
    }
}

impl FP32x32Div of Div<FP32x32> {
    fn div(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
        fp32x32::ops::div(lhs, rhs)
    }
}

impl FP32x32DivEq of DivEq<FP32x32> {
    #[inline(always)]
    fn div_eq(ref self: FP32x32, other: FP32x32) {
        self = fp32x32::ops::div(self, other);
    }
}

impl FP32x32PartialOrd of PartialOrd<FP32x32> {
    #[inline(always)]
    fn ge(lhs: FP32x32, rhs: FP32x32) -> bool {
        fp32x32::ops::ge(lhs, rhs)
    }

    #[inline(always)]
    fn gt(lhs: FP32x32, rhs: FP32x32) -> bool {
        fp32x32::ops::gt(lhs, rhs)
    }

    #[inline(always)]
    fn le(lhs: FP32x32, rhs: FP32x32) -> bool {
        fp32x32::ops::le(lhs, rhs)
    }

    #[inline(always)]
    fn lt(lhs: FP32x32, rhs: FP32x32) -> bool {
        fp32x32::ops::lt(lhs, rhs)
    }
}

impl FP32x32Neg of Neg<FP32x32> {
    #[inline(always)]
    fn neg(a: FP32x32) -> FP32x32 {
        fp32x32::ops::neg(a)
    }
}

impl FP32x32Rem of Rem<FP32x32> {
    #[inline(always)]
    fn rem(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
        fp32x32::ops::rem(lhs, rhs)
    }
}

// Structural equality on the raw (mag, sign) representation.
// NOTE(review): this treats +0 and -0 (the NaN sentinel) as different values.
fn eq(a: @FP32x32, b: @FP32x32) -> bool {
    (*a.mag == *b.mag) && (*a.sign == *b.sign)
}

/// INTERNAL

// Converts to i8 by truncating toward zero; None when the unscaled
// magnitude does not fit in u8 (felt-level i8 conversion may still panic
// for 128..255 — inherited behavior).
fn _i8_try_from_fp(x: FP32x32) -> Option<i8> {
    let unscaled_mag: Option<u8> = (x.mag / ONE).try_into();

    match unscaled_mag {
        Option::Some => {
            let number_felt: felt252 = unscaled_mag.unwrap().into();
            let mut number_i8: i8 = number_felt.try_into().unwrap();
            if x.sign {
                return Option::Some(number_i8 * -1_i8);
            }
            Option::Some(number_i8)
        },
        Option::None => Option::None(())
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp32x32/erf.cairo
use cubit::f64::ONE; use orion::numbers::{FP32x32, FixedTrait}; use orion::numbers::fixed_point::implementations::fp32x32::lut::erf_lut; const ERF_COMPUTATIONAL_ACCURACY: u64 = 100; const ROUND_CHECK_NUMBER: u64 = 10; // Values > MAX_ERF_NUMBER return 1 const MAX_ERF_NUMBER: u64 = 15032385536; // Values <= ERF_TRUNCATION_NUMBER -> two decimal places, and values > ERF_TRUNCATION_NUMBER -> one decimal place const ERF_TRUNCATION_NUMBER: u64 = 8589934592; fn erf(x: FP32x32) -> FP32x32 { // Lookup // 1. if x.mag < 3.5 { lookup table } // 2. else{ return 1} let mut erf_value: u64 = 0_u64; if x.mag < MAX_ERF_NUMBER { erf_value = erf_lut(x.mag); } else { erf_value = ONE; } FP32x32 { mag: erf_value, sign: x.sign } }
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp32x32/lut.cairo
use orion::numbers::fixed_point::implementations::fp32x32::core::ONE;

/// Piecewise-constant lookup table for erf on FP32x32 magnitudes
/// (fixed-point scale ONE = 2^32).
///
/// The cascading `if x <= threshold` chain returns the pre-scaled erf
/// value attached to the smallest threshold >= `x`; the outer `if`s only
/// bucket the thresholds so each call does at most ~20 comparisons.
/// Inputs past the last threshold fall through to `ONE` (erf saturates
/// to 1). Thresholds appear to step by ~ONE/100 up to 2*ONE and by
/// ~ONE/10 beyond that — presumably matching ERF_TRUNCATION_NUMBER in
/// erf.cairo; TODO confirm against the table generator.
fn erf_lut(x: u64) -> u64 {
    // Construct the erf lookup table
    if x <= 386547056 { if x <= 0 { return 0; } if x <= 42949672 { return 48461900; } if x <= 85899345 { return 96914110; } if x <= 128849018 { return 145346943; } if x <= 171798691 { return 193750725; } if x <= 214748364 { return 242115801; } if x <= 257698037 { return 290432536; } if x <= 300647710 { return 338691327; } if x <= 343597383 { return 386882604; } if x <= 386547056 { return 434996838; } } if x <= 816043786 { if x <= 429496729 { return 483024546; } if x <= 472446402 { return 530956296; } if x <= 515396075 { return 578782713; } if x <= 558345748 { return 626494487; } if x <= 601295421 { return 674082374; } if x <= 644245094 { return 721537203; } if x <= 687194767 { return 768849883; } if x <= 730144440 { return 816011407; } if x <= 773094113 { return 863012857; } if x <= 816043786 { return 909845408; } } if x <= 1245540515 { if x <= 858993459 { return 956500337; } if x <= 901943132 { return 1002969022; } if x <= 944892805 { return 1049242950; } if x <= 987842478 { return 1095313724; } if x <= 1030792151 { return 1141173063; } if x <= 1073741824 { return 1186812808; } if x <= 1116691496 { return 1232224928; } if x <= 1159641169 { return 1277401521; } if x <= 1202590842 { return 1322334823; } if x <= 1245540515 { return 1367017205; } } if x <= 1675037245 { if x <= 1288490188 { return 1411441184; } if x <= 1331439861 { return 1455599421; } if x <= 1374389534 { return 1499484729; } if x <= 1417339207 { return 1543090073; } if x <= 1460288880 { return 1586408573; } if x <= 1503238553 { return 1629433512; } if x <= 1546188226 { return 1672158333; } if x <= 1589137899 { return 1714576645; } if x <= 1632087572 { return 1756682226; } if x <= 1675037245 { return 1798469022; } } if x <= 2104533975 { if x <= 1717986918 { return 1839931154; } if x <= 1760936591 { return 1881062918; } if x <= 1803886264 { return 1921858787; } if x <= 
    1846835937 { return 1962313411; } if x <= 1889785610 { return 2002421622; } if x <= 1932735283 { return 2042178436; } if x <= 1975684956 { return 2081579049; } if x <= 2018634629 { return 2120618846; } if x <= 2061584302 { return 2159293393; } if x <= 2104533975 { return 2197598448; } } if x <= 2534030704 { if x <= 2147483648 { return 2235529952; } if x <= 2190433320 { return 2273084038; } if x <= 2233382993 { return 2310257026; } if x <= 2276332666 { return 2347045424; } if x <= 2319282339 { return 2383445931; } if x <= 2362232012 { return 2419455435; } if x <= 2405181685 { return 2455071011; } if x <= 2448131358 { return 2490289925; } if x <= 2491081031 { return 2525109629; } if x <= 2534030704 { return 2559527765; } } if x <= 2963527434 { if x <= 2576980377 { return 2593542161; } if x <= 2619930050 { return 2627150830; } if x <= 2662879723 { return 2660351971; } if x <= 2705829396 { return 2693143967; } if x <= 2748779069 { return 2725525382; } if x <= 2791728742 { return 2757494964; } if x <= 2834678415 { return 2789051637; } if x <= 2877628088 { return 2820194507; } if x <= 2920577761 { return 2850922852; } if x <= 2963527434 { return 2881236128; } } if x <= 3393024163 { if x <= 3006477107 { return 2911133960; } if x <= 3049426780 { return 2940616146; } if x <= 3092376453 { return 2969682651; } if x <= 3135326126 { return 2998333604; } if x <= 3178275799 { return 3026569298; } if x <= 3221225472 { return 3054390188; } if x <= 3264175144 { return 3081796886; } if x <= 3307124817 { return 3108790160; } if x <= 3350074490 { return 3135370928; } if x <= 3393024163 { return 3161540260; } } if x <= 3822520893 { if x <= 3435973836 { return 3187299373; } if x <= 3478923509 { return 3212649627; } if x <= 3521873182 { return 3237592522; } if x <= 3564822855 { return 3262129696; } if x <= 3607772528 { return 3286262922; } if x <= 3650722201 { return 3309994103; } if x <= 3693671874 { return 3333325270; } if x <= 3736621547 { return 3356258580; } if x <= 3779571220 { 
    return 3378796308; } if x <= 3822520893 { return 3400940848; } } if x <= 4252017623 { if x <= 3865470566 { return 3422694710; } if x <= 3908420239 { return 3444060511; } if x <= 3951369912 { return 3465040979; } if x <= 3994319585 { return 3485638942; } if x <= 4037269258 { return 3505857331; } if x <= 4080218931 { return 3525699170; } if x <= 4123168604 { return 3545167580; } if x <= 4166118277 { return 3564265768; } if x <= 4209067950 { return 3582997028; } if x <= 4252017623 { return 3601364736; } } if x <= 4681514352 { if x <= 4294967296 { return 3619372346; } if x <= 4337916968 { return 3637023387; } if x <= 4380866641 { return 3654321460; } if x <= 4423816314 { return 3671270233; } if x <= 4466765987 { return 3687873439; } if x <= 4509715660 { return 3704134870; } if x <= 4552665333 { return 3720058378; } if x <= 4595615006 { return 3735647866; } if x <= 4638564679 { return 3750907289; } if x <= 4681514352 { return 3765840647; } } if x <= 5111011082 { if x <= 4724464025 { return 3780451987; } if x <= 4767413698 { return 3794745393; } if x <= 4810363371 { return 3808724986; } if x <= 4853313044 { return 3822394923; } if x <= 4896262717 { return 3835759389; } if x <= 4939212390 { return 3848822598; } if x <= 4982162063 { return 3861588787; } if x <= 5025111736 { return 3874062214; } if x <= 5068061409 { return 3886247156; } if x <= 5111011082 { return 3898147905; } } if x <= 5540507811 { if x <= 5153960755 { return 3909768765; } if x <= 5196910428 { return 3921114049; } if x <= 5239860101 { return 3932188077; } if x <= 5282809774 { return 3942995173; } if x <= 5325759447 { return 3953539662; } if x <= 5368709120 { return 3963825868; } if x <= 5411658792 { return 3973858111; } if x <= 5454608465 { return 3983640704; } if x <= 5497558138 { return 3993177952; } if x <= 5540507811 { return 4002474150; } } if x <= 5970004541 { if x <= 5583457484 { return 4011533577; } if x <= 5626407157 { return 4020360499; } if x <= 5669356830 { return 4028959162; } if x <= 
    5712306503 { return 4037333795; } if x <= 5755256176 { return 4045488602; } if x <= 5798205849 { return 4053427767; } if x <= 5841155522 { return 4061155446; } if x <= 5884105195 { return 4068675768; } if x <= 5927054868 { return 4075992834; } if x <= 5970004541 { return 4083110714; } } if x <= 6399501271 { if x <= 6012954214 { return 4090033445; } if x <= 6055903887 { return 4096765032; } if x <= 6098853560 { return 4103309442; } if x <= 6141803233 { return 4109670609; } if x <= 6184752906 { return 4115852426; } if x <= 6227702579 { return 4121858749; } if x <= 6270652252 { return 4127693393; } if x <= 6313601925 { return 4133360131; } if x <= 6356551598 { return 4138862695; } if x <= 6399501271 { return 4144204773; } } if x <= 6828998000 { if x <= 6442450944 { return 4149390008; } if x <= 6485400616 { return 4154421999; } if x <= 6528350289 { return 4159304298; } if x <= 6571299962 { return 4164040410; } if x <= 6614249635 { return 4168633795; } if x <= 6657199308 { return 4173087863; } if x <= 6700148981 { return 4177405975; } if x <= 6743098654 { return 4181591444; } if x <= 6786048327 { return 4185647533; } if x <= 6828998000 { return 4189577456; } } if x <= 7258494730 { if x <= 6871947673 { return 4193384375; } if x <= 6914897346 { return 4197071404; } if x <= 6957847019 { return 4200641603; } if x <= 7000796692 { return 4204097984; } if x <= 7043746365 { return 4207443505; } if x <= 7086696038 { return 4210681075; } if x <= 7129645711 { return 4213813550; } if x <= 7172595384 { return 4216843737; } if x <= 7215545057 { return 4219774388; } if x <= 7258494730 { return 4222608207; } } if x <= 7687991459 { if x <= 7301444403 { return 4225347845; } if x <= 7344394076 { return 4227995903; } if x <= 7387343749 { return 4230554929; } if x <= 7430293422 { return 4233027424; } if x <= 7473243095 { return 4235415834; } if x <= 7516192768 { return 4237722559; } if x <= 7559142440 { return 4239949947; } if x <= 7602092113 { return 4242100295; } if x <= 7645041786 { 
    return 4244175854; } if x <= 7687991459 { return 4246178824; } } if x <= 8117488189 { if x <= 7730941132 { return 4248111357; } if x <= 7773890805 { return 4249975557; } if x <= 7816840478 { return 4251773482; } if x <= 7859790151 { return 4253507139; } if x <= 7902739824 { return 4255178493; } if x <= 7945689497 { return 4256789460; } if x <= 7988639170 { return 4258341912; } if x <= 8031588843 { return 4259837674; } if x <= 8074538516 { return 4261278529; } if x <= 8117488189 { return 4262666214; } } if x <= 8546984919 { if x <= 8160437862 { return 4264002425; } if x <= 8203387535 { return 4265288813; } if x <= 8246337208 { return 4266526989; } if x <= 8289286881 { return 4267718520; } if x <= 8332236554 { return 4268864936; } if x <= 8375186227 { return 4269967724; } if x <= 8418135900 { return 4271028331; } if x <= 8461085573 { return 4272048167; } if x <= 8504035246 { return 4273028604; } if x <= 8546984919 { return 4273970975; } } if x <= 14602888806 { if x <= 8589934592 { return 4274876577; } if x <= 9019431321 { return 4282170584; } if x <= 9448928051 { return 4286966432; } if x <= 9878424780 { return 4290057389; } if x <= 10307921510 { return 4292010151; } if x <= 10737418240 { return 4293219450; } if x <= 11166914969 { return 4293953535; } if x <= 11596411699 { return 4294390341; } if x <= 12025908428 { return 4294645116; } if x <= 12455405158 { return 4294790781; } if x <= 12884901888 { return 4294872418; } if x <= 13314398617 { return 4294917265; } if x <= 13743895347 { return 4294941415; } if x <= 14173392076 { return 4294954163; } if x <= 14602888806 { return 4294960759; } } ONE }
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp64x64.cairo
// Module tree for the FP64x64 fixed-point implementation.
mod core; // FP64x64 aliasing of cubit f128: FixedTrait impl, operators, conversions
mod comp; // logical (xor/or/and/where) and bitwise comparison helpers
mod erf; // Gauss error function
mod lut; // lookup tables backing erf
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp64x64/comp.cairo
use orion::numbers::{FP64x64, FixedTrait};
use orion::numbers::FP64x64Impl;

/// Logical XOR on fixed-point operands: true when exactly one of the
/// two is zero (i.e. their truthiness differs).
fn xor(a: FP64x64, b: FP64x64) -> bool {
    let zero = FixedTrait::new(0, false);
    (a == zero || b == zero) && a != b
}

/// Logical OR: false only when both operands are zero.
fn or(a: FP64x64, b: FP64x64) -> bool {
    let zero = FixedTrait::new(0, false);
    !(a == zero && b == zero)
}

/// Logical AND: true only when neither operand is zero.
fn and(a: FP64x64, b: FP64x64) -> bool {
    let zero = FixedTrait::new(0, false);
    !(a == zero || b == zero)
}

/// Ternary select: yields `b` when the condition `a` is non-zero,
/// otherwise `c`.
fn where(a: FP64x64, b: FP64x64, c: FP64x64) -> FP64x64 {
    let zero = FixedTrait::new(0, false);
    if a != zero {
        b
    } else {
        c
    }
}

/// Bitwise AND applied independently to magnitude and sign.
fn bitwise_and(a: FP64x64, b: FP64x64) -> FP64x64 {
    let mag = a.mag & b.mag;
    let sign = a.sign & b.sign;
    FixedTrait::new(mag, sign)
}

/// Bitwise XOR applied independently to magnitude and sign.
fn bitwise_xor(a: FP64x64, b: FP64x64) -> FP64x64 {
    let mag = a.mag ^ b.mag;
    let sign = a.sign ^ b.sign;
    FixedTrait::new(mag, sign)
}

/// Bitwise OR applied independently to magnitude and sign.
fn bitwise_or(a: FP64x64, b: FP64x64) -> FP64x64 {
    let mag = a.mag | b.mag;
    let sign = a.sign | b.sign;
    FixedTrait::new(mag, sign)
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp64x64/core.cairo
use core::debug::PrintTrait;
use cubit::f128 as fp64x64;
use cubit::f128::types::Fixed as FP64x64;
use cubit::f128::ONE_u128 as ONE;
use cubit::f128::ops::MAX_u128 as MAX;
use orion::numbers::fixed_point::implementations::fp64x64::erf;
use orion::numbers::fixed_point::core::{FixedTrait};
use orion::numbers::fixed_point::utils;

const HALF: u128 = 9223372036854775808_u128; // 2 ** 63

// FixedTrait implementation for FP64x64, backed by the cubit f128 type
// (sign + u128 magnitude; ONE is cubit's ONE_u128 scale constant). Most
// math forwards straight to cubit; note the plain trig entry points
// (acos, asin, atan, cos, sin, tan) also dispatch to the *_fast variants.
impl FP64x64Impl of FixedTrait<FP64x64, u128> {
    fn ZERO() -> FP64x64 { FP64x64 { mag: 0, sign: false } }
    fn HALF() -> FP64x64 { FP64x64 { mag: HALF, sign: false } }
    fn ONE() -> FP64x64 { FP64x64 { mag: ONE, sign: false } }
    fn MAX() -> FP64x64 { FP64x64 { mag: MAX, sign: false } }
    fn new(mag: u128, sign: bool) -> FP64x64 { FP64x64 { mag: mag, sign: sign } }
    // `mag` is an unscaled integer; multiplied by ONE to enter fixed-point.
    fn new_unscaled(mag: u128, sign: bool) -> FP64x64 { FP64x64 { mag: mag * ONE, sign: sign } }
    // Interprets `val` as a raw (already scaled) signed felt magnitude.
    fn from_felt(val: felt252) -> FP64x64 { let mag = core::integer::u128_try_from_felt252(utils::felt_abs(val)).unwrap(); FixedTrait::new(mag, utils::felt_sign(val)) }
    fn abs(self: FP64x64) -> FP64x64 { fp64x64::ops::abs(self) }
    fn acos(self: FP64x64) -> FP64x64 { fp64x64::trig::acos_fast(self) }
    fn acos_fast(self: FP64x64) -> FP64x64 { fp64x64::trig::acos_fast(self) }
    fn acosh(self: FP64x64) -> FP64x64 { fp64x64::hyp::acosh(self) }
    fn asin(self: FP64x64) -> FP64x64 { fp64x64::trig::asin_fast(self) }
    fn asin_fast(self: FP64x64) -> FP64x64 { fp64x64::trig::asin_fast(self) }
    fn asinh(self: FP64x64) -> FP64x64 { fp64x64::hyp::asinh(self) }
    fn atan(self: FP64x64) -> FP64x64 { fp64x64::trig::atan_fast(self) }
    fn atan_fast(self: FP64x64) -> FP64x64 { fp64x64::trig::atan_fast(self) }
    fn atanh(self: FP64x64) -> FP64x64 { fp64x64::hyp::atanh(self) }
    fn ceil(self: FP64x64) -> FP64x64 { fp64x64::ops::ceil(self) }
    fn cos(self: FP64x64) -> FP64x64 { fp64x64::trig::cos_fast(self) }
    fn cos_fast(self: FP64x64) -> FP64x64 { fp64x64::trig::cos_fast(self) }
    fn cosh(self: FP64x64) -> FP64x64 { fp64x64::hyp::cosh(self) }
    fn floor(self: FP64x64) -> FP64x64 { fp64x64::ops::floor(self) }
    // Calculates the natural exponent of x: e^x
    fn exp(self: FP64x64) -> FP64x64 { fp64x64::ops::exp(self) }
    // Calculates the binary exponent of x: 2^x
    fn exp2(self: FP64x64) -> FP64x64 { fp64x64::ops::exp2(self) }
    // Calculates the natural logarithm of x: ln(x)
    // self must be greater than zero
    fn ln(self: FP64x64) -> FP64x64 { fp64x64::ops::ln(self) }
    // Calculates the binary logarithm of x: log2(x)
    // self must be greater than zero
    fn log2(self: FP64x64) -> FP64x64 { fp64x64::ops::log2(self) }
    // Calculates the base 10 log of x: log10(x)
    // self must be greater than zero
    fn log10(self: FP64x64) -> FP64x64 { fp64x64::ops::log10(self) }
    // Calculates the value of x^y and checks for overflow before returning
    // self is a fixed point value
    // b is a fixed point value
    fn pow(self: FP64x64, b: FP64x64) -> FP64x64 { fp64x64::ops::pow(self, b) }
    fn round(self: FP64x64) -> FP64x64 { fp64x64::ops::round(self) }
    fn sin(self: FP64x64) -> FP64x64 { fp64x64::trig::sin_fast(self) }
    fn sin_fast(self: FP64x64) -> FP64x64 { fp64x64::trig::sin_fast(self) }
    fn sinh(self: FP64x64) -> FP64x64 { fp64x64::hyp::sinh(self) }
    // Calculates the square root of a fixed point value
    // x must be positive
    fn sqrt(self: FP64x64) -> FP64x64 { fp64x64::ops::sqrt(self) }
    fn tan(self: FP64x64) -> FP64x64 { fp64x64::trig::tan_fast(self) }
    fn tan_fast(self: FP64x64) -> FP64x64 { fp64x64::trig::tan_fast(self) }
    fn tanh(self: FP64x64) -> FP64x64 { fp64x64::hyp::tanh(self) }
    // Deliberately unsupported for this type: always panics.
    fn sign(self: FP64x64) -> FP64x64 { panic(array!['not supported!']) }
    // NaN is encoded as "negative zero" (mag 0, sign true).
    fn NaN() -> FP64x64 { FP64x64 { mag: 0, sign: true } }
    fn is_nan(self: FP64x64) -> bool { self == FP64x64 { mag: 0, sign: true } }
    // NOTE(review): the infinity sentinel 4294967295 (2^32 - 1) looks
    // copied from a 32-bit implementation; in this 64.64 format it is an
    // ordinary tiny magnitude (~2^-32), so is_inf also fires on that
    // representable value — confirm the intended sentinel.
    fn INF() -> FP64x64 { FP64x64 { mag: 4294967295, sign: false } }
    fn POS_INF() -> FP64x64 { FP64x64 { mag: 4294967295, sign: false } }
    fn NEG_INF() -> FP64x64 { FP64x64 { mag: 4294967295, sign: true } }
    fn is_inf(self: FP64x64) -> bool { self.mag == 4294967295 }
    fn is_pos_inf(self: FP64x64) -> bool { self.is_inf() && !self.sign }
    fn is_neg_inf(self: FP64x64) -> bool { self.is_inf() && self.sign }
    fn erf(self: FP64x64) -> FP64x64 { erf::erf(self) }
}

// Debug printing: emits sign then magnitude.
impl FP64x64Print of PrintTrait<FP64x64> {
    fn print(self: FP64x64) { self.sign.print(); self.mag.print(); }
}

// Into a raw felt without unscaling
impl FP64x64IntoFelt252 of Into<FP64x64, felt252> {
    fn into(self: FP64x64) -> felt252 {
        let mag_felt = self.mag.into();
        if self.sign { mag_felt * -1 } else { mag_felt * 1 }
    }
}

// Lossy unsigned conversions: negative values map to None, positive
// values are unscaled by ONE (truncating toward zero).
impl FP64x64TryIntoU128 of TryInto<FP64x64, u128> {
    fn try_into(self: FP64x64) -> Option<u128> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            Option::Some((self.mag / ONE).into())
        }
    }
}

impl FP64x64TryIntoU16 of TryInto<FP64x64, u16> {
    fn try_into(self: FP64x64) -> Option<u16> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP64x64TryIntoU32 of TryInto<FP64x64, u32> {
    fn try_into(self: FP64x64) -> Option<u32> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP64x64TryIntoU8 of TryInto<FP64x64, u8> {
    fn try_into(self: FP64x64) -> Option<u8> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP64x64TryIntoI8 of TryInto<FP64x64, i8> {
    fn try_into(self: FP64x64) -> Option<i8> { _i8_try_from_fp(self) }
}

// impl FP64x64PartialEq of PartialEq<FP64x64> {
//     #[inline(always)]
//     fn eq(lhs: @FP64x64, rhs: @FP64x64) -> bool {
//         return fp64x64::ops::eq(lhs, rhs);
//     }
//     #[inline(always)]
//     fn ne(lhs: @FP64x64, rhs: @FP64x64) -> bool {
//         return fp64x64::ops::ne(lhs, rhs);
//     }
// }

// Arithmetic / comparison operators: thin forwarders to cubit ops.
impl FP64x64Add of Add<FP64x64> {
    fn add(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { fp64x64::ops::add(lhs, rhs) }
}

impl FP64x64AddEq of AddEq<FP64x64> {
    #[inline(always)]
    fn add_eq(ref self: FP64x64, other: FP64x64) { self = fp64x64::ops::add(self, other); }
}

impl FP64x64Sub of Sub<FP64x64> {
    fn sub(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { fp64x64::ops::sub(lhs, rhs) }
}

impl FP64x64SubEq of SubEq<FP64x64> {
    #[inline(always)]
    fn sub_eq(ref self: FP64x64, other: FP64x64) { self = fp64x64::ops::sub(self, other); }
}

impl FP64x64Mul of Mul<FP64x64> {
    fn mul(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { fp64x64::ops::mul(lhs, rhs) }
}

impl FP64x64MulEq of MulEq<FP64x64> {
    #[inline(always)]
    fn mul_eq(ref self: FP64x64, other: FP64x64) { self = fp64x64::ops::mul(self, other); }
}

impl FP64x64Div of Div<FP64x64> {
    fn div(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { fp64x64::ops::div(lhs, rhs) }
}

impl FP64x64DivEq of DivEq<FP64x64> {
    #[inline(always)]
    fn div_eq(ref self: FP64x64, other: FP64x64) { self = fp64x64::ops::div(self, other); }
}

impl FP64x64PartialOrd of PartialOrd<FP64x64> {
    #[inline(always)]
    fn ge(lhs: FP64x64, rhs: FP64x64) -> bool { fp64x64::ops::ge(lhs, rhs) }
    #[inline(always)]
    fn gt(lhs: FP64x64, rhs: FP64x64) -> bool { fp64x64::ops::gt(lhs, rhs) }
    #[inline(always)]
    fn le(lhs: FP64x64, rhs: FP64x64) -> bool { fp64x64::ops::le(lhs, rhs) }
    #[inline(always)]
    fn lt(lhs: FP64x64, rhs: FP64x64) -> bool { fp64x64::ops::lt(lhs, rhs) }
}

impl FP64x64Neg of Neg<FP64x64> {
    #[inline(always)]
    fn neg(a: FP64x64) -> FP64x64 { fp64x64::ops::neg(a) }
}

impl FP64x64Rem of Rem<FP64x64> {
    #[inline(always)]
    fn rem(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { fp64x64::ops::rem(lhs, rhs) }
}

// Structural equality on snapshots: both magnitude and sign must match.
fn eq(a: @FP64x64, b: @FP64x64) -> bool { (*a.mag == *b.mag) && (*a.sign == *b.sign) }

/// INTERNAL
// Converts to i8 by unscaling (truncation toward zero). Magnitudes that
// do not fit in u8 yield None; NOTE(review): unscaled values in
// 128..=255 pass the u8 step but make the felt -> i8 unwrap panic —
// confirm that is intended.
fn _i8_try_from_fp(x: FP64x64) -> Option<i8> {
    let unscaled_mag: Option<u8> = (x.mag / ONE).try_into();
    match unscaled_mag {
        Option::Some => {
            let number_felt: felt252 = unscaled_mag.unwrap().into();
            let mut number_i8: i8 = number_felt.try_into().unwrap();
            if x.sign {
                return Option::Some(number_i8 * -1_i8);
            }
            Option::Some(number_i8)
        },
        Option::None => Option::None(())
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp64x64/erf.cairo
use cubit::f128::ONE_u128 as ONE;
use orion::numbers::{FP64x64, FixedTrait};
use orion::numbers::fixed_point::implementations::fp64x64::lut::erf_lut;

const ERF_COMPUTATIONAL_ACCURACY: u128 = 100_u128;
const ROUND_CHECK_NUMBER: u128 = 10_u128;
// Values > MAX_ERF_NUMBER return 1
const MAX_ERF_NUMBER: u128 = 64563604257983430656_u128;
// Values <= ERF_TRUNCATION_NUMBER -> two decimal places, and values > ERF_TRUNCATION_NUMBER -> one decimal place
const ERF_TRUNCATION_NUMBER: u128 = 36893488147419103232_u128;

/// Gauss error function for FP64x64.
///
/// The magnitude is resolved through the lookup table; anything beyond
/// MAX_ERF_NUMBER saturates to ONE. erf is odd, so the output keeps the
/// input's sign.
fn erf(x: FP64x64) -> FP64x64 {
    let lut_value = if x.mag > MAX_ERF_NUMBER {
        // Outside the table's range erf(x) is ~1.
        ONE
    } else {
        erf_lut(x.mag)
    };

    FP64x64 { mag: lut_value, sign: x.sign }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp64x64/lut.cairo
use orion::numbers::fixed_point::implementations::fp64x64::core::ONE;

/// Piecewise-constant lookup table for erf on FP64x64 magnitudes
/// (fixed-point scale ONE = cubit's ONE_u128).
///
/// The cascading `if x <= threshold` chain returns the pre-scaled erf
/// value attached to the smallest table threshold >= `x`; the outer
/// `if`s only bucket the thresholds to bound the comparison count.
/// Inputs past the last threshold fall through to `ONE` (erf saturates
/// to 1). The fine-step region ends at 36893488147419103232 (matching
/// ERF_TRUNCATION_NUMBER in erf.cairo), after which the thresholds step
/// more coarsely — presumably one decimal place; TODO confirm against
/// the table generator.
fn erf_lut(x: u128) -> u128 {
    // Construct the erf lookup table
    if x <= 1660206966633859584 { if x <= 0 { return 0; } if x <= 184467440737095520 { return 208142279036071072; } if x <= 368934881474191040 { return 416242934472567232; } if x <= 553402322211286528 { return 624260367679495296; } if x <= 737869762948382080 { return 832153029941062528; } if x <= 922337203685477632 { return 1039879447350402944; } if x <= 1106804644422573056 { return 1247398245629553408; } if x <= 1291272085159668736 { return 1454668174849927424; } if x <= 1475739525896764160 { return 1661648134028665088; } if x <= 1660206966633859584 { return 1868297195576427008; } } if x <= 3504881374004814848 { if x <= 1844674407370955264 { return 2074574629572391936; } if x <= 2029141848108050688 { return 2280439927842463744; } if x <= 2213609288845146112 { return 2485852827816977408; } if x <= 2398076729582241792 { return 2690773336144481280; } if x <= 2582544170319337472 { return 2895161752038532608; } if x <= 2767011611056432640 { return 3098978690334796800; } if x <= 2951479051793528320 { return 3302185104236156928; } if x <= 3135946492530624000 { return 3504742307723958272; } if x <= 3320413933267719168 { return 3706611997613982720; } if x <= 3504881374004814848 { return 3907756275236240384; } } if x <= 5349555781375769600 { if x <= 3689348814741910528 { return 4108137667718166528; } if x <= 3873816255479005696 { return 4307719148851377152; } if x <= 4058283696216101376 { return 4506464159522699776; } if x <= 4242751136953197056 { return 4704336627690769408; } if x <= 4427218577690292224 { return 4901300987890141184; } if x <= 4611686018427387904 { return 5097322200245477376; } if x <= 4796153459164483584 { return 5292365768979031040; } if x <= 4980620899901579264 { return 5486397760395360256; } if x <= 5165088340638674944 { return 5679384820327877632; } if x <= 5349555781375769600 { return 5871294191032579072; } } if x <= 
    7194230188746725376 { if x <= 5534023222112865280 { return 6062093727515032576; } if x <= 5718490662849960960 { return 6251751913277435904; } if x <= 5902958103587056640 { return 6440237875473368064; } if x <= 6087425544324152320 { return 6627521399458594816; } if x <= 6271892985061248000 { return 6813572942727099392; } if x <= 6456360425798342656 { return 6998363648222307328; } if x <= 6640827866535438336 { return 7181865357014296576; } if x <= 6825295307272534016 { return 7364050620334585856; } if x <= 7009762748009629696 { return 7544892710960923648; } if x <= 7194230188746725376 { return 7724365633945352192; } } if x <= 9038904596117680128 { if x <= 7378697629483821056 { return 7902444136679609344; } if x <= 7563165070220915712 { return 8079103718292817920; } if x <= 7747632510958011392 { return 8254320638377208832; } if x <= 7932099951695107072 { return 8428071925038478336; } if x <= 8116567392432202752 { return 8600335382268215296; } if x <= 8301034833169298432 { return 8771089596636659712; } if x <= 8485502273906394112 { return 8940313943304876032; } if x <= 8669969714643488768 { return 9107988591356256256; } if x <= 8854437155380584448 { return 9274094508448081920; } if x <= 9038904596117680128 { return 9438613464784658432; } } if x <= 10883579003488634880 { if x <= 9223372036854775808 { return 9601528036414361600; } if x <= 9407839477591871488 { return 9762821607853701120; } if x <= 9592306918328967168 { return 9922478374042292224; } if x <= 9776774359066062848 { return 10080483341633368064; } if x <= 9961241799803158528 { return 10236822329625237504; } if x <= 10145709240540254208 { return 10391481969339820032; } if x <= 10330176681277349888 { return 10544449703755059200; } if x <= 10514644122014443520 { return 10695713786198818816; } if x <= 10699111562751539200 { return 10845263278412423168; } if x <= 10883579003488634880 { return 10993088047992748032; } } if x <= 12728253410859589632 { if x <= 11068046444225730560 { return 11139178765222393856; } if x 
    <= 11252513884962826240 { return 11283526899298078720; } if x <= 11436981325699921920 { return 11426124713968005120; } if x <= 11621448766437017600 { return 11566965262589513728; } if x <= 11805916207174113280 { return 11706042382618923008; } if x <= 11990383647911208960 { return 11843350689545969664; } if x <= 12174851088648304640 { return 11978885570285762560; } if x <= 12359318529385400320 { return 12112643176041672704; } if x <= 12543785970122496000 { return 12244620414653018112; } if x <= 12728253410859589632 { return 12374814942441867264; } } if x <= 14572927818230546432 { if x <= 12912720851596685312 { return 12503225155573657600; } if x <= 13097188292333780992 { return 12629850180946728960; } if x <= 13281655733070876672 { return 12754689866626244608; } if x <= 13466123173807972352 { return 12877744771838261248; } if x <= 13650590614545068032 { return 12999016156540069888; } if x <= 13835058055282163712 { return 13118505970583140352; } if x <= 14019525496019259392 { return 13236216842485327872; } if x <= 14203992936756355072 { return 13352152067829151744; } if x <= 14388460377493450752 { return 13466315597303212032; } if x <= 14572927818230546432 { return 13578712024403965952; } } if x <= 16417602225601501184 { if x <= 14757395258967642112 { return 13689346572815177728; } if x <= 14941862699704737792 { return 13798225083482576896; } if x <= 15126330140441831424 { return 13905354001401262080; } if x <= 15310797581178927104 { return 14010740362133477376; } if x <= 15495265021916022784 { return 14114391778074478592; } if x <= 15679732462653118464 { return 14216316424484128768; } if x <= 15864199903390214144 { return 14316523025301962752; } if x <= 16048667344127309824 { return 14415020838763323392; } if x <= 16233134784864405504 { return 14511819642834194432; } if x <= 16417602225601501184 { return 14606929720482222080; } } if x <= 18262276632972455936 { if x <= 16602069666338596864 { return 14700361844801351680; } if x <= 16786537107075692544 { return 
    14792127264007346176; } if x <= 16971004547812788224 { return 14882237686321330176; } if x <= 17155471988549883904 { return 14970705264758325248; } if x <= 17339939429286977536 { return 15057542581837537280; } if x <= 17524406870024073216 { return 15142762634230988800; } if x <= 17708874310761168896 { return 15226378817366812672; } if x <= 17893341751498264576 { return 15308404910003300352; } if x <= 18077809192235360256 { return 15388855058789533696; } if x <= 18262276632972455936 { return 15467743762828154880; } } if x <= 20106951040343412736 { if x <= 18446744073709551616 { return 15545085858255493120; } if x <= 18631211514446647296 { return 15620896502854008832; } if x <= 18815678955183742976 { return 15695191160711634944; } if x <= 19000146395920838656 { return 15767985586942304256; } if x <= 19184613836657934336 { return 15839295812481531904; } if x <= 19369081277395030016 { return 15909138128970633216; } if x <= 19553548718132125696 { return 15977529073742716928; } if x <= 19738016158869221376 { return 16044485414923208704; } if x <= 19922483599606317056 { return 16110024136657332224; } if x <= 20106951040343412736 { return 16174162424476436480; } } if x <= 21951625447714365440 { if x <= 20291418481080508416 { return 16236917650814795776; } if x <= 20475885921817604096 { return 16298307360687947776; } if x <= 20660353362554699776 { return 16358349257543309312; } if x <= 20844820803291791360 { return 16417061189293291520; } if x <= 21029288244028887040 { return 16474461134540791808; } if x <= 21213755684765982720 { return 16530567189006364672; } if x <= 21398223125503078400 { return 16585397552166088704; } if x <= 21582690566240174080 { return 16638970514108524544; } if x <= 21767158006977269760 { return 16691304442618875904; } if x <= 21951625447714365440 { return 16742417770497863680; } } if x <= 23796299855085322240 { if x <= 22136092888451461120 { return 16792328983122491392; } if x <= 22320560329188556800 { return 16841056606255333376; } if x <= 
    22505027769925652480 { return 16888619194108602368; } if x <= 22689495210662748160 { return 16935035317668771840; } if x <= 22873962651399843840 { return 16980323553287045120; } if x <= 23058430092136939520 { return 17024502471540604928; } if x <= 23242897532874035200 { return 17067590626369081344; } if x <= 23427364973611130880 { return 17109606544490213376; } if x <= 23611832414348226560 { return 17150568715098355712; } if x <= 23796299855085322240 { return 17190495579848931328; } } if x <= 25640974262456274944 { if x <= 23980767295822417920 { return 17229405523131617280; } if x <= 24165234736559513600 { return 17267316862634573824; } if x <= 24349702177296609280 { return 17304247840201650176; } if x <= 24534169618033704960 { return 17340216612984107008; } if x <= 24718637058770800640 { return 17375241244887996416; } if x <= 24903104499507896320 { return 17409339698317971456; } if x <= 25087571940244992000 { return 17442529826217906176; } if x <= 25272039380982087680 { return 17474829364408369152; } if x <= 25456506821719179264 { return 17506255924220641280; } if x <= 25640974262456274944 { return 17536826985426591744; } } if x <= 27485648669827231744 { if x <= 25825441703193370624 { return 17566559889463431168; } if x <= 26009909143930466304 { return 17595471832952045568; } if x <= 26194376584667561984 { return 17623579861507229696; } if x <= 26378844025404657664 { return 17650900863837954048; } if x <= 26563311466141753344 { return 17677451566135410688; } if x <= 26747778906878849024 { return 17703248526746337280; } if x <= 26932246347615944704 { return 17728308131128877056; } if x <= 27116713788353040384 { return 17752646587087935488; } if x <= 27301181229090136064 { return 17776279920286781440; } if x <= 27485648669827231744 { return 17799223970031376384; } } if x <= 29330323077198188544 { if x <= 27670116110564327424 { return 17821494385323737088; } if x <= 27854583551301423104 { return 17843106621180358656; } if x <= 28039050992038518784 { return 
    17864075935211624448; } if x <= 28223518432775614464 { return 17884417384457840640; } if x <= 28407985873512710144 { return 17904145822477408256; } if x <= 28592453314249805824 { return 17923275896682506240; } if x <= 28776920754986901504 { return 17941822045917437952; } if x <= 28961388195723997184 { return 17959798498274711552; } if x <= 29145855636461092864 { return 17977219269143760896; } if x <= 29330323077198188544 { return 17994098159487121408; } } if x <= 31174997484569141248 { if x <= 29514790517935284224 { return 18010448754338713600; } if x <= 29699257958672379904 { return 18026284421518878720; } if x <= 29883725399409475584 { return 18041618310560610304; } if x <= 30068192840146567168 { return 18056463351841458176; } if x <= 30252660280883662848 { return 18070832255915431936; } if x <= 30437127721620758528 { return 18084737513039206400; } if x <= 30621595162357854208 { return 18098191392886906880; } if x <= 30806062603094949888 { return 18111205944447655936; } if x <= 30990530043832045568 { return 18123792996100098048; } if x <= 31174997484569141248 { return 18135964155858038784; } } if x <= 33019671891940098048 { if x <= 31359464925306236928 { return 18147730811781371904; } if x <= 31543932366043332608 { return 18159104132546453504; } if x <= 31728399806780428288 { return 18170095068170047488; } if x <= 31912867247517523968 { return 18180714350881038336; } if x <= 32097334688254619648 { return 18190972496134107136; } if x <= 32281802128991715328 { return 18200879803759552512; } if x <= 32466269569728811008 { return 18210446359243550720; } if x <= 32650737010465906688 { return 18219682035133120512; } if x <= 32835204451203002368 { return 18228596492560154624; } if x <= 33019671891940098048 { return 18237199182878894080; } } if x <= 34864346299311050752 { if x <= 33204139332677193728 { return 18245499349411323904; } if x <= 33388606773414289408 { return 18253506029294995456; } if x <= 33573074214151385088 { return 18261228055427880960; } if x <= 
    33757541654888480768 { return 18268674058504921088; } if x <= 33942009095625576448 { return 18275852469141008384; } if x <= 34126476536362672128 { return 18282771520075268096; } if x <= 34310943977099767808 { return 18289439248451522560; } if x <= 34495411417836863488 { return 18295863498169980928; } if x <= 34679878858573955072 { return 18302051922305267712; } if x <= 34864346299311050752 { return 18308011985585967104; } } if x <= 36709020706682007552 { if x <= 35048813740048146432 { return 18313750966931048448; } if x <= 35233281180785242112 { return 18319275962038544384; } if x <= 35417748621522337792 { return 18324593886022047744; } if x <= 35602216062259433472 { return 18329711476090615808; } if x <= 35786683502996529152 { return 18334635294267887616; } if x <= 35971150943733624832 { return 18339371730146226176; } if x <= 36155618384470720512 { return 18343927003671875584; } if x <= 36340085825207816192 { return 18348307167957243904; } if x <= 36524553265944911872 { return 18352518112116494336; } if x <= 36709020706682007552 { return 18356565564120772608; } } if x <= 62718929850612473856 { if x <= 36893488147419103232 { return 18360455093669533696; } if x <= 38738162554790060032 { return 18391782614824026112; } if x <= 40582836962161016832 { return 18412380624802023424; } if x <= 42427511369531965440 { return 18425656187587059712; } if x <= 44272185776902922240 { return 18434043234066948096; } if x <= 46116860184273879040 { return 18439237133993463808; } if x <= 47961534591644835840 { return 18442390007235248128; } if x <= 49806208999015792640 { return 18444266072035147776; } if x <= 51650883406386741248 { return 18445360324505407488; } if x <= 53495557813757698048 { return 18445985951670278144; } if x <= 55340232221128654848 { return 18446336575964956672; } if x <= 57184906628499611648 { return 18446529193908295680; } if x <= 59029581035870568448 { return 18446632918035736576; } if x <= 60874255443241517056 { return 18446687668919484416; } if x <= 
    62718929850612473856 { return 18446715997887504384; } } ONE }
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23.cairo
// FP8x23 fixed-point implementation: submodule declarations.
// `core` holds the type and trait impls, `math` the numeric kernels,
// `helpers` the test assertion utilities.
mod core;
mod math;
mod helpers;
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23/core.cairo
// FP8x23: sign/magnitude fixed-point with 8 integer bits and 23 fractional
// bits. The value represented is mag / 2^23, negated when `sign` is true.
use core::debug::PrintTrait;

use orion::numbers::fixed_point::core::{FixedTrait};
use orion::numbers::fixed_point::implementations::fp8x23::math::{core as core_math, trig, hyp, erf};
use orion::numbers::fixed_point::utils;

/// A struct representing a fixed point number.
#[derive(Serde, Copy, Drop)]
struct FP8x23 {
    mag: u32,  // scaled magnitude: value = mag / 2^23
    sign: bool // true => negative
}

// CONSTANTS

const TWO: u32 = 16777216; // 2 ** 24
const ONE: u32 = 8388608; // 2 ** 23
const HALF: u32 = 4194304; // 2 ** 22
const MAX: u32 = 2147483648; // 2 ** 31

impl FP8x23Impl of FixedTrait<FP8x23, u32> {
    fn ZERO() -> FP8x23 {
        FP8x23 { mag: 0, sign: false }
    }

    fn HALF() -> FP8x23 {
        FP8x23 { mag: HALF, sign: false }
    }

    fn ONE() -> FP8x23 {
        FP8x23 { mag: ONE, sign: false }
    }

    fn MAX() -> FP8x23 {
        FP8x23 { mag: MAX, sign: false }
    }

    // Builds a value from an already-scaled (raw) magnitude.
    fn new(mag: u32, sign: bool) -> FP8x23 {
        FP8x23 { mag: mag, sign: sign }
    }

    // Builds a value from an integer magnitude by scaling with ONE.
    // NOTE(review): `mag * ONE` can overflow u32 for mag >= 512 — no guard here.
    fn new_unscaled(mag: u32, sign: bool) -> FP8x23 {
        FP8x23 { mag: mag * ONE, sign: sign }
    }

    // Interprets a felt252 as a signed raw (already-scaled) magnitude.
    fn from_felt(val: felt252) -> FP8x23 {
        let mag = core::integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap();
        FixedTrait::new(mag, utils::felt_sign(val))
    }

    fn abs(self: FP8x23) -> FP8x23 {
        core_math::abs(self)
    }

    // NOTE: the "accurate" trig entry points all delegate to the *_fast
    // LUT-based variants in this implementation.
    fn acos(self: FP8x23) -> FP8x23 {
        trig::acos_fast(self)
    }

    fn acos_fast(self: FP8x23) -> FP8x23 {
        trig::acos_fast(self)
    }

    fn acosh(self: FP8x23) -> FP8x23 {
        hyp::acosh(self)
    }

    fn asin(self: FP8x23) -> FP8x23 {
        trig::asin_fast(self)
    }

    fn asin_fast(self: FP8x23) -> FP8x23 {
        trig::asin_fast(self)
    }

    fn asinh(self: FP8x23) -> FP8x23 {
        hyp::asinh(self)
    }

    fn atan(self: FP8x23) -> FP8x23 {
        trig::atan_fast(self)
    }

    fn atan_fast(self: FP8x23) -> FP8x23 {
        trig::atan_fast(self)
    }

    fn atanh(self: FP8x23) -> FP8x23 {
        hyp::atanh(self)
    }

    fn ceil(self: FP8x23) -> FP8x23 {
        core_math::ceil(self)
    }

    fn cos(self: FP8x23) -> FP8x23 {
        trig::cos_fast(self)
    }

    fn cos_fast(self: FP8x23) -> FP8x23 {
        trig::cos_fast(self)
    }

    fn cosh(self: FP8x23) -> FP8x23 {
        hyp::cosh(self)
    }

    fn floor(self: FP8x23) -> FP8x23 {
        core_math::floor(self)
    }

    // Calculates the natural exponent of x: e^x
    fn exp(self: FP8x23) -> FP8x23 {
        core_math::exp(self)
    }

    // Calculates the binary exponent of x: 2^x
    fn exp2(self: FP8x23) -> FP8x23 {
        core_math::exp2(self)
    }

    // Calculates the natural logarithm of x: ln(x)
    // self must be greater than zero
    fn ln(self: FP8x23) -> FP8x23 {
        core_math::ln(self)
    }

    // Calculates the binary logarithm of x: log2(x)
    // self must be greater than zero
    fn log2(self: FP8x23) -> FP8x23 {
        core_math::log2(self)
    }

    // Calculates the base 10 log of x: log10(x)
    // self must be greater than zero
    fn log10(self: FP8x23) -> FP8x23 {
        core_math::log10(self)
    }

    // Calculates the value of x^y and checks for overflow before returning
    // self is a fixed point value
    // b is a fixed point value
    fn pow(self: FP8x23, b: FP8x23) -> FP8x23 {
        core_math::pow(self, b)
    }

    fn round(self: FP8x23) -> FP8x23 {
        core_math::round(self)
    }

    fn sin(self: FP8x23) -> FP8x23 {
        trig::sin_fast(self)
    }

    fn sin_fast(self: FP8x23) -> FP8x23 {
        trig::sin_fast(self)
    }

    fn sinh(self: FP8x23) -> FP8x23 {
        hyp::sinh(self)
    }

    // Calculates the square root of a fixed point value
    // x must be positive
    fn sqrt(self: FP8x23) -> FP8x23 {
        core_math::sqrt(self)
    }

    fn tan(self: FP8x23) -> FP8x23 {
        trig::tan_fast(self)
    }

    fn tan_fast(self: FP8x23) -> FP8x23 {
        trig::tan_fast(self)
    }

    fn tanh(self: FP8x23) -> FP8x23 {
        hyp::tanh(self)
    }

    fn sign(self: FP8x23) -> FP8x23 {
        core_math::sign(self)
    }

    // NaN is encoded as "negative zero" (mag 0, sign true); ZERO() uses
    // sign false, so the two are distinguishable by PartialEq.
    fn NaN() -> FP8x23 {
        FP8x23 { mag: 0, sign: true }
    }

    fn is_nan(self: FP8x23) -> bool {
        self == FP8x23 { mag: 0, sign: true }
    }

    // Infinity is encoded as the full u32 range (2^32 - 1), above MAX.
    fn INF() -> FP8x23 {
        FP8x23 { mag: 4294967295, sign: false }
    }

    fn POS_INF() -> FP8x23 {
        FP8x23 { mag: 4294967295, sign: false }
    }

    fn NEG_INF() -> FP8x23 {
        FP8x23 { mag: 4294967295, sign: true }
    }

    fn is_inf(self: FP8x23) -> bool {
        self.mag == 4294967295
    }

    fn is_pos_inf(self: FP8x23) -> bool {
        self.is_inf() && !self.sign
    }

    fn is_neg_inf(self: FP8x23) -> bool {
        self.is_inf() && self.sign
    }

    fn erf(self: FP8x23) -> FP8x23 {
        erf::erf(self)
    }
}

// Debug printing: emits sign then raw magnitude.
impl FP8x23Print of PrintTrait<FP8x23> {
    fn print(self: FP8x23) {
        self.sign.print();
        self.mag.print();
    }
}

// Into a raw felt without unscaling
impl FP8x23IntoFelt252 of Into<FP8x23, felt252> {
    fn into(self: FP8x23) -> felt252 {
        let mag_felt = self.mag.into();

        if self.sign {
            mag_felt * -1
        } else {
            mag_felt * 1
        }
    }
}

impl FP8x23TryIntoU128 of TryInto<FP8x23, u128> {
    fn try_into(self: FP8x23) -> Option<u128> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            Option::Some((self.mag / ONE).into())
        }
    }
}

impl FP8x23TryIntoU64 of TryInto<FP8x23, u64> {
    fn try_into(self: FP8x23) -> Option<u64> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            Option::Some((self.mag / ONE).into())
        }
    }
}

impl FP8x23TryIntoU32 of TryInto<FP8x23, u32> {
    fn try_into(self: FP8x23) -> Option<u32> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            Option::Some(self.mag / ONE)
        }
    }
}

impl FP8x23TryIntoU16 of TryInto<FP8x23, u16> {
    fn try_into(self: FP8x23) -> Option<u16> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP8x23TryIntoU8 of TryInto<FP8x23, u8> {
    fn try_into(self: FP8x23) -> Option<u8> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP8x23IntoI32 of Into<FP8x23, i32> {
    fn into(self: FP8x23) -> i32 {
        _i32_into_fp(self)
    }
}

impl FP8x23TryIntoI8 of TryInto<FP8x23, i8> {
    fn try_into(self: FP8x23) -> Option<i8> {
        _i8_try_from_fp(self)
    }
}

impl FP8x23PartialEq of PartialEq<FP8x23> {
    #[inline(always)]
    fn eq(lhs: @FP8x23, rhs: @FP8x23) -> bool {
        core_math::eq(lhs, rhs)
    }

    #[inline(always)]
    fn ne(lhs: @FP8x23, rhs: @FP8x23) -> bool {
        core_math::ne(lhs, rhs)
    }
}

impl FP8x23Add of Add<FP8x23> {
    fn add(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
        core_math::add(lhs, rhs)
    }
}

impl FP8x23AddEq of AddEq<FP8x23> {
    #[inline(always)]
    fn add_eq(ref self: FP8x23, other: FP8x23) {
        self = Add::add(self, other);
    }
}

impl FP8x23Sub of Sub<FP8x23> {
    fn sub(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
        core_math::sub(lhs, rhs)
    }
}

impl FP8x23SubEq of SubEq<FP8x23> {
    #[inline(always)]
    fn sub_eq(ref self: FP8x23, other: FP8x23) {
        self = Sub::sub(self, other);
    }
}

impl FP8x23Mul of Mul<FP8x23> {
    fn mul(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
        core_math::mul(lhs, rhs)
    }
}

impl FP8x23MulEq of MulEq<FP8x23> {
    #[inline(always)]
    fn mul_eq(ref self: FP8x23, other: FP8x23) {
        self = Mul::mul(self, other);
    }
}

impl FP8x23Div of Div<FP8x23> {
    fn div(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
        core_math::div(lhs, rhs)
    }
}

impl FP8x23DivEq of DivEq<FP8x23> {
    #[inline(always)]
    fn div_eq(ref self: FP8x23, other: FP8x23) {
        self = Div::div(self, other);
    }
}

impl FP8x23PartialOrd of PartialOrd<FP8x23> {
    #[inline(always)]
    fn ge(lhs: FP8x23, rhs: FP8x23) -> bool {
        core_math::ge(lhs, rhs)
    }

    #[inline(always)]
    fn gt(lhs: FP8x23, rhs: FP8x23) -> bool {
        core_math::gt(lhs, rhs)
    }

    #[inline(always)]
    fn le(lhs: FP8x23, rhs: FP8x23) -> bool {
        core_math::le(lhs, rhs)
    }

    #[inline(always)]
    fn lt(lhs: FP8x23, rhs: FP8x23) -> bool {
        core_math::lt(lhs, rhs)
    }
}

impl FP8x23Neg of Neg<FP8x23> {
    #[inline(always)]
    fn neg(a: FP8x23) -> FP8x23 {
        core_math::neg(a)
    }
}

impl FP8x23Rem of Rem<FP8x23> {
    #[inline(always)]
    fn rem(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
        core_math::rem(lhs, rhs)
    }
}

/// INTERNAL

// Truncating conversion to i32 (unscale, drop the fraction, re-apply sign).
fn _i32_into_fp(x: FP8x23) -> i32 {
    // i32 { mag: x.mag / ONE, sign: x.sign }
    let number_felt: felt252 = (x.mag / ONE).into();
    let number_i32: i32 = number_felt.try_into().unwrap();
    if x.sign {
        return number_i32 * -1_i32;
    }
    number_i32
}

// Truncating conversion to i8; None when the unscaled magnitude exceeds u8.
fn _i8_try_from_fp(x: FP8x23) -> Option<i8> {
    let unscaled_mag: Option<u8> = (x.mag / ONE).try_into();

    // Option::Some(i8 { mag: unscaled_mag.unwrap(), sign: x.sign })
    match unscaled_mag {
        Option::Some => {
            let number_felt: felt252 = unscaled_mag.unwrap().into();
            let mut number_i8: i8 = number_felt.try_into().unwrap();
            if x.sign {
                return Option::Some(number_i8 * -1_i8);
            }
            Option::Some(number_i8)
        },
        Option::None => Option::None(())
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23/helpers.cairo
// Test assertion helpers for FP8x23 with configurable tolerance.
use core::debug::PrintTrait;

use orion::numbers::fixed_point::implementations::fp8x23::core::{
    HALF, ONE, TWO, FP8x23, FP8x23Sub, FP8x23Div, FixedTrait, FP8x23Print
};

// Default tolerance in raw FP8x23 units: 8 / 2^23 ~= 1e-6.
const DEFAULT_PRECISION: u32 = 8; // 1e-6

// To use `DEFAULT_PRECISION`, final arg is: `Option::None(())`.
// To use `custom_precision` of 430_u32: `Option::Some(430_u32)`.
//
// Asserts |result - expected| <= precision, where `expected` is a raw
// (already scaled) magnitude encoded as a felt252.
fn assert_precise(result: FP8x23, expected: felt252, msg: felt252, custom_precision: Option<u32>) {
    let precision = match custom_precision {
        Option::Some(val) => val,
        Option::None => DEFAULT_PRECISION,
    };

    let diff = (result - FixedTrait::from_felt(expected)).mag;

    if (diff > precision) {
        // Print the offending value before the assert fires, for debugging.
        result.print();
        assert(diff <= precision, msg);
    }
}

// Relative variant: asserts |(result - expected) / result| <= precision.
// NOTE(review): divides by `result`, so a zero result would divide by zero;
// callers appear to only use it with non-zero expectations — confirm.
fn assert_relative(result: FP8x23, expected: felt252, msg: felt252, custom_precision: Option<u32>) {
    let precision = match custom_precision {
        Option::Some(val) => val,
        Option::None => DEFAULT_PRECISION,
    };

    let diff = result - FixedTrait::from_felt(expected);
    let rel_diff = (diff / result).mag;

    if (rel_diff > precision) {
        result.print();
        assert(rel_diff <= precision, msg);
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23/math.cairo
// FP8x23 math submodules: arithmetic core, comparisons, lookup tables,
// trigonometric, hyperbolic and error functions.
mod core;
mod comp;
mod lut;
mod trig;
mod hyp;
mod erf;
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo
// Comparison, logical and bitwise helpers for FP8x23.
use orion::numbers::fixed_point::implementations::fp8x23::core::{
    FP8x23, FixedTrait, FP8x23PartialOrd, FP8x23PartialEq
};

// Returns the larger operand; ties return `a`.
fn max(a: FP8x23, b: FP8x23) -> FP8x23 {
    if a >= b {
        a
    } else {
        b
    }
}

// Returns the smaller operand; ties return `a`.
fn min(a: FP8x23, b: FP8x23) -> FP8x23 {
    if a <= b {
        a
    } else {
        b
    }
}

// Logical XOR on truthiness: true iff exactly one operand is +0.
// NOTE(review): compares against new(0, false), so a "negative zero"
// (mag 0, sign true) counts as truthy here — confirm that is intended.
fn xor(a: FP8x23, b: FP8x23) -> bool {
    if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) {
        true
    } else {
        false
    }
}

// Logical OR on truthiness: false only when both operands are +0.
fn or(a: FP8x23, b: FP8x23) -> bool {
    let zero = FixedTrait::new(0, false);
    if a == zero && b == zero {
        false
    } else {
        true
    }
}

// Logical AND on truthiness: false when either operand is +0.
fn and(a: FP8x23, b: FP8x23) -> bool {
    let zero = FixedTrait::new(0, false);
    if a == zero || b == zero {
        false
    } else {
        true
    }
}

// Select: returns `b` when `a` is non-zero, otherwise `c`.
fn where(a: FP8x23, b: FP8x23, c: FP8x23) -> FP8x23 {
    if a == FixedTrait::new(0, false) {
        c
    } else {
        b
    }
}

// Bitwise ops act on the raw magnitude bits; the sign flag is combined
// with the same boolean operator.
fn bitwise_and(a: FP8x23, b: FP8x23) -> FP8x23 {
    FixedTrait::new(a.mag & b.mag, a.sign & b.sign)
}

fn bitwise_xor(a: FP8x23, b: FP8x23) -> FP8x23 {
    FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign)
}

fn bitwise_or(a: FP8x23, b: FP8x23) -> FP8x23 {
    FixedTrait::new(a.mag | b.mag, a.sign | b.sign)
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::{FixedTrait, max, min, bitwise_and, bitwise_xor, bitwise_or};

    #[test]
    fn test_max() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::new_unscaled(1, true);

        assert(max(a, a) == a, 'max(a, a)');
        assert(max(a, b) == a, 'max(a, b)');
        assert(max(a, c) == a, 'max(a, c)');

        assert(max(b, a) == a, 'max(b, a)');
        assert(max(b, b) == b, 'max(b, b)');
        assert(max(b, c) == b, 'max(b, c)');

        assert(max(c, a) == a, 'max(c, a)');
        assert(max(c, b) == b, 'max(c, b)');
        assert(max(c, c) == c, 'max(c, c)');
    }

    #[test]
    fn test_min() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::new_unscaled(1, true);

        assert(min(a, a) == a, 'min(a, a)');
        assert(min(a, b) == b, 'min(a, b)');
        assert(min(a, c) == c, 'min(a, c)');

        assert(min(b, a) == b, 'min(b, a)');
        assert(min(b, b) == b, 'min(b, b)');
        assert(min(b, c) == c, 'min(b, c)');

        assert(min(c, a) == c, 'min(c, a)');
        assert(min(c, b) == c, 'min(c, b)');
        assert(min(c, c) == c, 'min(c, c)');
    }

    #[test]
    fn test_bitwise_and() {
        let a = FixedTrait::new(28835840, false); // 3.4375
        let b = FixedTrait::new(1639448576, true); // -60.5625
        assert(bitwise_and(a, b) == a, 'bitwise_and(a,b)')
    }

    #[test]
    fn test_bitwise_xor() {
        let a = FixedTrait::new(28835840, false); // 3.4375
        let b = FixedTrait::new(1639448576, true); // -60.5625
        let c = FixedTrait::new(1610612736, true);
        assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)')
    }

    #[test]
    fn test_bitwise_or() {
        let a = FixedTrait::new(28835840, false); // 3.4375
        let b = FixedTrait::new(1639448576, true); // -60.5625
        assert(bitwise_or(a, b) == b, 'bitwise_or(a,b)')
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23/math/core.cairo
// Arithmetic core for FP8x23 (sign/magnitude, 23 fractional bits).
// Transcendentals are computed via exp2/log2 with degree-8 minimax
// polynomials whose coefficients are raw FP8x23 magnitudes.
use core::integer;

use orion::numbers::fixed_point::implementations::fp8x23::core::{
    HALF, ONE, MAX, FP8x23, FP8x23Add, FP8x23Impl, FP8x23AddEq, FP8x23Sub, FP8x23Mul, FP8x23MulEq,
    FP8x23TryIntoU128, FP8x23PartialEq, FP8x23PartialOrd, FP8x23SubEq, FP8x23Neg, FP8x23Div,
    FP8x23IntoFelt252, FixedTrait
};
use orion::numbers::fixed_point::implementations::fp8x23::math::lut;

// PUBLIC

fn abs(a: FP8x23) -> FP8x23 {
    FixedTrait::new(a.mag, false)
}

// Sign/magnitude addition: equal signs add magnitudes, opposite signs
// subtract the smaller magnitude and keep the larger operand's sign.
fn add(a: FP8x23, b: FP8x23) -> FP8x23 {
    if a.sign == b.sign {
        return FixedTrait::new(a.mag + b.mag, a.sign);
    }

    if a.mag == b.mag {
        return FixedTrait::ZERO();
    }

    if (a.mag > b.mag) {
        FixedTrait::new(a.mag - b.mag, a.sign)
    } else {
        FixedTrait::new(b.mag - a.mag, b.sign)
    }
}

// Rounds toward +infinity. A negative value with |value| < 1 normalizes
// to +0 (div == 0 branch) rather than "-0".
fn ceil(a: FP8x23) -> FP8x23 {
    let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));

    if rem == 0 {
        a
    } else if !a.sign {
        FixedTrait::new_unscaled(div + 1, false)
    } else if div == 0 {
        FixedTrait::new_unscaled(0, false)
    } else {
        FixedTrait::new_unscaled(div, true)
    }
}

// Widens to u64 before scaling by ONE so the intermediate cannot overflow.
fn div(a: FP8x23, b: FP8x23) -> FP8x23 {
    let a_u64 = integer::u32_wide_mul(a.mag, ONE);
    let res_u64 = a_u64 / b.mag.into();

    // Re-apply sign
    FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign)
}

fn eq(a: @FP8x23, b: @FP8x23) -> bool {
    (*a.mag == *b.mag) && (*a.sign == *b.sign)
}

// Calculates the natural exponent of x: e^x
fn exp(a: FP8x23) -> FP8x23 {
    exp2(FixedTrait::new(12102203, false) * a) // log2(e) * 2^23 ≈ 12102203
}

// Calculates the binary exponent of x: 2^x
// Integer part via LUT, fractional part via a degree-8 polynomial
// (Horner form, coefficients are raw FP8x23 magnitudes).
fn exp2(a: FP8x23) -> FP8x23 {
    if (a.mag == 0) {
        return FixedTrait::ONE();
    }

    let (int_part, frac_part) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));
    let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false);
    let mut res_u = int_res;

    if frac_part != 0 {
        let frac = FixedTrait::new(frac_part, false);
        let r8 = FixedTrait::new(19, false) * frac;
        let r7 = (r8 + FixedTrait::new(105, false)) * frac;
        let r6 = (r7 + FixedTrait::new(1324, false)) * frac;
        let r5 = (r6 + FixedTrait::new(11159, false)) * frac;
        let r4 = (r5 + FixedTrait::new(80695, false)) * frac;
        let r3 = (r4 + FixedTrait::new(465599, false)) * frac;
        let r2 = (r3 + FixedTrait::new(2015166, false)) * frac;
        let r1 = (r2 + FixedTrait::new(5814540, false)) * frac;
        res_u = res_u * (r1 + FixedTrait::ONE());
    }

    // Negative exponent: 2^-x = 1 / 2^x.
    if a.sign {
        FixedTrait::ONE() / res_u
    } else {
        res_u
    }
}

fn exp2_int(exp: u32) -> FP8x23 {
    FixedTrait::new_unscaled(lut::exp2(exp), false)
}

// Rounds toward -infinity.
fn floor(a: FP8x23) -> FP8x23 {
    let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));

    if rem == 0 {
        a
    } else if !a.sign {
        FixedTrait::new_unscaled(div, false)
    } else {
        FixedTrait::new_unscaled(div + 1, true)
    }
}

// Comparisons: when signs differ the non-negative operand wins; when they
// match, the magnitude comparison is flipped by XOR with the shared sign.
fn ge(a: FP8x23, b: FP8x23) -> bool {
    if a.sign != b.sign {
        !a.sign
    } else {
        (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign)
    }
}

fn gt(a: FP8x23, b: FP8x23) -> bool {
    if a.sign != b.sign {
        !a.sign
    } else {
        (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign)
    }
}

fn le(a: FP8x23, b: FP8x23) -> bool {
    if a.sign != b.sign {
        a.sign
    } else {
        (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign)
    }
}

// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(a: FP8x23) -> FP8x23 {
    FixedTrait::new(5814540, false) * log2(a) // ln(2) = 0.693...
}

// Calculates the binary logarithm of x: log2(x)
// self must be greater than zero
fn log2(a: FP8x23) -> FP8x23 {
    assert(!a.sign, 'must be positive');

    if (a.mag == ONE) {
        return FixedTrait::ZERO();
    } else if (a.mag < ONE) {
        // Compute true inverse binary log if 0 < x < 1
        let div = FixedTrait::ONE() / a;
        return -log2(div);
    }

    // Normalize into [1, 2) using the MSB of the integer part, then
    // evaluate a degree-8 polynomial and add the exponent back.
    let whole = a.mag / ONE;
    let (msb, div) = lut::msb(whole);

    if a.mag == div * ONE {
        // Exact power of two.
        FixedTrait::new_unscaled(msb, false)
    } else {
        let norm = a / FixedTrait::new_unscaled(div, false);
        let r8 = FixedTrait::new(76243, true) * norm;
        let r7 = (r8 + FixedTrait::new(1038893, false)) * norm;
        let r6 = (r7 + FixedTrait::new(6277679, true)) * norm;
        let r5 = (r6 + FixedTrait::new(22135645, false)) * norm;
        let r4 = (r5 + FixedTrait::new(50444339, true)) * norm;
        let r3 = (r4 + FixedTrait::new(77896489, false)) * norm;
        let r2 = (r3 + FixedTrait::new(83945943, true)) * norm;
        let r1 = (r2 + FixedTrait::new(68407458, false)) * norm;
        r1 + FixedTrait::new(28734280, true) + FixedTrait::new_unscaled(msb, false)
    }
}

// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(a: FP8x23) -> FP8x23 {
    FixedTrait::new(2525223, false) * log2(a) // log10(2) = 0.301...
}

fn lt(a: FP8x23, b: FP8x23) -> bool {
    if a.sign != b.sign {
        a.sign
    } else {
        (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign)
    }
}

// NOTE(review): the wide product is u64, despite the `prod_u128` name.
fn mul(a: FP8x23, b: FP8x23) -> FP8x23 {
    let prod_u128 = integer::u32_wide_mul(a.mag, b.mag);

    // Re-apply sign
    FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign)
}

fn ne(a: @FP8x23, b: @FP8x23) -> bool {
    (*a.mag != *b.mag) || (*a.sign != *b.sign)
}

// Flips the sign; zero stays +0.
fn neg(a: FP8x23) -> FP8x23 {
    if a.mag == 0 {
        a
    } else if !a.sign {
        FixedTrait::new(a.mag, !a.sign)
    } else {
        FixedTrait::new(a.mag, false)
    }
}

// Calculates the value of x^y and checks for overflow before returning
// self is a FP8x23 point value
// b is a FP8x23 point value
fn pow(a: FP8x23, b: FP8x23) -> FP8x23 {
    let (_, rem) = integer::u32_safe_divmod(b.mag, integer::u32_as_non_zero(ONE));

    // use the more performant integer pow when y is an int
    if (rem == 0) {
        return pow_int(a, b.mag / ONE, b.sign);
    }

    // x^y = exp(y*ln(x)) for x > 0 will error for x < 0
    exp(b * ln(a))
}

// Calculates the value of a^b and checks for overflow before returning
// Exponentiation by squaring; a negative exponent inverts the base first.
fn pow_int(a: FP8x23, b: u32, sign: bool) -> FP8x23 {
    let mut x = a;
    let mut n = b;

    if sign {
        x = FixedTrait::ONE() / x;
    }

    if n == 0 {
        return FixedTrait::ONE();
    }

    let mut y = FixedTrait::ONE();
    let two = integer::u32_as_non_zero(2);

    while n > 1 {
        let (div, rem) = integer::u32_safe_divmod(n, two);

        if rem == 1 {
            y = x * y;
        }

        x = x * x;
        n = div;
    };

    x * y
}

// Floored-division remainder: a - floor(a/b)*b.
fn rem(a: FP8x23, b: FP8x23) -> FP8x23 {
    a - floor(a / b) * b
}

// Rounds half away from zero (magnitude-based).
fn round(a: FP8x23) -> FP8x23 {
    let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));

    if (HALF <= rem) {
        FixedTrait::new_unscaled(div + 1, a.sign)
    } else {
        FixedTrait::new_unscaled(div, a.sign)
    }
}

// Calculates the square root of a FP8x23 point value
// x must be positive
// sqrt(mag/2^23) = sqrt(mag * 2^23) / 2^23, computed in u64.
fn sqrt(a: FP8x23) -> FP8x23 {
    assert(!(a.sign), 'must be positive');

    let root = integer::u64_sqrt(a.mag.into() * ONE.into());
    FixedTrait::new(root.into(), false)
}

fn sub(a: FP8x23, b: FP8x23) -> FP8x23 {
    add(a, -b)
}

// Signum: -1, 0 or +1 with the input's sign.
fn sign(a: FP8x23) -> FP8x23 {
    if a.mag == 0 {
        FixedTrait::new(0, false)
    } else {
        FixedTrait::new(ONE, a.sign)
    }
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use orion::numbers::fixed_point::implementations::fp8x23::helpers::{
        assert_precise, assert_relative
    };
    use orion::numbers::fixed_point::implementations::fp8x23::math::trig::{PI, HALF_PI};

    use super::{
        FixedTrait, ONE, FP8x23, ceil, floor, sqrt, round, lut, pow, exp, exp2, exp2_int, ln, log2,
        log10, eq, add, ne, HALF
    };

    #[test]
    fn test_into() {
        let a = FixedTrait::<FP8x23>::new_unscaled(5, false);
        assert(a.mag == 5 * ONE, 'invalid result');
    }

    #[test]
    fn test_try_into_u128() {
        // Positive unscaled
        let a = FixedTrait::<FP8x23>::new_unscaled(5, false);
        assert(a.try_into().unwrap() == 5_u128, 'invalid result');

        // Positive scaled
        let b = FixedTrait::<FP8x23>::new(5 * ONE, false);
        assert(b.try_into().unwrap() == 5_u128, 'invalid result');

        // Zero
        let d = FixedTrait::<FP8x23>::new_unscaled(0, false);
        assert(d.try_into().unwrap() == 0_u128, 'invalid result');
    }

    #[test]
    #[should_panic]
    fn test_negative_try_into_u128() {
        let a = FixedTrait::<FP8x23>::new_unscaled(1, true);
        let _a: u128 = a.try_into().unwrap();
    }

    #[test]
    #[available_gas(1000000)]
    fn test_acos() {
        let a = FixedTrait::<FP8x23>::ONE();
        assert(a.acos().into() == 0, 'invalid one');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_asin() {
        let a = FixedTrait::ONE();
        assert_precise(a.asin(), HALF_PI.into(), 'invalid one', Option::None(())); // PI / 2
    }

    #[test]
    #[available_gas(2000000)]
    fn test_atan() {
        let a = FixedTrait::new(2 * ONE, false);
        assert_relative(a.atan(), 9287469, 'invalid two', Option::None(()));
    }

    #[test]
    fn test_ceil() {
        let a = FixedTrait::new(24326963, false); // 2.9
        assert(ceil(a).mag == 3 * ONE, 'invalid pos decimal');
    }

    #[test]
    fn test_floor() {
        let a = FixedTrait::new(24326963, false); // 2.9
        assert(floor(a).mag == 2 * ONE, 'invalid pos decimal');
    }

    #[test]
    fn test_round() {
        let a = FixedTrait::new(24326963, false); // 2.9
        assert(round(a).mag == 3 * ONE, 'invalid pos decimal');
    }

    #[test]
    #[should_panic]
    fn test_sqrt_fail() {
        let a = FixedTrait::new_unscaled(25, true);
        sqrt(a);
    }

    #[test]
    fn test_sqrt() {
        let mut a = FixedTrait::new_unscaled(0, false);
        assert(sqrt(a).mag == 0, 'invalid zero root');
        a = FixedTrait::new_unscaled(25, false);
        assert(sqrt(a).mag == 5 * ONE, 'invalid pos root');
    }

    #[test]
    #[available_gas(100000)]
    fn test_msb() {
        let a = FixedTrait::<FP8x23>::new_unscaled(100, false);
        let (msb, div) = lut::msb(a.mag / ONE);
        assert(msb == 6, 'invalid msb');
        assert(div == 64, 'invalid msb ceil');
    }

    #[test]
    #[available_gas(600000)]
    fn test_pow() {
        let a = FixedTrait::new_unscaled(3, false);
        let b = FixedTrait::new_unscaled(4, false);
        assert(pow(a, b).mag == 81 * ONE, 'invalid pos base power');
    }

    #[test]
    #[available_gas(900000)]
    fn test_pow_frac() {
        let a = FixedTrait::new_unscaled(3, false);
        let b = FixedTrait::new(4194304, false); // 0.5
        assert_relative(
            pow(a, b), 14529495, 'invalid pos base power', Option::None(())
        ); // 1.7320508075688772
    }

    #[test]
    #[available_gas(1000000)]
    fn test_exp() {
        let a = FixedTrait::new_unscaled(2, false);
        assert_relative(
            exp(a), 61983895, 'invalid exp of 2', Option::None(())
        ); // 7.389056098793725
    }

    #[test]
    #[available_gas(400000)]
    fn test_exp2() {
        let a = FixedTrait::new_unscaled(5, false);
        assert(exp2(a).mag == 268435456, 'invalid exp2 of 2');
    }

    #[test]
    #[available_gas(20000)]
    fn test_exp2_int() {
        assert(exp2_int(5).into() == 268435456, 'invalid exp2 of 2');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_ln() {
        let mut a = FixedTrait::new_unscaled(1, false);
        assert(ln(a).mag == 0, 'invalid ln of 1');

        a = FixedTrait::new(22802601, false);
        assert_relative(ln(a), ONE.into(), 'invalid ln of 2.7...', Option::None(()));
    }

    #[test]
    #[available_gas(1000000)]
    fn test_log2() {
        let mut a = FixedTrait::new_unscaled(32, false);
        assert(log2(a) == FixedTrait::new_unscaled(5, false), 'invalid log2 32');

        a = FixedTrait::new_unscaled(10, false);
        assert_relative(
            log2(a), 27866353, 'invalid log2 10', Option::None(())
        ); // 3.321928094887362
    }

    #[test]
    #[available_gas(1000000)]
    fn test_log10() {
        let a = FixedTrait::new_unscaled(100, false);
        assert_relative(log10(a), 2 * ONE.into(), 'invalid log10', Option::None(()));
    }

    #[test]
    fn test_eq() {
        let a = FixedTrait::new_unscaled(42, false);
        let b = FixedTrait::new_unscaled(42, false);
        let c = eq(@a, @b);
        assert(c, 'invalid result');
    }

    #[test]
    fn test_ne() {
        let a = FixedTrait::new_unscaled(42, false);
        let b = FixedTrait::new_unscaled(42, false);
        let c = ne(@a, @b);
        assert(!c, 'invalid result');
    }

    #[test]
    fn test_add() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(2, false);
        assert(add(a, b) == FixedTrait::new_unscaled(3, false), 'invalid result');
    }

    #[test]
    fn test_add_eq() {
        let mut a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(2, false);
        a += b;
        assert(a == FixedTrait::<FP8x23>::new_unscaled(3, false), 'invalid result');
    }

    #[test]
    fn test_sub() {
        let a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, false);
        let c = a - b;
        assert(c == FixedTrait::<FP8x23>::new_unscaled(3, false), 'false result invalid');
    }

    #[test]
    fn test_sub_eq() {
        let mut a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, false);
        a -= b;
        assert(a == FixedTrait::<FP8x23>::new_unscaled(3, false), 'invalid result');
    }

    #[test]
    #[available_gas(100000)]
    fn test_mul_pos() {
        let a = FP8x23 { mag: 24326963, sign: false };
        let b = FP8x23 { mag: 24326963, sign: false };
        let c = a * b;
        assert(c.mag == 70548192, 'invalid result');
    }

    #[test]
    fn test_mul_neg() {
        let a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, true);
        let c = a * b;
        assert(c == FixedTrait::<FP8x23>::new_unscaled(10, true), 'invalid result');
    }

    #[test]
    fn test_mul_eq() {
        let mut a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, true);
        a *= b;
        assert(a == FixedTrait::<FP8x23>::new_unscaled(10, true), 'invalid result');
    }

    #[test]
    fn test_div() {
        let a = FixedTrait::new_unscaled(10, false);
        let b = FixedTrait::<FP8x23>::new(24326963, false); // 2.9
        let c = a / b;
        assert(c.mag == 28926234, 'invalid pos decimal'); // 3.4482758620689653
    }

    #[test]
    fn test_le() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP8x23>::new_unscaled(1, true);

        assert(a <= a, 'a <= a');
        assert(!(a <= b), 'a <= b');
        assert(!(a <= c), 'a <= c');

        assert(b <= a, 'b <= a');
        assert(b <= b, 'b <= b');
        assert(!(b <= c), 'b <= c');

        assert(c <= a, 'c <= a');
        assert(c <= b, 'c <= b');
        assert(c <= c, 'c <= c');
    }

    #[test]
    fn test_lt() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP8x23>::new_unscaled(1, true);

        assert(!(a < a), 'a < a');
        assert(!(a < b), 'a < b');
        assert(!(a < c), 'a < c');

        assert(b < a, 'b < a');
        assert(!(b < b), 'b < b');
        assert(!(b < c), 'b < c');

        assert(c < a, 'c < a');
        assert(c < b, 'c < b');
        assert(!(c < c), 'c < c');
    }

    #[test]
    fn test_ge() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP8x23>::new_unscaled(1, true);

        assert(a >= a, 'a >= a');
        assert(a >= b, 'a >= b');
        assert(a >= c, 'a >= c');

        assert(!(b >= a), 'b >= a');
        assert(b >= b, 'b >= b');
        assert(b >= c, 'b >= c');

        assert(!(c >= a), 'c >= a');
        assert(!(c >= b), 'c >= b');
        assert(c >= c, 'c >= c');
    }

    #[test]
    fn test_gt() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP8x23>::new_unscaled(1, true);

        assert(!(a > a), 'a > a');
        assert(a > b, 'a > b');
        assert(a > c, 'a > c');

        assert(!(b > a), 'b > a');
        assert(!(b > b), 'b > b');
        assert(b > c, 'b > c');

        assert(!(c > a), 'c > a');
        assert(!(c > b), 'c > b');
        assert(!(c > c), 'c > c');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_cos() {
        let a = FixedTrait::<FP8x23>::new(HALF_PI, false);
        assert(a.cos().into() == 0, 'invalid half pi');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_sin() {
        let a = FixedTrait::new(HALF_PI, false);
        assert_precise(a.sin(), ONE.into(), 'invalid half pi', Option::None(()));
    }

    #[test]
    #[available_gas(2000000)]
    fn test_tan() {
        let a = FixedTrait::<FP8x23>::new(HALF_PI / 2, false);
        assert(a.tan().mag == 8388608, 'invalid quarter pi');
    }

    #[test]
    #[available_gas(2000000)]
    fn test_sign() {
        let a = FixedTrait::<FP8x23>::new(0, false);
        assert(a.sign().mag == 0 && !a.sign().sign, 'invalid sign (0, true)');

        let a = FixedTrait::<FP8x23>::new(HALF, true);
        assert(a.sign().mag == ONE && a.sign().sign, 'invalid sign (HALF, true)');

        let a = FixedTrait::<FP8x23>::new(HALF, false);
        assert(a.sign().mag == ONE && !a.sign().sign, 'invalid sign (HALF, false)');

        let a = FixedTrait::<FP8x23>::new(ONE, true);
        assert(a.sign().mag == ONE && a.sign().sign, 'invalid sign (ONE, true)');

        let a = FixedTrait::<FP8x23>::new(ONE, false);
        assert(a.sign().mag == ONE && !a.sign().sign, 'invalid sign (ONE, false)');
    }

    #[test]
    #[should_panic]
    #[available_gas(2000000)]
    fn test_sign_fail() {
        let a = FixedTrait::<FP8x23>::new(HALF, true);
        assert(a.sign().mag != ONE && !a.sign().sign, 'invalid sign (HALF, true)');
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23/math/erf.cairo
// Error function for FP8x23, backed by a magnitude lookup table.
use orion::numbers::fixed_point::implementations::fp8x23::core::{ONE, FP8x23, FixedTrait};
use orion::numbers::fixed_point::implementations::fp8x23::math::lut::erf_lut;

const ERF_COMPUTATIONAL_ACCURACY: u32 = 100;
const MAX_ERF_COMPUTATIONAL_ACCURACY: u32 = 10;
const ROUND_CHECK_NUMBER: u32 = 1;
// Values > MAX_ERF_NUMBER return 1
const MAX_ERF_NUMBER: u32 = 29360128;
// Values <= ERF_TRUNCATION_NUMBER -> two decimal places, and values > ERF_TRUNCATION_NUMBER -> one decimal place
const ERF_TRUNCATION_NUMBER: u32 = 16777216;

/// Computes erf(x) for an FP8x23 value.
///
/// erf is odd, so the input's sign is carried through unchanged and the
/// lookup only needs the magnitude. Magnitudes at or above MAX_ERF_NUMBER
/// (3.5 in FP8x23) saturate to ONE.
fn erf(x: FP8x23) -> FP8x23 {
    let looked_up = if x.mag < MAX_ERF_NUMBER {
        erf_lut(x.mag)
    } else {
        ONE
    };

    FP8x23 { mag: looked_up, sign: x.sign }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo
// Hyperbolic functions for FP8x23, all derived from exp/ln.
use core::debug::PrintTrait;

use orion::numbers::fixed_point::implementations::fp8x23::core::{
    HALF, ONE, TWO, FP8x23, FP8x23Impl, FP8x23Add, FP8x23AddEq, FP8x23Sub, FP8x23Mul, FP8x23MulEq,
    FP8x23TryIntoU128, FP8x23PartialEq, FP8x23PartialOrd, FP8x23SubEq, FP8x23Neg, FP8x23Div,
    FP8x23IntoFelt252, FixedTrait
};

// Calculates hyperbolic cosine of a (fixed point)
// cosh(a) = (e^a + e^-a) / 2
fn cosh(a: FP8x23) -> FP8x23 {
    let ea = a.exp();

    (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false)
}

// Calculates hyperbolic sine of a (fixed point)
// sinh(a) = (e^a - e^-a) / 2
fn sinh(a: FP8x23) -> FP8x23 {
    let ea = a.exp();

    (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false)
}

// Calculates hyperbolic tangent of a (fixed point)
// tanh(a) = (e^a - e^-a) / (e^a + e^-a)
fn tanh(a: FP8x23) -> FP8x23 {
    let ea = a.exp();
    let ea_i = FixedTrait::ONE() / ea;

    (ea - ea_i) / (ea + ea_i)
}

// Calculates inverse hyperbolic cosine of a (fixed point)
// acosh(a) = ln(a + sqrt(a^2 - 1)); requires a >= 1 (sqrt asserts positive).
fn acosh(a: FP8x23) -> FP8x23 {
    let root = (a * a - FixedTrait::ONE()).sqrt();

    (a + root).ln()
}

// Calculates inverse hyperbolic sine of a (fixed point)
// asinh(a) = ln(a + sqrt(a^2 + 1))
fn asinh(a: FP8x23) -> FP8x23 {
    let root = (a * a + FixedTrait::ONE()).sqrt();

    (a + root).ln()
}

// Calculates inverse hyperbolic tangent of a (fixed point)
// atanh(a) = ln((1 + a) / (1 - a)) / 2; requires |a| < 1.
fn atanh(a: FP8x23) -> FP8x23 {
    let one = FixedTrait::ONE();

    let ln_arg = (one + a) / (one - a);

    ln_arg.ln() / FixedTrait::new(TWO, false)
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use orion::numbers::fixed_point::implementations::fp8x23::helpers::assert_precise;

    use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF};

    #[test]
    #[available_gas(10000000)]
    fn test_cosh() {
        let a = FixedTrait::new(TWO, false);
        assert_precise(cosh(a), 31559585, 'invalid two', Option::None(())); // 3.762195691016423

        let a = FixedTrait::ONE();
        assert_precise(cosh(a), 12944299, 'invalid one', Option::None(())); // 1.5430806347841253

        let a = FixedTrait::ZERO();
        assert_precise(cosh(a), ONE.into(), 'invalid zero', Option::None(()));

        // NOTE(review): this case is labelled 'invalid neg one' but feeds +1
        // (FixedTrait::ONE()); FixedTrait::new(ONE, true) was presumably
        // intended, as in test_sinh/test_tanh. cosh is even, so the expected
        // value would only change by rounding (cf. the distinct pos/neg two
        // constants below) — confirm the right constant before changing it.
        let a = FixedTrait::ONE();
        assert_precise(
            cosh(a), 12944299, 'invalid neg one', Option::None(())
        ); // 1.5430806347841253

        let a = FixedTrait::new(TWO, true);
        assert_precise(cosh(a), 31559602, 'invalid neg two', Option::None(())); // 3.762195691016423
    }

    #[test]
    #[available_gas(10000000)]
    fn test_sinh() {
        let a = FixedTrait::new(TWO, false);
        assert_precise(sinh(a), 30424310, 'invalid two', Option::None(())); // 3.6268604077773023

        let a = FixedTrait::ONE();
        assert_precise(sinh(a), 9858302, 'invalid one', Option::None(())); // 1.1752011936029418

        let a = FixedTrait::ZERO();
        assert(sinh(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE, true);
        assert_precise(
            sinh(a), -9858302, 'invalid neg one', Option::None(())
        ); // -1.1752011936029418

        let a = FixedTrait::new(TWO, true);
        assert_precise(
            sinh(a), -30424328, 'invalid neg two', Option::None(())
        ); // -3.6268604077773023
    }

    #[test]
    #[available_gas(10000000)]
    fn test_tanh() {
        let a = FixedTrait::new(TWO, false);
        assert_precise(tanh(a), 8086849, 'invalid two', Option::None(())); // 0.9640275800745076

        let a = FixedTrait::ONE();
        assert_precise(tanh(a), 6388715, 'invalid one', Option::None(())); // 0.7615941559446443

        let a = FixedTrait::ZERO();
        assert(tanh(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE, true);
        assert_precise(
            tanh(a), -6388715, 'invalid neg one', Option::None(())
        ); // -0.7615941559446443

        let a = FixedTrait::new(TWO, true);
        assert_precise(
            tanh(a), -8086849, 'invalid neg two', Option::None(())
        ); // 0.9640275800745076
    }

    #[test]
    #[available_gas(10000000)]
    fn test_acosh() {
        let a = FixedTrait::new(31559585, false); // 3.762195691016423
        assert_precise(acosh(a), 16777257, 'invalid two', Option::None(()));

        let a = FixedTrait::new(12944299, false); // 1.5430806347841253
        assert_precise(acosh(a), ONE.into(), 'invalid one', Option::None(()));

        let a = FixedTrait::ONE(); // 1
        assert(acosh(a).into() == 0, 'invalid zero');
    }

    #[test]
    #[available_gas(10000000)]
    fn test_asinh() {
        let a = FixedTrait::new(30424310, false); // 3.6268604077773023
        assert_precise(asinh(a), 16777257, 'invalid two', Option::None(()));

        let a = FixedTrait::new(9858302, false); // 1.1752011936029418
        assert_precise(asinh(a), ONE.into(), 'invalid one', Option::None(()));

        let a = FixedTrait::ZERO();
        assert(asinh(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(9858302, true); // -1.1752011936029418
        assert_precise(asinh(a), -ONE.into(), 'invalid neg one', Option::None(()));

        let a = FixedTrait::new(30424310, true); // -3.6268604077773023
        assert_precise(asinh(a), -16777238, 'invalid neg two', Option::None(()));
    }

    #[test]
    #[available_gas(10000000)]
    fn test_atanh() {
        let a = FixedTrait::new(7549747, false); // 0.9
        assert_precise(atanh(a), 12349872, 'invalid 0.9', Option::None(())); // 1.4722194895832204

        let a = FixedTrait::new(HALF, false); // 0.5
        assert_precise(atanh(a), 4607914, 'invalid half', Option::None(())); // 0.5493061443340548

        let a = FixedTrait::ZERO();
        assert(atanh(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(HALF, true); // 0.5
        assert_precise(
            atanh(a), -4607914, 'invalid neg half', Option::None(())
        ); // 0.5493061443340548

        let a = FixedTrait::new(7549747, true); // 0.9
        assert_precise(atanh(a), -12349872, 'invalid -0.9', Option::None(())); // 1.4722194895832204
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23/math/lut.cairo
use orion::numbers::fixed_point::implementations::fp8x23::core::ONE;

// Calculates the most significant bit.
// Returns (bit index, 2^index) for the highest set bit of `whole`;
// values >= 256 are clamped to (8, 256).
fn msb(whole: u32) -> (u32, u32) {
    if whole < 256 {
        if whole < 2 { return (0, 1); }
        if whole < 4 { return (1, 2); }
        if whole < 8 { return (2, 4); }
        if whole < 16 { return (3, 8); }
        if whole < 32 { return (4, 16); }
        if whole < 64 { return (5, 32); }
        if whole < 128 { return (6, 64); }
        if whole < 256 { return (7, 128); }
    }

    (8, 256)
}

// Returns 2^exp for exp in [0, 22]; any larger exponent is clamped to
// 8388608 (= 2^23, the fp8x23 ONE scale).
fn exp2(exp: u32) -> u32 {
    if exp <= 16 {
        if exp == 0 { return 1; }
        if exp == 1 { return 2; }
        if exp == 2 { return 4; }
        if exp == 3 { return 8; }
        if exp == 4 { return 16; }
        if exp == 5 { return 32; }
        if exp == 6 { return 64; }
        if exp == 7 { return 128; }
        if exp == 8 { return 256; }
        if exp == 9 { return 512; }
        if exp == 10 { return 1024; }
        if exp == 11 { return 2048; }
        if exp == 12 { return 4096; }
        if exp == 13 { return 8192; }
        if exp == 14 { return 16384; }
        if exp == 15 { return 32768; }
        if exp == 16 { return 65536; }
    } else if exp <= 32 {
        if exp == 17 { return 131072; }
        if exp == 18 { return 262144; }
        if exp == 19 { return 524288; }
        if exp == 20 { return 1048576; }
        if exp == 21 { return 2097152; }
        if exp == 22 { return 4194304; }
    }

    8388608
}

// Sine lookup table, bucketed into slots of width 51472 fixed-point units.
// Returns (start, low, high): `start` is the slot's lower input bound and
// `low`/`high` are the table values at the slot's endpoints, which the
// caller combines by linear interpolation (see trig.cairo's *_fast usage
// of the analogous atan table).
fn sin(a: u32) -> (u32, u32, u32) {
    let slot = a / 51472;

    if slot < 128 {
        if slot < 64 {
            if slot < 32 {
                if slot < 16 {
                    if slot == 0 { return (0, 0, 51472); }
                    if slot == 1 { return (51472, 51472, 102941); }
                    if slot == 2 { return (102944, 102941, 154407); }
                    if slot == 3 { return (154416, 154407, 205867); }
                    if slot == 4 { return (205887, 205867, 257319); }
                    if slot == 5 { return (257359, 257319, 308761); }
                    if slot == 6 { return (308831, 308761, 360192); }
                    if slot == 7 { return (360303, 360192, 411609); }
                    if slot == 8 { return (411775, 411609, 463011); }
                    if slot == 9 { return (463247, 463011, 514396); }
                    if slot == 10 { return (514723, 514396, 565761); }
                    if slot == 11 { return (566190, 565761, 617104); }
                    if slot == 12 { return (617662, 617104, 668425); }
                    if slot == 13 { return (669134, 668425, 719720); }
                    if slot == 14 { return (720606, 719720, 770988); }
                    if slot == 15 { return (772078, 770988, 822227); }
                } else {
                    if slot == 16 { return (823550, 822227, 873436); }
                    if slot == 17 { return (875022, 873436, 924611); }
                    if slot == 18 { return (926493, 924611, 975751); }
                    if slot == 19 { return (977965, 975751, 1026855); }
                    if slot == 20 { return (1029437, 1026855, 1077920); }
                    if slot == 21 { return (1080909, 1077920, 1128945); }
                    if slot == 22 { return (1132381, 1128945, 1179927); }
                    if slot == 23 { return (1183853, 1179927, 1230864); }
                    if slot == 24 { return (1235324, 1230864, 1281756); }
                    if slot == 25 { return (1286796, 1281756, 1332599); }
                    if slot == 26 { return (1338268, 1332599, 1383392); }
                    if slot == 27 { return (1389740, 1383392, 1434132); }
                    if slot == 28 { return (1441212, 1434132, 1484819); }
                    if slot == 29 { return (1492684, 1484819, 1535450); }
                    if slot == 30 { return (1544156, 1535450, 1586023); }
                    if slot == 31 { return (1595627, 1586023, 1636536); }
                }
            } else {
                if slot < 48 {
                    if slot == 32 { return (1647099, 1636536, 1686988); }
                    if slot == 33 { return (1698571, 1686988, 1737376); }
                    if slot == 34 { return (1750043, 1737376, 1787699); }
                    if slot == 35 { return (1801515, 1787699, 1837954); }
                    if slot == 36 { return (1852987, 1837954, 1888141); }
                    if slot == 37 { return (1904459, 1888141, 1938256); }
                    if slot == 38 { return (1955930, 1938256, 1988298); }
                    if slot == 39 { return (2007402, 1988298, 2038265); }
                    if slot == 40 { return (2058871, 2038265, 2088156); }
                    if slot == 41 { return (2110346, 2088156, 2137968); }
                    if slot == 42 { return (2161818, 2137968, 2187700); }
                    if slot == 43 { return (2213290, 2187700, 2237349); }
                    if slot == 44 { return (2264762, 2237349, 2286914); }
                    if slot == 45 { return (2316233, 2286914, 2336392); }
                    if slot == 46 { return (2367705, 2336392, 2385783); }
                    if slot == 47 { return (2419177, 2385783, 2435084); }
                } else {
                    if slot == 48 { return (2470649, 2435084, 2484294); }
                    if slot == 49 { return (2522121, 2484294, 2533410); }
                    if slot == 50 { return (2573593, 2533410, 2582430); }
                    if slot == 51 { return (2625065, 2582430, 2631353); }
                    if slot == 52 { return (2676536, 2631353, 2680177); }
                    if slot == 53 { return (2728008, 2680177, 2728901); }
                    if slot == 54 { return (2779480, 2728901, 2777521); }
                    if slot == 55 { return (2830952, 2777521, 2826037); }
                    if slot == 56 { return (2882424, 2826037, 2874446); }
                    if slot == 57 { return (2933896, 2874446, 2922748); }
                    if slot == 58 { return (2985368, 2922748, 2970939); }
                    if slot == 59 { return (3036839, 2970939, 3019018); }
                    if slot == 60 { return (3088311, 3019018, 3066984); }
                    if slot == 61 { return (3139783, 3066984, 3114834); }
                    if slot == 62 { return (3191255, 3114834, 3162567); }
                    if slot == 63 { return (3242727, 3162567, 3210181); }
                }
            }
        } else {
            if slot < 96 {
                if slot < 80 {
                    if slot == 64 { return (3294199, 3210181, 3257674); }
                    if slot == 65 { return (3345671, 3257674, 3305045); }
                    if slot == 66 { return (3397142, 3305045, 3352291); }
                    if slot == 67 { return (3448614, 3352291, 3399411); }
                    if slot == 68 { return (3500086, 3399411, 3446402); }
                    if slot == 69 { return (3551558, 3446402, 3493264); }
                    if slot == 70 { return (3603030, 3493264, 3539995); }
                    if slot == 71 { return (3654502, 3539995, 3586592); }
                    if slot == 72 { return (3705973, 3586592, 3633054); }
                    if slot == 73 { return (3757445, 3633054, 3679380); }
                    if slot == 74 { return (3808917, 3679380, 3725567); }
                    if slot == 75 { return (3860389, 3725567, 3771613); }
                    if slot == 76 { return (3911861, 3771613, 3817518); }
                    if slot == 77 { return (3963333, 3817518, 3863279); }
                    if slot == 78 { return (4014805, 3863279, 3908894); }
                    if slot == 79 { return (4066276, 3908894, 3954362); }
                } else {
                    if slot == 80 { return (4117751, 3954362, 3999682); }
                    if slot == 81 { return (4169220, 3999682, 4044851); }
                    if slot == 82 { return (4220692, 4044851, 4089867); }
                    if slot == 83 { return (4272164, 4089867, 4134730); }
                    if slot == 84 { return (4323636, 4134730, 4179437); }
                    if slot == 85 { return (4375108, 4179437, 4223986); }
                    if slot == 86 { return (4426579, 4223986, 4268377); }
                    if slot == 87 { return (4478051, 4268377, 4312606); }
                    if slot == 88 { return (4529523, 4312606, 4356674); }
                    if slot == 89 { return (4580995, 4356674, 4400577); }
                    if slot == 90 { return (4632474, 4400577, 4444315); }
                    if slot == 91 { return (4683939, 4444315, 4487885); }
                    if slot == 92 { return (4735411, 4487885, 4531287); }
                    if slot == 93 { return (4786882, 4531287, 4574518); }
                    if slot == 94 { return (4838354, 4574518, 4617576); }
                    if slot == 95 { return (4889826, 4617576, 4660461); }
                }
            } else {
                if slot < 112 {
                    if slot == 96 { return (4941298, 4660461, 4703170); }
                    if slot == 97 { return (4992770, 4703170, 4745702); }
                    if slot == 98 { return (5044242, 4745702, 4788056); }
                    if slot == 99 { return (5095714, 4788056, 4830229); }
                    if slot == 100 { return (5147227, 4830229, 4872221); }
                    if slot == 101 { return (5198657, 4872221, 4914029); }
                    if slot == 102 { return (5250129, 4914029, 4955652); }
                    if slot == 103 { return (5301601, 4955652, 4997088); }
                    if slot == 104 { return (5353073, 4997088, 5038336); }
                    if slot == 105 { return (5404545, 5038336, 5079395); }
                    if slot == 106 { return (5456017, 5079395, 5120262); }
                    if slot == 107 { return (5507488, 5120262, 5160937); }
                    if slot == 108 { return (5558960, 5160937, 5201417); }
                    if slot == 109 { return (5610432, 5201417, 5241701); }
                    if slot == 110 { return (5661904, 5241701, 5281788); }
                    if slot == 111 { return (5713376, 5281788, 5321677); }
                } else {
                    if slot == 112 { return (5764848, 5321677, 5361364); }
                    if slot == 113 { return (5816320, 5361364, 5400850); }
                    if slot == 114 { return (5867791, 5400850, 5440133); }
                    if slot == 115 { return (5919263, 5440133, 5479211); }
                    if slot == 116 { return (5970735, 5479211, 5518082); }
                    if slot == 117 { return (6022207, 5518082, 5556746); }
                    if slot == 118 { return (6073679, 5556746, 5595201); }
                    if slot == 119 { return (6125151, 5595201, 5633445); }
                    if slot == 120 { return (6176622, 5633445, 5671477); }
                    if slot == 121 { return (6228094, 5671477, 5709295); }
                    if slot == 122 { return (6279566, 5709295, 5746898); }
                    if slot == 123 { return (6331038, 5746898, 5784285); }
                    if slot == 124 { return (6382510, 5784285, 5821455); }
                    if slot == 125 { return (6433982, 5821455, 5858405); }
                    if slot == 126 { return (6485454, 5858405, 5895134); }
                    if slot == 127 { return (6536925, 5895134, 5931642); }
                }
            }
        }
    } else {
        if slot < 192 {
            if slot < 160 {
                if slot < 144 {
                    if slot == 128 { return (6588397, 5931642, 5967926); }
                    if slot == 129 { return (6639869, 5967926, 6003985); }
                    if slot == 130 { return (6691345, 6003985, 6039819); }
                    if slot == 131 { return (6742813, 6039819, 6075425); }
                    if slot == 132 { return (6794285, 6075425, 6110802); }
                    if slot == 133 { return (6845757, 6110802, 6145949); }
                    if slot == 134 { return (6897228, 6145949, 6180865); }
                    if slot == 135 { return (6948700, 6180865, 6215549); }
                    if slot == 136 { return (7000172, 6215549, 6249998); }
                    if slot == 137 { return (7051644, 6249998, 6284212); }
                    if slot == 138 { return (7103116, 6284212, 6318189); }
                    if slot == 139 { return (7154588, 6318189, 6351928); }
                    if slot == 140 { return (7206060, 6351928, 6385428); }
                    if slot == 141 { return (7257531, 6385428, 6418688); }
                    if slot == 142 { return (7309003, 6418688, 6451706); }
                    if slot == 143 { return (7360475, 6451706, 6484482); }
                } else {
                    if slot == 144 { return (7411947, 6484482, 6517013); }
                    if slot == 145 { return (7463419, 6517013, 6549299); }
                    if slot == 146 { return (7514891, 6549299, 6581338); }
                    if slot == 147 { return (7566363, 6581338, 6613129); }
                    if slot == 148 { return (7617834, 6613129, 6644672); }
                    if slot == 149 { return (7669306, 6644672, 6675964); }
                    if slot == 150 { return (7720780, 6675964, 6707005); }
                    if slot == 151 { return (7772250, 6707005, 6737793); }
                    if slot == 152 { return (7823722, 6737793, 6768328); }
                    if slot == 153 { return (7875194, 6768328, 6798608); }
                    if slot == 154 { return (7926666, 6798608, 6828632); }
                    if slot == 155 { return (7978137, 6828632, 6858399); }
                    if slot == 156 { return (8029609, 6858399, 6887907); }
                    if slot == 157 { return (8081081, 6887907, 6917156); }
                    if slot == 158 { return (8132553, 6917156, 6946145); }
                    if slot == 159 { return (8184025, 6946145, 6974873); }
                }
            } else {
                if slot < 176 {
                    // FIX: the slot-160 entry was placed inside the `slot < 160`
                    // branch above, where it could never match; inputs in that
                    // slot fell through to the tail entry (13125323, ...),
                    // yielding a wrong interpolation segment. It belongs here.
                    if slot == 160 { return (8235503, 6974873, 7003337); }
                    if slot == 161 { return (8286968, 7003337, 7031538); }
                    if slot == 162 { return (8338440, 7031538, 7059475); }
                    if slot == 163 { return (8389912, 7059475, 7087145); }
                    if slot == 164 { return (8441384, 7087145, 7114549); }
                    if slot == 165 { return (8492856, 7114549, 7141685); }
                    if slot == 166 { return (8544328, 7141685, 7168552); }
                    if slot == 167 { return (8595800, 7168552, 7195149); }
                    if slot == 168 { return (8647271, 7195149, 7221475); }
                    if slot == 169 { return (8698743, 7221475, 7247530); }
                    if slot == 170 { return (8750215, 7247530, 7273311); }
                    if slot == 171 { return (8801687, 7273311, 7298819); }
                    if slot == 172 { return (8853159, 7298819, 7324052); }
                    if slot == 173 { return (8904631, 7324052, 7349009); }
                    if slot == 174 { return (8956103, 7349009, 7373689); }
                    if slot == 175 { return (9007574, 7373689, 7398092); }
                } else {
                    if slot == 176 { return (9059046, 7398092, 7422216); }
                    if slot == 177 { return (9110518, 7422216, 7446061); }
                    if slot == 178 { return (9161990, 7446061, 7469625); }
                    if slot == 179 { return (9213462, 7469625, 7492909); }
                    if slot == 180 { return (9264934, 7492909, 7515910); }
                    if slot == 181 { return (9316406, 7515910, 7538628); }
                    if slot == 182 { return (9367877, 7538628, 7561062); }
                    if slot == 183 { return (9419349, 7561062, 7583212); }
                    if slot == 184 { return (9470821, 7583212, 7605076); }
                    if slot == 185 { return (9522293, 7605076, 7626654); }
                    if slot == 186 { return (9573765, 7626654, 7647945); }
                    if slot == 187 { return (9625237, 7647945, 7668947); }
                    if slot == 188 { return (9676709, 7668947, 7689661); }
                    if slot == 189 { return (9728180, 7689661, 7710086); }
                    if slot == 190 { return (9779651, 7710086, 7730220); }
                    if slot == 191 { return (9831124, 7730220, 7750063); }
                }
            }
        } else {
            if slot < 224 {
                if slot < 208 {
                    if slot == 192 { return (9882596, 7750063, 7769615); }
                    if slot == 193 { return (9934068, 7769615, 7788874); }
                    if slot == 194 { return (9985540, 7788874, 7807839); }
                    if slot == 195 { return (10037012, 7807839, 7826511); }
                    if slot == 196 { return (10088483, 7826511, 7844888); }
                    if slot == 197 { return (10139955, 7844888, 7862970); }
                    if slot == 198 { return (10191427, 7862970, 7880755); }
                    if slot == 199 { return (10242899, 7880755, 7898244); }
                    if slot == 200 { return (10294373, 7898244, 7915436); }
                    if slot == 201 { return (10345843, 7915436, 7932329); }
                    if slot == 202 { return (10397315, 7932329, 7948924); }
                    if slot == 203 { return (10448786, 7948924, 7965220); }
                    if slot == 204 { return (10500258, 7965220, 7981215); }
                    if slot == 205 { return (10551730, 7981215, 7996911); }
                    if slot == 206 { return (10603202, 7996911, 8012305); }
                    if slot == 207 { return (10654674, 8012305, 8027397); }
                } else {
                    if slot == 208 { return (10706146, 8027397, 8042188); }
                    if slot == 209 { return (10757617, 8042188, 8056675); }
                    if slot == 210 { return (10809089, 8056675, 8070859); }
                    if slot == 211 { return (10860561, 8070859, 8084740); }
                    if slot == 212 { return (10912033, 8084740, 8098316); }
                    if slot == 213 { return (10963505, 8098316, 8111587); }
                    if slot == 214 { return (11014977, 8111587, 8124552); }
                    if slot == 215 { return (11066449, 8124552, 8137212); }
                    if slot == 216 { return (11117920, 8137212, 8149565); }
                    if slot == 217 { return (11169392, 8149565, 8161612); }
                    if slot == 218 { return (11220864, 8161612, 8173351); }
                    if slot == 219 { return (11272336, 8173351, 8184783); }
                    if slot == 220 { return (11323808, 8184783, 8195906); }
                    if slot == 221 { return (11375280, 8195906, 8206721); }
                    if slot == 222 { return (11426752, 8206721, 8217227); }
                    if slot == 223 { return (11478223, 8217227, 8227423); }
                }
            } else {
                if slot < 240 {
                    if slot == 224 { return (11529695, 8227423, 8237310); }
                    if slot == 225 { return (11581167, 8237310, 8246887); }
                    if slot == 226 { return (11632639, 8246887, 8256153); }
                    if slot == 227 { return (11684111, 8256153, 8265108); }
                    if slot == 228 { return (11735583, 8265108, 8273752); }
                    if slot == 229 { return (11787055, 8273752, 8282085); }
                    if slot == 230 { return (11838531, 8282085, 8290105); }
                    if slot == 231 { return (11889998, 8290105, 8297814); }
                    if slot == 232 { return (11941470, 8297814, 8305210); }
                    if slot == 233 { return (11992942, 8305210, 8312294); }
                    if slot == 234 { return (12044414, 8312294, 8319064); }
                    if slot == 235 { return (12095886, 8319064, 8325522); }
                    if slot == 236 { return (12147358, 8325522, 8331666); }
                    if slot == 237 { return (12198829, 8331666, 8337496); }
                    if slot == 238 { return (12250301, 8337496, 8343012); }
                    if slot == 239 { return (12301773, 8343012, 8348215); }
                } else {
                    if slot == 240 { return (12353244, 8348215, 8353102); }
                    if slot == 241 { return (12404717, 8353102, 8357676); }
                    if slot == 242 { return (12456189, 8357676, 8361935); }
                    if slot == 243 { return (12507661, 8361935, 8365879); }
                    if slot == 244 { return (12559132, 8365879, 8369508); }
                    if slot == 245 { return (12610604, 8369508, 8372822); }
                    if slot == 246 { return (12662076, 8372822, 8375820); }
                    if slot == 247 { return (12713548, 8375820, 8378504); }
                    if slot == 248 { return (12765020, 8378504, 8380871); }
                    if slot == 249 { return (12816492, 8380871, 8382924); }
                    if slot == 250 { return (12867964, 8382924, 8384660); }
                    if slot == 251 { return (12919435, 8384660, 8386082); }
                    if slot == 252 { return (12970907, 8386082, 8387187); }
                    if slot == 253 { return (13022379, 8387187, 8387976); }
                    if slot == 254 { return (13073851, 8387976, 8388450); }
                }
            }
        }
    }

    // Tail entry for the last slot (255 and above).
    (13125323, 8388450, 8388608)
}

// Arctangent lookup table, bucketed into slots of width 58720 fixed-point
// units. Same (start, low, high) contract as `sin`; trig.cairo's atan_fast
// interpolates linearly between `low` and `high` using (a - start) / 58720.
fn atan(a: u32) -> (u32, u32, u32) {
    let slot = a / 58720;

    if slot == 0 { return (0, 0, 58719); }
    if slot == 1 { return (58720, 58719, 117433); }
    if slot == 2 { return (117441, 117433, 176135); }
    if slot == 3 { return (176161, 176135, 234820); }
    if slot == 4 { return (234881, 234820, 293481); }
    if slot == 5 { return (293601, 293481, 352115); }
    if slot == 6 { return (352322, 352115, 410713); }
    if slot == 7 { return (411042, 410713, 469272); }
    if slot == 8 { return (469762, 469272, 527785); }
    if slot == 9 { return (528482, 527785, 586246); }
    if slot == 10 { return (587201, 586246, 644651); }
    if slot == 11 { return (645923, 644651, 702993); }
    if slot == 12 { return (704643, 702993, 761267); }
    if slot == 13 { return (763363, 761267, 819467); }
    if slot == 14 { return (822084, 819467, 877588); }
    if slot == 15 { return (880804, 877588, 935625); }
    if slot == 16 { return (939524, 935625, 993572); }
    if slot == 17 { return (998244, 993572, 1051424); }
    if slot == 18 { return (1056965, 1051424, 1109175); }
    if slot == 19 { return (1115685, 1109175, 1166821); }
    if slot == 20 { return (1174411, 1166821, 1224357); }
    if slot == 21 { return (1233125, 1224357, 1281776); }
    if slot == 22 { return (1291846, 1281776, 1339075); }
    if slot == 23 { return (1350566, 1339075, 1396248); }
    if slot == 24 { return (1409286, 1396248, 1453290); }
    if slot == 25 { return (1468006, 1453290, 1510197); }
    if slot == 26 { return (1526727, 1510197, 1566964); }
    if slot == 27 { return (1585447, 1566964, 1623585); }
    if slot == 28 { return (1644167, 1623585, 1680058); }
    if slot == 29 { return (1702887, 1680058, 1736376); }
    if slot == 30 { return (1761612, 1736376, 1792537); }
    if slot == 31 { return (1820328, 1792537, 1848534); }
    if slot == 32 { return (1879048, 1848534, 1904364); }
    if slot == 33 { return (1937768, 1904364, 1960024); }
    if slot == 34 { return (1996489, 1960024, 2015508); }
    if slot == 35 { return (2055209, 2015508, 2070813); }
    if slot == 36 { return (2113929, 2070813, 2125935); }
    if slot == 37 { return (2172649, 2125935, 2180869); }
    if slot == 38 { return (2231370, 2180869, 2235613); }
    if slot == 39 { return (2290090, 2235613, 2290163); }
    if slot == 40 { return (2348813, 2290163, 2344515); }
    if slot == 41 { return (2407530, 2344515, 2398665); }
    if slot == 42 { return (2466251, 2398665, 2452611); }
    if slot == 43 { return (2524971, 2452611, 2506348); }
    if slot == 44 { return (2583691, 2506348, 2559875); }
    if slot == 45 { return (2642412, 2559875, 2613187); }
    if slot == 46 { return (2701132, 2613187, 2666281); }
    if slot == 47 { return (2759852, 2666281, 2719156); }
    if slot == 48 { return (2818572, 2719156, 2771807); }
    if slot == 49 { return (2877293, 2771807, 2824233); }
    if slot == 50 { return (2936014, 2824233, 2876431); }
    if slot == 51 { return (2994733, 2876431, 2928397); }
    if slot == 52 { return (3053453, 2928397, 2980130); }
    if slot == 53 { return (3112174, 2980130, 3031628); }
    if slot == 54 { return (3170894, 3031628, 3082888); }
    if slot == 55 { return (3229614, 3082888, 3133907); }
    if slot == 56 { return (3288334, 3133907, 3184685); }
    if slot == 57 { return (3347055, 3184685, 3235218); }
    if slot == 58 { return (3405775, 3235218, 3285506); }
    if slot == 59 { return (3464495, 3285506, 3335545); }
    if slot == 60 { return (3523224, 3335545, 3385336); }
    if slot == 61 { return (3581936, 3385336, 3434875); }
    if slot == 62 { return (3640656, 3434875, 3484161); }
    if slot == 63 { return (3699376, 3484161, 3533193); }
    if slot == 64 { return (3758096, 3533193, 3581970); }
    if slot == 65 { return (3816817, 3581970, 3630491); }
    if slot == 66 { return (3875537, 3630491, 3678753); }
    if slot == 67 { return (3934257, 3678753, 3726756); }
    if slot == 68 { return (3992977, 3726756, 3774499); }
    if slot == 69 { return (4051698, 3774499, 3821981); }
    if slot == 70 { return (4110418, 3821981, 3869201); }
    if slot == 71 { return (4169138, 3869201, 3916159); }
    if slot == 72 { return (4227858, 3916159, 3962853); }
    if slot == 73 { return (4286579, 3962853, 4009282); }
    if slot == 74 { return (4345299, 4009282, 4055447); }
    if slot == 75 { return (4404019, 4055447, 4101347); }
    if slot == 76 { return (4462739, 4101347, 4146981); }
    if slot == 77 { return (4521460, 4146981, 4192350); }
    if slot == 78 { return (4580180, 4192350, 4237451); }
    if slot == 79 { return (4638900, 4237451, 4282286); }
    if slot == 80 { return (4697620, 4282286, 4326855); }
    if slot == 81 { return (4756341, 4326855, 4371156); }
    if slot == 82 { return (4815061, 4371156, 4415191); }
    if slot == 83 { return (4873781, 4415191, 4458958); }
    if slot == 84 { return (4932502, 4458958, 4502459); }
    if slot == 85 { return (4991222, 4502459, 4545693); }
    if slot == 86 { return (5049942, 4545693, 4588660); }
    if slot == 87 { return (5108662, 4588660, 4631361); }
    if slot == 88 { return (5167383, 4631361, 4673795); }
    if slot == 89 { return (5226103, 4673795, 4715964); }
    if slot == 90 { return (5284823, 4715964, 4757868); }
    if slot == 91 { return (5343543, 4757868, 4799506); }
    if slot == 92 { return (5402264, 4799506, 4840880); }
    if slot == 93 { return (5460984, 4840880, 4881990); }
    if slot == 94 { return (5519704, 4881990, 4922837); }
    if slot == 95 { return (5578424, 4922837, 4963420); }
    if slot == 96 { return (5637145, 4963420, 5003742); }
    if slot == 97 { return (5695865, 5003742, 5043802); }
    if slot == 98 { return (5754585, 5043802, 5083601); }

    // Tail entry for the last slot (99 and above).
    (5813305, 5083601, 5123141)
}

// Error-function lookup table: maps an fp8x23 magnitude x to erf(x) * ONE.
// Thresholds step by 83886 (~0.01) up to 16693329 (~2.0), then coarsen;
// anything above 28521267 (~3.4) saturates at ONE (erf ~ 1).
fn erf_lut(x: u32) -> u32 {
    // Construct the erf lookup table
    if x <= 754974 {
        if x <= 0 { return 0; }
        if x <= 83886 { return 94652; }
        if x <= 167772 { return 189285; }
        if x <= 251658 { return 283880; }
        if x <= 335544 { return 378419; }
        if x <= 419430 { return 472882; }
        if x <= 503316 { return 567251; }
        if x <= 587202 { return 661506; }
        if x <= 671088 { return 755630; }
        if x <= 754974 { return 849603; }
    }
    if x <= 1593835 {
        if x <= 838860 { return 943407; }
        if x <= 922746 { return 1037024; }
        if x <= 1006632 { return 1130434; }
        if x <= 1090519 { return 1223622; }
        if x <= 1174405 { return 1316567; }
        if x <= 1258291 { return 1409252; }
        if x <= 1342177 { return 1501659; }
        if x <= 1426063 { return 1593772; }
        if x <= 1509949 { return 1685571; }
        if x <= 1593835 { return 1777041; }
    }
    if x <= 2432696 {
        if x <= 1677721 { return 1868164; }
        if x <= 1761607 { return 1958923; }
        if x <= 1845493 { return 2049302; }
        if x <= 1929379 { return 2139284; }
        if x <= 2013265 { return 2228853; }
        if x <= 2097152 { return 2317993; }
        if x <= 2181038 { return 2406689; }
        if x <= 2264924 { return 2494924; }
        if x <= 2348810 { return 2582685; }
        if x <= 2432696 { return 2669955; }
    }
    if x <= 3271557 {
        if x <= 2516582 { return 2756721; }
        if x <= 2600468 { return 2842967; }
        if x <= 2684354 { return 2928681; }
        if x <= 2768240 { return 3013847; }
        if x <= 2852126 { return 3098454; }
        if x <= 2936012 { return 3182487; }
        if x <= 3019898 { return 3265934; }
        if x <= 3103784 { return 3348782; }
        if x <= 3187671 { return 3431019; }
        if x <= 3271557 { return 3512634; }
    }
    if x <= 4110417 {
        if x <= 3355443 { return 3593615; }
        if x <= 3439329 { return 3673951; }
        if x <= 3523215 { return 3753630; }
        if x <= 3607101 { return 3832643; }
        if x <= 3690987 { return 3910979; }
        if x <= 3774873 { return 3988629; }
        if x <= 3858759 { return 4065584; }
        if x <= 3942645 { return 4141833; }
        if x <= 4026531 { return 4217369; }
        if x <= 4110417 { return 4292184; }
    }
    if x <= 4949278 {
        if x <= 4194304 { return 4366269; }
        if x <= 4278190 { return 4439617; }
        if x <= 4362076 { return 4512220; }
        if x <= 4445962 { return 4584073; }
        if x <= 4529848 { return 4655167; }
        if x <= 4613734 { return 4725498; }
        if x <= 4697620 { return 4795060; }
        if x <= 4781506 { return 4863847; }
        if x <= 4865392 { return 4931854; }
        if x <= 4949278 { return 4999077; }
    }
    if x <= 5788139 {
        if x <= 5033164 { return 5065512; }
        if x <= 5117050 { return 5131153; }
        if x <= 5200936 { return 5195999; }
        if x <= 5284823 { return 5260046; }
        if x <= 5368709 { return 5323291; }
        if x <= 5452595 { return 5385732; }
        if x <= 5536481 { return 5447366; }
        if x <= 5620367 { return 5508192; }
        if x <= 5704253 { return 5568208; }
        if x <= 5788139 { return 5627414; }
    }
    if x <= 6627000 {
        if x <= 5872025 { return 5685808; }
        if x <= 5955911 { return 5743390; }
        if x <= 6039797 { return 5800161; }
        if x <= 6123683 { return 5856120; }
        if x <= 6207569 { return 5911268; }
        if x <= 6291456 { return 5965605; }
        if x <= 6375342 { return 6019134; }
        if x <= 6459228 { return 6071855; }
        if x <= 6543114 { return 6123771; }
        if x <= 6627000 { return 6174883; }
    }
    if x <= 7465861 {
        if x <= 6710886 { return 6225194; }
        if x <= 6794772 { return 6274706; }
        if x <= 6878658 { return 6323422; }
        if x <= 6962544 { return 6371347; }
        if x <= 7046430 { return 6418482; }
        if x <= 7130316 { return 6464832; }
        if x <= 7214202 { return 6510400; }
        if x <= 7298088 { return 6555192; }
        if x <= 7381975 { return 6599211; }
        if x <= 7465861 { return 6642462; }
    }
    if x <= 8304721 {
        if x <= 7549747 { return 6684950; }
        if x <= 7633633 { return 6726680; }
        if x <= 7717519 { return 6767658; }
        if x <= 7801405 { return 6807888; }
        if x <= 7885291 { return 6847377; }
        if x <= 7969177 { return 6886131; }
        if x <= 8053063 { return 6924155; }
        if x <= 8136949 { return 6961456; }
        if x <= 8220835 { return 6998041; }
        if x <= 8304721 { return 7033915; }
    }
    if x <= 9143582 {
        if x <= 8388608 { return 7069086; }
        if x <= 8472494 { return 7103561; }
        if x <= 8556380 { return 7137346; }
        if x <= 8640266 { return 7170449; }
        if x <= 8724152 { return 7202877; }
        if x <= 8808038 { return 7234638; }
        if x <= 8891924 { return 7265739; }
        if x <= 8975810 { return 7296187; }
        if x <= 9059696 { return 7325990; }
        if x <= 9143582 { return 7355157; }
    }
    if x <= 9982443 {
        if x <= 9227468 { return 7383695; }
        if x <= 9311354 { return 7411612; }
        if x <= 9395240 { return 7438915; }
        if x <= 9479127 { return 7465615; }
        if x <= 9563013 { return 7491717; }
        if x <= 9646899 { return 7517231; }
        if x <= 9730785 { return 7542165; }
        if x <= 9814671 { return 7566527; }
        if x <= 9898557 { return 7590326; }
        if x <= 9982443 { return 7613570; }
    }
    if x <= 10821304 {
        if x <= 10066329 { return 7636267; }
        if x <= 10150215 { return 7658425; }
        if x <= 10234101 { return 7680054; }
        if x <= 10317987 { return 7701162; }
        if x <= 10401873 { return 7721757; }
        if x <= 10485760 { return 7741847; }
        if x <= 10569646 { return 7761441; }
        if x <= 10653532 { return 7780548; }
        if x <= 10737418 { return 7799175; }
        if x <= 10821304 { return 7817332; }
    }
    if x <= 11660165 {
        if x <= 10905190 { return 7835026; }
        if x <= 10989076 { return 7852266; }
        if x <= 11072962 { return 7869060; }
        if x <= 11156848 { return 7885417; }
        if x <= 11240734 { return 7901344; }
        if x <= 11324620 { return 7916851; }
        if x <= 11408506 { return 7931944; }
        if x <= 11492392 { return 7946632; }
        if x <= 11576279 { return 7960923; }
        if x <= 11660165 { return 7974825; }
    }
    if x <= 12499025 {
        if x <= 11744051 { return 7988346; }
        if x <= 11827937 { return 8001494; }
        if x <= 11911823 { return 8014276; }
        if x <= 11995709 { return 8026700; }
        if x <= 12079595 { return 8038774; }
        if x <= 12163481 { return 8050505; }
        if x <= 12247367 { return 8061901; }
        if x <= 12331253 { return 8072969; }
        if x <= 12415139 { return 8083716; }
        if x <= 12499025 { return 8094149; }
    }
    if x <= 13337886 {
        if x <= 12582912 { return 8104277; }
        if x <= 12666798 { return 8114105; }
        if x <= 12750684 { return 8123641; }
        if x <= 12834570 { return 8132891; }
        if x <= 12918456 { return 8141862; }
        if x <= 13002342 { return 8150562; }
        if x <= 13086228 { return 8158996; }
        if x <= 13170114 { return 8167170; }
        if x <= 13254000 { return 8175092; }
        if x <= 13337886 { return 8182768; }
    }
    if x <= 14176747 {
        if x <= 13421772 { return 8190203; }
        if x <= 13505658 { return 8197405; }
        if x <= 13589544 { return 8204378; }
        if x <= 13673431 { return 8211128; }
        if x <= 13757317 { return 8217663; }
        if x <= 13841203 { return 8223986; }
        if x <= 13925089 { return 8230104; }
        if x <= 14008975 { return 8236022; }
        if x <= 14092861 { return 8241746; }
        if x <= 14176747 { return 8247281; }
    }
    if x <= 15015608 {
        if x <= 14260633 { return 8252632; }
        if x <= 14344519 { return 8257804; }
        if x <= 14428405 { return 8262802; }
        if x <= 14512291 { return 8267631; }
        if x <= 14596177 { return 8272296; }
        if x <= 14680064 { return 8276801; }
        if x <= 14763950 { return 8281152; }
        if x <= 14847836 { return 8285352; }
        if x <= 14931722 { return 8289405; }
        if x <= 15015608 { return 8293318; }
    }
    if x <= 15854469 {
        if x <= 15099494 { return 8297092; }
        if x <= 15183380 { return 8300733; }
        if x <= 15267266 { return 8304245; }
        if x <= 15351152 { return 8307631; }
        if x <= 15435038 { return 8310895; }
        if x <= 15518924 { return 8314041; }
        if x <= 15602810 { return 8317074; }
        if x <= 15686696 { return 8319995; }
        if x <= 15770583 { return 8322809; }
        if x <= 15854469 { return 8325519; }
    }
    if x <= 16693329 {
        if x <= 15938355 { return 8328129; }
        if x <= 16022241 { return 8330642; }
        if x <= 16106127 { return 8333060; }
        if x <= 16190013 { return 8335387; }
        if x <= 16273899 { return 8337626; }
        if x <= 16357785 { return 8339780; }
        if x <= 16441671 { return 8341852; }
        if x <= 16525557 { return 8343844; }
        if x <= 16609443 { return 8345758; }
        if x <= 16693329 { return 8347599; }
    }
    if x <= 28521267 {
        // Coarser steps above x ~ 2.0, where erf flattens out.
        if x <= 16777216 { return 8349368; }
        if x <= 17616076 { return 8363614; }
        if x <= 18454937 { return 8372981; }
        if x <= 19293798 { return 8379018; }
        if x <= 20132659 { return 8382832; }
        if x <= 20971520 { return 8385194; }
        if x <= 21810380 { return 8386627; }
        if x <= 22649241 { return 8387481; }
        if x <= 23488102 { return 8387978; }
        if x <= 24326963 { return 8388263; }
        if x <= 25165824 { return 8388422; }
        if x <= 26004684 { return 8388510; }
        if x <= 26843545 { return 8388557; }
        if x <= 27682406 { return 8388582; }
        if x <= 28521267 { return 8388595; }
    }

    // erf saturates at 1 beyond this range.
    ONE
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo
use core::integer;
use orion::numbers::fixed_point::implementations::fp8x23::math::lut;
use orion::numbers::fixed_point::implementations::fp8x23::core::{
    HALF, ONE, TWO, FP8x23, FP8x23Impl, FP8x23Add, FP8x23Sub, FP8x23Mul, FP8x23Div,
    FP8x23IntoFelt252, FixedTrait
};

// CONSTANTS
// 8.23 fixed-point encodings (value * 2^23).
const TWO_PI: u32 = 52707178;
const PI: u32 = 26353589;
const HALF_PI: u32 = 13176795;

// PUBLIC

// Calculates arccos(a) for -1 <= a <= 1 (fixed point)
// arccos(a) = arcsin(sqrt(1 - a^2)) - arctan identity has discontinuity at zero
fn acos(a: FP8x23) -> FP8x23 {
    let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    let asin_res = asin(asin_arg);

    // arcsin(sqrt(1 - a^2)) lands in [0, pi/2]; mirror across pi/2 for negative inputs.
    if (a.sign) {
        FixedTrait::new(PI, false) - asin_res
    } else {
        asin_res
    }
}

// LUT-accelerated variant of `acos`; same identity, delegates to `asin_fast`.
fn acos_fast(a: FP8x23) -> FP8x23 {
    let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    let asin_res = asin_fast(asin_arg);

    if (a.sign) {
        FixedTrait::new(PI, false) - asin_res
    } else {
        asin_res
    }
}

// Calculates arcsin(a) for -1 <= a <= 1 (fixed point)
// arcsin(a) = arctan(a / sqrt(1 - a^2))
fn asin(a: FP8x23) -> FP8x23 {
    // |a| == 1 would make the atan argument blow up; return +/- pi/2 directly.
    if (a.mag == ONE) {
        return FixedTrait::new(HALF_PI, a.sign);
    }

    let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    atan(a / div)
}

// LUT-accelerated variant of `asin`; delegates to `atan_fast`.
fn asin_fast(a: FP8x23) -> FP8x23 {
    if (a.mag == ONE) {
        return FixedTrait::new(HALF_PI, a.sign);
    }

    let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    atan_fast(a / div)
}

// Calculates arctan(a) (fixed point)
// See https://stackoverflow.com/a/50894477 for range adjustments
fn atan(a: FP8x23) -> FP8x23 {
    let mut at = a.abs();
    let mut shift = false;
    let mut invert = false;

    // Invert value when a > 1
    if (at.mag > ONE) {
        at = FixedTrait::ONE() / at;
        invert = true;
    }

    // Account for lack of precision in polynomial when a > 0.7
    if (at.mag > 5872026) { // 0.7 * 2^23
        let sqrt3_3 = FixedTrait::new(4843165, false); // sqrt(3) / 3
        at = (at - sqrt3_3) / (FixedTrait::ONE() + at * sqrt3_3);
        shift = true;
    }

    // Degree-10 polynomial approximation evaluated Horner-style.
    let r10 = FixedTrait::new(15363, true) * at;
    let r9 = (r10 + FixedTrait::new(392482, true)) * at;
    let r8 = (r9 + FixedTrait::new(1629064, false)) * at;
    let r7 = (r8 + FixedTrait::new(2197820, true)) * at;
    let r6 = (r7 + FixedTrait::new(366693, false)) * at;
    let r5 = (r6 + FixedTrait::new(1594324, false)) * at;
    let r4 = (r5 + FixedTrait::new(11519, false)) * at;
    let r3 = (r4 + FixedTrait::new(2797104, true)) * at;
    let r2 = (r3 + FixedTrait::new(34, false)) * at;
    let mut res = (r2 + FixedTrait::new(8388608, false)) * at;

    // Adjust for sign change, inversion, and shift
    if (shift) {
        res = res + FixedTrait::new(4392265, false); // pi / 6
    }

    if (invert) {
        res = res - FixedTrait::new(HALF_PI, false);
    }

    FixedTrait::new(res.mag, a.sign)
}

// LUT-accelerated variant of `atan`: same range reduction, then linear
// interpolation between neighbouring lookup-table entries.
fn atan_fast(a: FP8x23) -> FP8x23 {
    let mut at = a.abs();
    let mut shift = false;
    let mut invert = false;

    // Invert value when a > 1
    if (at.mag > ONE) {
        at = FixedTrait::ONE() / at;
        invert = true;
    }

    // Account for lack of precision in polynomial when a > 0.7
    if (at.mag > 5872026) {
        let sqrt3_3 = FixedTrait::new(4843165, false); // sqrt(3) / 3
        at = (at - sqrt3_3) / (FixedTrait::ONE() + at * sqrt3_3);
        shift = true;
    }

    // 58720 is the LUT step size — presumably matches lut::atan's spacing; TODO confirm.
    let (start, low, high) = lut::atan(at.mag);
    let partial_step = FixedTrait::new(at.mag - start, false) / FixedTrait::new(58720, false);
    let mut res = partial_step * FixedTrait::new(high - low, false) + FixedTrait::new(low, false);

    // Adjust for sign change, inversion, and shift
    if (shift) {
        res = res + FixedTrait::new(4392265, false); // pi / 6
    }

    if (invert) {
        res = res - FixedTrait::<FP8x23>::new(HALF_PI, false);
    }

    FixedTrait::new(res.mag, a.sign)
}

// Calculates cos(a) with a in radians (fixed point)
fn cos(a: FP8x23) -> FP8x23 {
    sin(FixedTrait::new(HALF_PI, false) - a)
}

fn cos_fast(a: FP8x23) -> FP8x23 {
    sin_fast(FixedTrait::new(HALF_PI, false) - a)
}

// Calculates sin(a) with a in radians via a Taylor-series loop.
fn sin(a: FP8x23) -> FP8x23 {
    // Reduce to [0, 2*pi), then split into half-turn index + remainder.
    let a1 = a.mag % TWO_PI;
    let (whole_rem, partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI));
    let a2 = FixedTrait::new(partial_rem, false);
    // Second half-turn flips the sign.
    let partial_sign = whole_rem == 1;

    let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE());
    FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0)
}

// LUT-accelerated variant of `sin`.
fn sin_fast(a: FP8x23) -> FP8x23 {
    let a1 = a.mag % TWO_PI;
    let (whole_rem, mut partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI));
    let partial_sign = whole_rem == 1;

    // Fold [pi/2, pi) onto [0, pi/2) — sin is symmetric about pi/2.
    if partial_rem >= HALF_PI {
        partial_rem = PI - partial_rem;
    }

    // 51472 is the LUT step size — presumably matches lut::sin's spacing; TODO confirm.
    let (start, low, high) = lut::sin(partial_rem);
    let partial_step = FixedTrait::new(partial_rem - start, false) / FixedTrait::new(51472, false);
    let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false))
        + FixedTrait::<FP8x23>::new(low, false);

    FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0)
}

// Calculates tan(a) with a in radians (fixed point)
fn tan(a: FP8x23) -> FP8x23 {
    let sinx = sin(a);
    let cosx = cos(a);
    assert(cosx.mag != 0, 'tan undefined');
    sinx / cosx
}

fn tan_fast(a: FP8x23) -> FP8x23 {
    let sinx = sin_fast(a);
    let cosx = cos_fast(a);
    assert(cosx.mag != 0, 'tan undefined');
    sinx / cosx
}

// Helper function to calculate Taylor series for sin
fn _sin_loop(a: FP8x23, i: u32, acc: FP8x23) -> FP8x23 {
    let div = (2 * i + 2) * (2 * i + 3);
    let term = a * a * acc / FixedTrait::new_unscaled(div, false);
    let new_acc = FixedTrait::ONE() - term;

    if (i == 0) {
        return new_acc;
    }

    _sin_loop(a, i - 1, new_acc)
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use orion::numbers::fixed_point::implementations::fp8x23::helpers::{
        assert_precise, assert_relative
    };

    use super::{
        FixedTrait, acos, HALF_PI, ONE, acos_fast, PI, atan_fast, atan, asin, cos, cos_fast, sin,
        sin_fast, tan
    };

    #[test]
    #[available_gas(3000000)]
    fn test_acos() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::ONE();
        assert(acos(a).into() == 0, 'invalid one');

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(acos(a), 8784530, 'invalid half', error); // 1.0471975506263043

        let a = FixedTrait::ZERO();
        assert_relative(acos(a), HALF_PI.into(), 'invalid zero', Option::None(())); // PI / 2

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(acos(a), 17569060, 'invalid neg half', error); // 2.094395102963489

        let a = FixedTrait::new(ONE, true);
        assert_relative(acos(a), PI.into(), 'invalid neg one', Option::None(())); // PI
    }

    #[test]
    #[available_gas(3000000)]
    fn test_acos_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::ONE();
        assert(acos_fast(a).into() == 0, 'invalid one');

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(acos_fast(a), 8784530, 'invalid half', error); // 1.0471975506263043

        let a = FixedTrait::ZERO();
        assert_relative(acos_fast(a), HALF_PI.into(), 'invalid zero', Option::None(())); // PI / 2

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(acos_fast(a), 17569060, 'invalid neg half', error); // 2.094395102963489

        let a = FixedTrait::new(ONE, true);
        assert_relative(acos_fast(a), PI.into(), 'invalid neg one', Option::None(())); // PI
    }

    #[test]
    #[should_panic]
    #[available_gas(1000000)]
    fn test_acos_fail() {
        let a = FixedTrait::new(2 * ONE, true);
        acos(a);
    }

    #[test]
    #[available_gas(1400000)]
    fn test_atan_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::new(2 * ONE, false);
        assert_relative(atan_fast(a), 9287437, 'invalid two', error);

        let a = FixedTrait::ONE();
        assert_relative(atan_fast(a), 6588397, 'invalid one', error);

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(atan_fast(a), 3889358, 'invalid half', error);

        let a = FixedTrait::ZERO();
        assert(atan_fast(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(atan_fast(a), -3889358, 'invalid neg half', error);

        let a = FixedTrait::new(ONE, true);
        assert_relative(atan_fast(a), -6588397, 'invalid neg one', error);

        let a = FixedTrait::new(2 * ONE, true);
        assert_relative(atan_fast(a), -9287437, 'invalid neg two', error);
    }

    #[test]
    #[available_gas(2600000)]
    fn test_atan() {
        let a = FixedTrait::new(2 * ONE, false);
        assert_relative(atan(a), 9287437, 'invalid two', Option::None(()));

        let a = FixedTrait::ONE();
        assert_relative(atan(a), 6588397, 'invalid one', Option::None(()));

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(atan(a), 3889358, 'invalid half', Option::None(()));

        let a = FixedTrait::ZERO();
        assert(atan(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(atan(a), -3889358, 'invalid neg half', Option::None(()));

        let a = FixedTrait::new(ONE, true);
        assert_relative(atan(a), -6588397, 'invalid neg one', Option::None(()));

        let a = FixedTrait::new(2 * ONE, true);
        assert_relative(atan(a), -9287437, 'invalid neg two', Option::None(()));
    }

    #[test]
    #[available_gas(3000000)]
    fn test_asin() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::ONE();
        assert_relative(asin(a), HALF_PI.into(), 'invalid one', Option::None(())); // PI / 2

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(asin(a), 4392265, 'invalid half', error);

        let a = FixedTrait::ZERO();
        assert_precise(asin(a), 0, 'invalid zero', Option::None(()));

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(asin(a), -4392265, 'invalid neg half', error);

        let a = FixedTrait::new(ONE, true);
        assert_relative(asin(a), -HALF_PI.into(), 'invalid neg one', Option::None(())); // -PI / 2
    }

    #[test]
    #[should_panic]
    #[available_gas(1000000)]
    fn test_asin_fail() {
        let a = FixedTrait::new(2 * ONE, false);
        asin(a);
    }

    #[test]
    #[available_gas(6000000)]
    fn test_cos() {
        let a = FixedTrait::new(HALF_PI, false);
        assert(cos(a).into() == 0, 'invalid half pi');

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_relative(
            cos(a), 5931642, 'invalid quarter pi', Option::None(())
        ); // 0.7071067811865475

        let a = FixedTrait::new(PI, false);
        assert_relative(cos(a), -1 * ONE.into(), 'invalid pi', Option::None(()));

        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(cos(a), 0, 'invalid neg half pi', Option::None(()));

        let a = FixedTrait::new_unscaled(17, false);
        assert_relative(cos(a), -2308239, 'invalid 17', Option::None(())); // -0.2751631780463348

        let a = FixedTrait::new_unscaled(17, true);
        assert_relative(cos(a), -2308236, 'invalid -17', Option::None(())); // -0.2751631780463348
    }

    #[test]
    #[available_gas(6000000)]
    fn test_cos_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::new(HALF_PI, false);
        assert(cos_fast(a).into() == 0, 'invalid half pi');

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(cos_fast(a), 5931642, 'invalid quarter pi', error); // 0.7071067811865475

        let a = FixedTrait::new(PI, false);
        assert_precise(cos_fast(a), -1 * ONE.into(), 'invalid pi', error);

        let a = FixedTrait::new(HALF_PI, true);
        // FIX: this case previously asserted on `cos(a)`, leaving cos_fast untested here.
        assert_precise(cos_fast(a), 0, 'invalid neg half pi', Option::None(()));

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(cos_fast(a), -2308239, 'invalid 17', error); // -0.2751631780463348
    }

    #[test]
    #[available_gas(6000000)]
    fn test_sin() {
        let a = FixedTrait::new(HALF_PI, false);
        assert_precise(sin(a), ONE.into(), 'invalid half pi', Option::None(()));

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(
            sin(a), 5931642, 'invalid quarter pi', Option::None(())
        ); // 0.7071067811865475

        let a = FixedTrait::new(PI, false);
        assert(sin(a).into() == 0, 'invalid pi');

        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(
            sin(a), -ONE.into(), 'invalid neg half pi', Option::None(())
        ); // 0.9999999999939766

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(sin(a), -8064787, 'invalid 17', Option::None(())); // -0.9613974918793389

        let a = FixedTrait::new_unscaled(17, true);
        assert_precise(sin(a), 8064787, 'invalid -17', Option::None(())); // 0.9613974918793389
    }

    #[test]
    #[available_gas(1000000)]
    fn test_sin_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::new(HALF_PI, false);
        assert_precise(sin_fast(a), ONE.into(), 'invalid half pi', error);

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(sin_fast(a), 5931642, 'invalid quarter pi', error); // 0.7071067811865475

        let a = FixedTrait::new(PI, false);
        assert(sin_fast(a).into() == 0, 'invalid pi');

        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(
            sin_fast(a), -ONE.into(), 'invalid neg half pi', error
        ); // 0.9999999999939766

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(sin_fast(a), -8064787, 'invalid 17', error); // -0.9613974918793389

        let a = FixedTrait::new_unscaled(17, true);
        assert_precise(sin_fast(a), 8064787, 'invalid -17', error); // 0.9613974918793389
    }

    #[test]
    #[available_gas(8000000)]
    fn test_tan() {
        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(tan(a), ONE.into(), 'invalid quarter pi', Option::None(()));

        let a = FixedTrait::new(PI, false);
        assert_precise(tan(a), 0, 'invalid pi', Option::None(()));

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(tan(a), 29309069, 'invalid 17', Option::None(())); // 3.493917677159002

        let a = FixedTrait::new_unscaled(17, true);
        assert_precise(tan(a), -29309106, 'invalid -17', Option::None(())); // -3.493917677159002
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23wide.cairo
mod core; mod math; mod helpers;
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23wide/core.cairo
use core::debug::PrintTrait;

use orion::numbers::{fixed_point::core::{FixedTrait}, FP8x23};
use orion::numbers::fixed_point::implementations::fp8x23wide::math::{
    core as core_math, trig, hyp, erf
};
use orion::numbers::fixed_point::utils;

/// A struct representing a fixed point number.
/// `mag` is the unsigned 8.23 magnitude (value * 2^23, stored widened to u64);
/// `sign` is true for negative values.
#[derive(Serde, Copy, Drop)]
struct FP8x23W {
    mag: u64,
    sign: bool
}

// CONSTANTS

const TWO: u64 = 16777216; // 2 ** 24
const ONE: u64 = 8388608; // 2 ** 23
const HALF: u64 = 4194304; // 2 ** 22
const MAX: u64 = 2147483648; // 2 ** 31

impl FP8x23WImpl of FixedTrait<FP8x23W, u64> {
    fn ZERO() -> FP8x23W {
        FP8x23W { mag: 0, sign: false }
    }

    fn HALF() -> FP8x23W {
        FP8x23W { mag: HALF, sign: false }
    }

    fn ONE() -> FP8x23W {
        FP8x23W { mag: ONE, sign: false }
    }

    fn MAX() -> FP8x23W {
        FP8x23W { mag: MAX, sign: false }
    }

    // Wraps a raw (already scaled) magnitude and sign.
    fn new(mag: u64, sign: bool) -> FP8x23W {
        FP8x23W { mag: mag, sign: sign }
    }

    // Builds a fixed-point value from an integer (scales by 2^23).
    fn new_unscaled(mag: u64, sign: bool) -> FP8x23W {
        FP8x23W { mag: mag * ONE, sign: sign }
    }

    // Interprets a felt252 as a signed, already-scaled fixed-point value.
    fn from_felt(val: felt252) -> FP8x23W {
        let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap();
        FixedTrait::new(mag, utils::felt_sign(val))
    }

    fn abs(self: FP8x23W) -> FP8x23W {
        core_math::abs(self)
    }

    // NOTE: in this wide implementation `acos` delegates to the LUT-based
    // fast variant (same for asin/atan/cos/sin/tan below).
    fn acos(self: FP8x23W) -> FP8x23W {
        trig::acos_fast(self)
    }

    fn acos_fast(self: FP8x23W) -> FP8x23W {
        trig::acos_fast(self)
    }

    fn acosh(self: FP8x23W) -> FP8x23W {
        hyp::acosh(self)
    }

    fn asin(self: FP8x23W) -> FP8x23W {
        trig::asin_fast(self)
    }

    fn asin_fast(self: FP8x23W) -> FP8x23W {
        trig::asin_fast(self)
    }

    fn asinh(self: FP8x23W) -> FP8x23W {
        hyp::asinh(self)
    }

    fn atan(self: FP8x23W) -> FP8x23W {
        trig::atan_fast(self)
    }

    fn atan_fast(self: FP8x23W) -> FP8x23W {
        trig::atan_fast(self)
    }

    fn atanh(self: FP8x23W) -> FP8x23W {
        hyp::atanh(self)
    }

    fn ceil(self: FP8x23W) -> FP8x23W {
        core_math::ceil(self)
    }

    fn cos(self: FP8x23W) -> FP8x23W {
        trig::cos_fast(self)
    }

    fn cos_fast(self: FP8x23W) -> FP8x23W {
        trig::cos_fast(self)
    }

    fn cosh(self: FP8x23W) -> FP8x23W {
        hyp::cosh(self)
    }

    fn floor(self: FP8x23W) -> FP8x23W {
        core_math::floor(self)
    }

    // Calculates the natural exponent of x: e^x
    fn exp(self: FP8x23W) -> FP8x23W {
        core_math::exp(self)
    }

    // Calculates the binary exponent of x: 2^x
    fn exp2(self: FP8x23W) -> FP8x23W {
        core_math::exp2(self)
    }

    // Calculates the natural logarithm of x: ln(x)
    // self must be greater than zero
    fn ln(self: FP8x23W) -> FP8x23W {
        core_math::ln(self)
    }

    // Calculates the binary logarithm of x: log2(x)
    // self must be greater than zero
    fn log2(self: FP8x23W) -> FP8x23W {
        core_math::log2(self)
    }

    // Calculates the base 10 log of x: log10(x)
    // self must be greater than zero
    fn log10(self: FP8x23W) -> FP8x23W {
        core_math::log10(self)
    }

    // Calculates the value of x^y and checks for overflow before returning
    // self is a fixed point value
    // b is a fixed point value
    fn pow(self: FP8x23W, b: FP8x23W) -> FP8x23W {
        core_math::pow(self, b)
    }

    fn round(self: FP8x23W) -> FP8x23W {
        core_math::round(self)
    }

    fn sin(self: FP8x23W) -> FP8x23W {
        trig::sin_fast(self)
    }

    fn sin_fast(self: FP8x23W) -> FP8x23W {
        trig::sin_fast(self)
    }

    fn sinh(self: FP8x23W) -> FP8x23W {
        hyp::sinh(self)
    }

    // Calculates the square root of a fixed point value
    // x must be positive
    fn sqrt(self: FP8x23W) -> FP8x23W {
        core_math::sqrt(self)
    }

    fn tan(self: FP8x23W) -> FP8x23W {
        trig::tan_fast(self)
    }

    fn tan_fast(self: FP8x23W) -> FP8x23W {
        trig::tan_fast(self)
    }

    fn tanh(self: FP8x23W) -> FP8x23W {
        hyp::tanh(self)
    }

    fn sign(self: FP8x23W) -> FP8x23W {
        core_math::sign(self)
    }

    // NaN is encoded as "negative zero" (mag 0, sign true).
    fn NaN() -> FP8x23W {
        FP8x23W { mag: 0, sign: true }
    }

    fn is_nan(self: FP8x23W) -> bool {
        self == FP8x23W { mag: 0, sign: true }
    }

    // Infinity sentinel: mag 4294967295 (2^32 - 1) — presumably chosen to be
    // outside the normal value range; TODO confirm against the non-wide impl.
    fn INF() -> FP8x23W {
        FP8x23W { mag: 4294967295, sign: false }
    }

    fn POS_INF() -> FP8x23W {
        FP8x23W { mag: 4294967295, sign: false }
    }

    fn NEG_INF() -> FP8x23W {
        FP8x23W { mag: 4294967295, sign: true }
    }

    fn is_inf(self: FP8x23W) -> bool {
        self.mag == 4294967295
    }

    fn is_pos_inf(self: FP8x23W) -> bool {
        self.is_inf() && !self.sign
    }

    fn is_neg_inf(self: FP8x23W) -> bool {
        self.is_inf() && self.sign
    }

    fn erf(self: FP8x23W) -> FP8x23W {
        erf::erf(self)
    }
}

impl FP8x23WPrint of PrintTrait<FP8x23W> {
    // Prints sign then raw magnitude (no unscaling).
    fn print(self: FP8x23W) {
        self.sign.print();
        self.mag.print();
    }
}

// Into a raw felt without unscaling
impl FP8x23WIntoFelt252 of Into<FP8x23W, felt252> {
    fn into(self: FP8x23W) -> felt252 {
        let mag_felt = self.mag.into();

        if self.sign {
            mag_felt * -1
        } else {
            mag_felt * 1
        }
    }
}

// Widening conversion from the u32-magnitude FP8x23 — always succeeds.
impl FP8x23IntoFP8x23W of Into<FP8x23, FP8x23W> {
    fn into(self: FP8x23) -> FP8x23W {
        FP8x23W { mag: self.mag.into(), sign: self.sign }
    }
}

// Narrowing conversion back to FP8x23 — None when the magnitude exceeds u32.
impl FP8x23WTryIntoFP8x23 of TryInto<FP8x23W, FP8x23> {
    fn try_into(self: FP8x23W) -> Option<FP8x23> {
        match self.mag.try_into() {
            Option::Some(val) => { Option::Some(FP8x23 { mag: val, sign: self.sign }) },
            Option::None => { Option::None(()) }
        }
    }
}

// All unsigned conversions below return None for negative values.
impl FP8x23WTryIntoU128 of TryInto<FP8x23W, u128> {
    fn try_into(self: FP8x23W) -> Option<u128> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            Option::Some((self.mag / ONE).into())
        }
    }
}

impl FP8x23WTryIntoU64 of TryInto<FP8x23W, u64> {
    fn try_into(self: FP8x23W) -> Option<u64> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            Option::Some((self.mag / ONE).into())
        }
    }
}

impl FP8x23WTryIntoU32 of TryInto<FP8x23W, u32> {
    fn try_into(self: FP8x23W) -> Option<u32> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP8x23WTryIntoU16 of TryInto<FP8x23W, u16> {
    fn try_into(self: FP8x23W) -> Option<u16> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP8x23WTryIntoU8 of TryInto<FP8x23W, u8> {
    fn try_into(self: FP8x23W) -> Option<u8> {
        if self.sign {
            Option::None(())
        } else {
            // Unscale the magnitude and round down
            (self.mag / ONE).try_into()
        }
    }
}

impl FP8x23WIntoI32 of Into<FP8x23W, i32> {
    fn into(self: FP8x23W) -> i32 {
        _i32_into_fp(self)
    }
}

impl FP8x23WTryIntoI8 of TryInto<FP8x23W, i8> {
    fn try_into(self: FP8x23W) -> Option<i8> {
        _i8_try_from_fp(self)
    }
}

impl FP8x23WPartialEq of PartialEq<FP8x23W> {
    #[inline(always)]
    fn eq(lhs: @FP8x23W, rhs: @FP8x23W) -> bool {
        core_math::eq(lhs, rhs)
    }

    #[inline(always)]
    fn ne(lhs: @FP8x23W, rhs: @FP8x23W) -> bool {
        core_math::ne(lhs, rhs)
    }
}

impl FP8x23WAdd of Add<FP8x23W> {
    fn add(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
        core_math::add(lhs, rhs)
    }
}

impl FP8x23WAddEq of AddEq<FP8x23W> {
    #[inline(always)]
    fn add_eq(ref self: FP8x23W, other: FP8x23W) {
        self = Add::add(self, other);
    }
}

impl FP8x23WSub of Sub<FP8x23W> {
    fn sub(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
        core_math::sub(lhs, rhs)
    }
}

impl FP8x23WSubEq of SubEq<FP8x23W> {
    #[inline(always)]
    fn sub_eq(ref self: FP8x23W, other: FP8x23W) {
        self = Sub::sub(self, other);
    }
}

impl FP8x23WMul of Mul<FP8x23W> {
    fn mul(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
        core_math::mul(lhs, rhs)
    }
}

impl FP8x23WMulEq of MulEq<FP8x23W> {
    #[inline(always)]
    fn mul_eq(ref self: FP8x23W, other: FP8x23W) {
        self = Mul::mul(self, other);
    }
}

impl FP8x23WDiv of Div<FP8x23W> {
    fn div(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
        core_math::div(lhs, rhs)
    }
}

impl FP8x23WDivEq of DivEq<FP8x23W> {
    #[inline(always)]
    fn div_eq(ref self: FP8x23W, other: FP8x23W) {
        self = Div::div(self, other);
    }
}

impl FP8x23WPartialOrd of PartialOrd<FP8x23W> {
    #[inline(always)]
    fn ge(lhs: FP8x23W, rhs: FP8x23W) -> bool {
        core_math::ge(lhs, rhs)
    }

    #[inline(always)]
    fn gt(lhs: FP8x23W, rhs: FP8x23W) -> bool {
        core_math::gt(lhs, rhs)
    }

    #[inline(always)]
    fn le(lhs: FP8x23W, rhs: FP8x23W) -> bool {
        core_math::le(lhs, rhs)
    }

    #[inline(always)]
    fn lt(lhs: FP8x23W, rhs: FP8x23W) -> bool {
        core_math::lt(lhs, rhs)
    }
}

impl FP8x23WNeg of Neg<FP8x23W> {
    #[inline(always)]
    fn neg(a: FP8x23W) -> FP8x23W {
        core_math::neg(a)
    }
}

impl FP8x23WRem of Rem<FP8x23W> {
    #[inline(always)]
    fn rem(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
        core_math::rem(lhs, rhs)
    }
}

/// INTERNAL

// Truncates toward zero, then re-applies the sign as a negative i32.
fn _i32_into_fp(x: FP8x23W) -> i32 {
    let number_felt: felt252 = (x.mag / ONE).into();
    let number_i32: i32 = number_felt.try_into().unwrap();
    if x.sign {
        return number_i32 * -1_i32;
    }
    number_i32
}

// Truncating conversion to i8 — None when the unscaled magnitude exceeds u8.
fn _i8_try_from_fp(x: FP8x23W) -> Option<i8> {
    let unscaled_mag: Option<u8> = (x.mag / ONE).try_into();

    match unscaled_mag {
        Option::Some => {
            let number_felt: felt252 = unscaled_mag.unwrap().into();
            let mut number_i8: i8 = number_felt.try_into().unwrap();
            if x.sign {
                return Option::Some(number_i8 * -1_i8);
            }
            Option::Some(number_i8)
        },
        Option::None => Option::None(())
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo
use core::debug::PrintTrait; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ HALF, ONE, TWO, FP8x23W, FP8x23WSub, FP8x23WDiv, FixedTrait, FP8x23WPrint }; const DEFAULT_PRECISION: u64 = 8; // 1e-6 // To use `DEFAULT_PRECISION`, final arg is: `Option::None(())`. // To use `custom_precision` of 430_u64: `Option::Some(430_u64)`. fn assert_precise(result: FP8x23W, expected: felt252, msg: felt252, custom_precision: Option<u64>) { let precision = match custom_precision { Option::Some(val) => val, Option::None => DEFAULT_PRECISION, }; let diff = (result - FixedTrait::from_felt(expected)).mag; if (diff > precision) { result.print(); assert(diff <= precision, msg); } } fn assert_relative( result: FP8x23W, expected: felt252, msg: felt252, custom_precision: Option<u64> ) { let precision = match custom_precision { Option::Some(val) => val, Option::None => DEFAULT_PRECISION, }; let diff = result - FixedTrait::from_felt(expected); let rel_diff = (diff / result).mag; if (rel_diff > precision) { result.print(); assert(rel_diff <= precision, msg); } }
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23wide/math.cairo
mod core; mod comp; mod lut; mod trig; mod hyp; mod erf;
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo
use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
    FP8x23W, FixedTrait, FP8x23WPartialOrd, FP8x23WPartialEq
};

// Returns the larger of `a` and `b`.
fn max(a: FP8x23W, b: FP8x23W) -> FP8x23W {
    if b > a {
        b
    } else {
        a
    }
}

// Returns the smaller of `a` and `b`.
fn min(a: FP8x23W, b: FP8x23W) -> FP8x23W {
    if b < a {
        b
    } else {
        a
    }
}

// Logical xor on truthiness: true iff exactly one operand equals +0.
fn xor(a: FP8x23W, b: FP8x23W) -> bool {
    let zero = FixedTrait::new(0, false);
    (a == zero || b == zero) && a != b
}

// Logical or on truthiness: false only when both operands equal +0.
fn or(a: FP8x23W, b: FP8x23W) -> bool {
    let zero = FixedTrait::new(0, false);
    !(a == zero && b == zero)
}

// Logical and on truthiness: true only when neither operand equals +0.
fn and(a: FP8x23W, b: FP8x23W) -> bool {
    let zero = FixedTrait::new(0, false);
    a != zero && b != zero
}

// Ternary select: `b` when `a` is non-zero, otherwise `c`.
fn where(a: FP8x23W, b: FP8x23W, c: FP8x23W) -> FP8x23W {
    let zero = FixedTrait::new(0, false);
    if a == zero {
        c
    } else {
        b
    }
}

// Bitwise ops act on magnitude and sign independently.
fn bitwise_and(a: FP8x23W, b: FP8x23W) -> FP8x23W {
    FixedTrait::new(a.mag & b.mag, a.sign & b.sign)
}

fn bitwise_xor(a: FP8x23W, b: FP8x23W) -> FP8x23W {
    FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign)
}

fn bitwise_or(a: FP8x23W, b: FP8x23W) -> FP8x23W {
    FixedTrait::new(a.mag | b.mag, a.sign | b.sign)
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::{FixedTrait, max, min, bitwise_and, bitwise_xor, bitwise_or};

    #[test]
    fn test_max() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::new_unscaled(1, true);

        assert(max(a, a) == a, 'max(a, a)');
        assert(max(a, b) == a, 'max(a, b)');
        assert(max(a, c) == a, 'max(a, c)');

        assert(max(b, a) == a, 'max(b, a)');
        assert(max(b, b) == b, 'max(b, b)');
        assert(max(b, c) == b, 'max(b, c)');

        assert(max(c, a) == a, 'max(c, a)');
        assert(max(c, b) == b, 'max(c, b)');
        assert(max(c, c) == c, 'max(c, c)');
    }

    #[test]
    fn test_min() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::new_unscaled(1, true);

        assert(min(a, a) == a, 'min(a, a)');
        assert(min(a, b) == b, 'min(a, b)');
        assert(min(a, c) == c, 'min(a, c)');

        assert(min(b, a) == b, 'min(b, a)');
        assert(min(b, b) == b, 'min(b, b)');
        assert(min(b, c) == c, 'min(b, c)');

        assert(min(c, a) == c, 'min(c, a)');
        assert(min(c, b) == c, 'min(c, b)');
        assert(min(c, c) == c, 'min(c, c)');
    }

    #[test]
    fn test_bitwise_and() {
        let a = FixedTrait::new(28835840, false); // 3.4375
        let b = FixedTrait::new(1639448576, true); // -60.5625
        assert(bitwise_and(a, b) == a, 'bitwise_and(a,b)')
    }

    #[test]
    fn test_bitwise_xor() {
        let a = FixedTrait::new(28835840, false); // 3.4375
        let b = FixedTrait::new(1639448576, true); // -60.5625
        let c = FixedTrait::new(1610612736, true);
        assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)')
    }

    #[test]
    fn test_bitwise_or() {
        let a = FixedTrait::new(28835840, false); // 3.4375
        let b = FixedTrait::new(1639448576, true); // -60.5625
        assert(bitwise_or(a, b) == b, 'bitwise_or(a,b)')
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo
// Core arithmetic and transcendental routines for the FP8x23W wide fixed-point
// type: sign-magnitude representation with a u64 magnitude scaled by ONE = 2^23.
use core::integer;

use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
    HALF, ONE, MAX, FP8x23W, FP8x23WAdd, FP8x23WImpl, FP8x23WAddEq, FP8x23WSub, FP8x23WMul,
    FP8x23WMulEq, FP8x23WTryIntoU128, FP8x23WPartialEq, FP8x23WPartialOrd, FP8x23WSubEq,
    FP8x23WNeg, FP8x23WDiv, FP8x23WIntoFelt252, FixedTrait
};
use orion::numbers::fixed_point::implementations::fp8x23wide::math::lut;

// PUBLIC

// Absolute value: drops the sign, keeps the magnitude.
fn abs(a: FP8x23W) -> FP8x23W {
    FixedTrait::new(a.mag, false)
}

// Signed addition on sign-magnitude values.
fn add(a: FP8x23W, b: FP8x23W) -> FP8x23W {
    // Same sign: magnitudes add directly.
    if a.sign == b.sign {
        return FixedTrait::new(a.mag + b.mag, a.sign);
    }

    // Opposite signs with equal magnitudes cancel to zero.
    if a.mag == b.mag {
        return FixedTrait::ZERO();
    }

    // Otherwise the larger magnitude wins and keeps its sign.
    if (a.mag > b.mag) {
        return FixedTrait::new(a.mag - b.mag, a.sign);
    } else {
        return FixedTrait::new(b.mag - a.mag, b.sign);
    }
}

// Rounds toward +infinity to the nearest whole value.
fn ceil(a: FP8x23W) -> FP8x23W {
    let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE));

    if rem == 0 {
        return a; // already integral
    } else if !a.sign {
        return FixedTrait::new_unscaled(div + 1, false);
    } else if div == 0 {
        // Values in (-1, 0) ceil to zero.
        return FixedTrait::new_unscaled(0, false);
    } else {
        // Negative: truncation toward zero is the ceiling.
        return FixedTrait::new_unscaled(div, true);
    }
}

// Fixed-point division: widens a.mag * ONE into u128 before dividing so no
// precision is lost. NOTE(review): the unwrap panics if the quotient exceeds
// u64, and the division panics when b is zero.
fn div(a: FP8x23W, b: FP8x23W) -> FP8x23W {
    let a_u64 = integer::u64_wide_mul(a.mag, ONE);
    let res_u64 = a_u64 / b.mag.into();

    // Re-apply sign
    FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign)
}

// Equality: both magnitude and sign must match.
fn eq(a: @FP8x23W, b: @FP8x23W) -> bool {
    (*a.mag == *b.mag) && (*a.sign == *b.sign)
}

// Calculates the natural exponent of x: e^x
fn exp(a: FP8x23W) -> FP8x23W {
    // e^x = 2^(x * log2(e)); 12102203 = round(log2(e) * 2^23)
    exp2(FixedTrait::new(12102203, false) * a) // log2(e) * 2^23 ≈ 12102203
}

// Calculates the binary exponent of x: 2^x
fn exp2(a: FP8x23W) -> FP8x23W {
    if (a.mag == 0) {
        return FixedTrait::ONE(); // 2^0 = 1
    }

    // Split |x| into whole and fractional parts.
    let (int_part, frac_part) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE));
    // 2^int via lookup table.
    let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false);
    let mut res_u = int_res;

    if frac_part != 0 {
        // 2^frac on [0, 1) via a degree-8 polynomial evaluated in Horner form;
        // coefficients are pre-scaled fixed-point constants.
        let frac = FixedTrait::new(frac_part, false);
        let r8 = FixedTrait::new(19, false) * frac;
        let r7 = (r8 + FixedTrait::new(105, false)) * frac;
        let r6 = (r7 + FixedTrait::new(1324, false)) * frac;
        let r5 = (r6 + FixedTrait::new(11159, false)) * frac;
        let r4 = (r5 + FixedTrait::new(80695, false)) * frac;
        let r3 = (r4 + FixedTrait::new(465599, false)) * frac;
        let r2 = (r3 + FixedTrait::new(2015166, false)) * frac;
        let r1 = (r2 + FixedTrait::new(5814540, false)) * frac;
        res_u = res_u * (r1 + FixedTrait::ONE());
    }

    // Negative exponent: 2^(-x) = 1 / 2^x.
    if a.sign {
        FixedTrait::ONE() / res_u
    } else {
        res_u
    }
}

// 2^exp for a whole-number exponent, straight from the lookup table.
fn exp2_int(exp: u64) -> FP8x23W {
    FixedTrait::new_unscaled(lut::exp2(exp), false)
}

// Rounds toward -infinity to the nearest whole value.
fn floor(a: FP8x23W) -> FP8x23W {
    let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE));

    if rem == 0 {
        return a; // already integral
    } else if !a.sign {
        return FixedTrait::new_unscaled(div, false);
    } else {
        // Negative with a fractional part: floor moves one whole step down.
        return FixedTrait::new_unscaled(div + 1, true);
    }
}

// a >= b. With differing signs the non-negative operand is greater; with equal
// signs, compare magnitudes (the xor with a.sign inverts the test for negatives).
fn ge(a: FP8x23W, b: FP8x23W) -> bool {
    if a.sign != b.sign {
        !a.sign
    } else {
        (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign)
    }
}

// a > b; same sign-then-magnitude scheme as `ge`, excluding equality.
fn gt(a: FP8x23W, b: FP8x23W) -> bool {
    if a.sign != b.sign {
        !a.sign
    } else {
        (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign)
    }
}

// a <= b; mirror of `ge`.
fn le(a: FP8x23W, b: FP8x23W) -> bool {
    if a.sign != b.sign {
        a.sign
    } else {
        (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign)
    }
}

// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(a: FP8x23W) -> FP8x23W {
    // ln(x) = ln(2) * log2(x); 5814540 = round(ln(2) * 2^23)
    FixedTrait::new(5814540, false) * log2(a) // ln(2) = 0.693...
}

// Calculates the binary logarithm of x: log2(x)
// self must be greater than zero
fn log2(a: FP8x23W) -> FP8x23W {
    assert(!a.sign, 'must be positive');

    if (a.mag == ONE) {
        return FixedTrait::ZERO(); // log2(1) = 0
    } else if (a.mag < ONE) {
        // Compute true inverse binary log if 0 < x < 1
        let div = FixedTrait::ONE() / a;
        return -log2(div);
    }

    // Range reduction: log2(x) = msb + log2(x / 2^msb), with x / 2^msb in [1, 2).
    let whole = a.mag / ONE;
    let (msb, div) = lut::msb(whole);

    if a.mag == div * ONE {
        // Exact power of two: no fractional correction needed.
        FixedTrait::new_unscaled(msb, false)
    } else {
        // Degree-8 polynomial approximation of log2 on [1, 2), Horner form,
        // with alternating-sign fixed-point coefficients.
        let norm = a / FixedTrait::new_unscaled(div, false);
        let r8 = FixedTrait::new(76243, true) * norm;
        let r7 = (r8 + FixedTrait::new(1038893, false)) * norm;
        let r6 = (r7 + FixedTrait::new(6277679, true)) * norm;
        let r5 = (r6 + FixedTrait::new(22135645, false)) * norm;
        let r4 = (r5 + FixedTrait::new(50444339, true)) * norm;
        let r3 = (r4 + FixedTrait::new(77896489, false)) * norm;
        let r2 = (r3 + FixedTrait::new(83945943, true)) * norm;
        let r1 = (r2 + FixedTrait::new(68407458, false)) * norm;
        r1 + FixedTrait::new(28734280, true) + FixedTrait::new_unscaled(msb, false)
    }
}

// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(a: FP8x23W) -> FP8x23W {
    // log10(x) = log10(2) * log2(x); 2525223 = round(log10(2) * 2^23)
    FixedTrait::new(2525223, false) * log2(a) // log10(2) = 0.301...
}

// a < b; mirror of `gt`.
fn lt(a: FP8x23W, b: FP8x23W) -> bool {
    if a.sign != b.sign {
        a.sign
    } else {
        (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign)
    }
}

// Fixed-point multiplication: widen to u128, rescale by ONE, re-apply sign.
// NOTE(review): the unwrap panics if the rescaled product exceeds u64.
fn mul(a: FP8x23W, b: FP8x23W) -> FP8x23W {
    let prod_u128 = integer::u64_wide_mul(a.mag, b.mag);

    // Re-apply sign
    FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign)
}

// Inequality: differs in magnitude or in sign.
fn ne(a: @FP8x23W, b: @FP8x23W) -> bool {
    (*a.mag != *b.mag) || (*a.sign != *b.sign)
}

// Negation; zero is returned unchanged (never produces "negative zero").
fn neg(a: FP8x23W) -> FP8x23W {
    if a.mag == 0 {
        a
    } else if !a.sign {
        FixedTrait::new(a.mag, !a.sign)
    } else {
        FixedTrait::new(a.mag, false)
    }
}

// Calculates the value of x^y and checks for overflow before returning
// self is a FP8x23W point value
// b is a FP8x23W point value
fn pow(a: FP8x23W, b: FP8x23W) -> FP8x23W {
    let (_, rem) = integer::u64_safe_divmod(b.mag, integer::u64_as_non_zero(ONE));

    // use the more performant integer pow when y is an int
    if (rem == 0) {
        return pow_int(a, b.mag / ONE, b.sign);
    }

    // x^y = exp(y*ln(x)) for x > 0 will error for x < 0
    exp(b * ln(a))
}

// Calculates the value of a^b for an integer exponent b (square-and-multiply);
// a negative exponent inverts the base first.
fn pow_int(a: FP8x23W, b: u64, sign: bool) -> FP8x23W {
    let mut x = a;
    let mut n = b;

    if sign {
        // a^(-b) = (1/a)^b
        x = FixedTrait::ONE() / x;
    }

    if n == 0 {
        return FixedTrait::ONE(); // a^0 = 1
    }

    let mut y = FixedTrait::ONE();
    let two = integer::u64_as_non_zero(2);

    // Binary exponentiation: multiply in `x` whenever the low bit of n is set.
    while n > 1 {
        let (div, rem) = integer::u64_safe_divmod(n, two);

        if rem == 1 {
            y = x * y;
        }

        x = x * x;
        n = div;
    };

    x * y
}

// Remainder consistent with floor division: a - floor(a / b) * b.
fn rem(a: FP8x23W, b: FP8x23W) -> FP8x23W {
    a - floor(a / b) * b
}

// Rounds half away from zero on the magnitude (sign is preserved).
fn round(a: FP8x23W) -> FP8x23W {
    let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE));

    if (HALF <= rem) {
        FixedTrait::new_unscaled(div + 1, a.sign)
    } else {
        FixedTrait::new_unscaled(div, a.sign)
    }
}

// Calculates the square root of a FP8x23W point value
// x must be positive
fn sqrt(a: FP8x23W) -> FP8x23W {
    assert(!a.sign, 'must be positive');

    // sqrt(mag * ONE) keeps the fixed-point scale: sqrt(x * 2^23 * 2^23) = sqrt(x) * 2^23.
    let root = integer::u64_sqrt(a.mag.into() * ONE.into());

    FixedTrait::new(root.into(), false)
}

// Subtraction via negated addition.
fn sub(a: FP8x23W, b: FP8x23W) -> FP8x23W {
    add(a, -b)
}

// Sign function: 0 for zero, otherwise +/-1 matching a's sign.
fn sign(a: FP8x23W) -> FP8x23W {
    if a.mag == 0 {
        FixedTrait::new(0, false)
    } else {
        FixedTrait::new(ONE, a.sign)
    }
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use orion::numbers::fixed_point::implementations::fp8x23wide::helpers::{
        assert_precise, assert_relative
    };
    use orion::numbers::fixed_point::implementations::fp8x23wide::math::trig::{PI, HALF_PI};
    use super::{
        FixedTrait, ONE, FP8x23W, ceil, floor, sqrt, round, lut, pow, exp, exp2, exp2_int, ln,
        log2, log10, eq, add, ne, HALF
    };

    #[test]
    fn test_into() {
        let a = FixedTrait::<FP8x23W>::new_unscaled(5, false);
        assert(a.mag == 5 * ONE, 'invalid result');
    }

    #[test]
    fn test_try_into_u128() {
        // Positive unscaled
        let a = FixedTrait::<FP8x23W>::new_unscaled(5, false);
        assert(a.try_into().unwrap() == 5_u128, 'invalid result');

        // Positive scaled
        let b = FixedTrait::<FP8x23W>::new(5 * ONE, false);
        assert(b.try_into().unwrap() == 5_u128, 'invalid result');

        // Zero
        let d = FixedTrait::<FP8x23W>::new_unscaled(0, false);
        assert(d.try_into().unwrap() == 0_u128, 'invalid result');
    }

    #[test]
    #[should_panic]
    fn test_negative_try_into_u128() {
        // Negative values cannot convert to an unsigned integer.
        let a = FixedTrait::<FP8x23W>::new_unscaled(1, true);
        let _a: u128 = a.try_into().unwrap();
    }

    #[test]
    #[available_gas(1000000)]
    fn test_acos() {
        let a = FixedTrait::<FP8x23W>::ONE();
        assert(a.acos().into() == 0, 'invalid one');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_asin() {
        let a = FixedTrait::ONE();
        assert_precise(a.asin(), HALF_PI.into(), 'invalid one', Option::None(())); // PI / 2
    }

    #[test]
    #[available_gas(2000000)]
    fn test_atan() {
        let a = FixedTrait::new(2 * ONE, false);
        assert_relative(a.atan(), 9287469, 'invalid two', Option::None(()));
    }

    #[test]
    fn test_ceil() {
        let a = FixedTrait::new(24326963, false); // 2.9
        assert(ceil(a).mag == 3 * ONE, 'invalid pos decimal');
    }

    #[test]
    fn test_floor() {
        let a = FixedTrait::new(24326963, false); // 2.9
        assert(floor(a).mag == 2 * ONE, 'invalid pos decimal');
    }

    #[test]
    fn test_round() {
        let a = FixedTrait::new(24326963, false); // 2.9
        assert(round(a).mag == 3 * ONE, 'invalid pos decimal');
    }

    #[test]
    #[should_panic]
    fn test_sqrt_fail() {
        // sqrt of a negative value must assert.
        let a = FixedTrait::new_unscaled(25, true);
        sqrt(a);
    }

    #[test]
    fn test_sqrt() {
        let mut a = FixedTrait::new_unscaled(0, false);
        assert(sqrt(a).mag == 0, 'invalid zero root');
        a = FixedTrait::new_unscaled(25, false);
        assert(sqrt(a).mag == 5 * ONE, 'invalid pos root');
    }

    #[test]
    #[available_gas(100000)]
    fn test_msb() {
        let a = FixedTrait::<FP8x23W>::new_unscaled(100, false);
        let (msb, div) = lut::msb(a.mag / ONE);
        assert(msb == 6, 'invalid msb');
        assert(div == 64, 'invalid msb ceil');
    }

    #[test]
    #[available_gas(600000)]
    fn test_pow() {
        let a = FixedTrait::new_unscaled(3, false);
        let b = FixedTrait::new_unscaled(4, false);
        assert(pow(a, b).mag == 81 * ONE, 'invalid pos base power');
    }

    #[test]
    #[available_gas(900000)]
    fn test_pow_frac() {
        let a = FixedTrait::new_unscaled(3, false);
        let b = FixedTrait::new(4194304, false); // 0.5
        assert_relative(
            pow(a, b), 14529495, 'invalid pos base power', Option::None(())
        ); // 1.7320508075688772
    }

    #[test]
    #[available_gas(1000000)]
    fn test_exp() {
        let a = FixedTrait::new_unscaled(2, false);
        assert_relative(
            exp(a), 61983895, 'invalid exp of 2', Option::None(())
        ); // 7.389056098793725
    }

    #[test]
    #[available_gas(400000)]
    fn test_exp2() {
        let a = FixedTrait::new_unscaled(5, false);
        assert(exp2(a).mag == 268435456, 'invalid exp2 of 2');
    }

    #[test]
    #[available_gas(20000)]
    fn test_exp2_int() {
        assert(exp2_int(5).into() == 268435456, 'invalid exp2 of 2');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_ln() {
        let mut a = FixedTrait::new_unscaled(1, false);
        assert(ln(a).mag == 0, 'invalid ln of 1');

        a = FixedTrait::new(22802601, false);
        assert_relative(ln(a), ONE.into(), 'invalid ln of 2.7...', Option::None(()));
    }

    #[test]
    #[available_gas(1000000)]
    fn test_log2() {
        let mut a = FixedTrait::new_unscaled(32, false);
        assert(log2(a) == FixedTrait::new_unscaled(5, false), 'invalid log2 32');

        a = FixedTrait::new_unscaled(10, false);
        assert_relative(
            log2(a), 27866353, 'invalid log2 10', Option::None(())
        ); // 3.321928094887362
    }

    #[test]
    #[available_gas(1000000)]
    fn test_log10() {
        let a = FixedTrait::new_unscaled(100, false);
        assert_relative(log10(a), 2 * ONE.into(), 'invalid log10', Option::None(()));
    }

    #[test]
    fn test_eq() {
        let a = FixedTrait::new_unscaled(42, false);
        let b = FixedTrait::new_unscaled(42, false);
        let c = eq(@a, @b);
        assert(c, 'invalid result');
    }

    #[test]
    fn test_ne() {
        let a = FixedTrait::new_unscaled(42, false);
        let b = FixedTrait::new_unscaled(42, false);
        let c = ne(@a, @b);
        assert(!c, 'invalid result');
    }

    #[test]
    fn test_add() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(2, false);
        assert(add(a, b) == FixedTrait::new_unscaled(3, false), 'invalid result');
    }

    #[test]
    fn test_add_eq() {
        let mut a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(2, false);
        a += b;
        assert(a == FixedTrait::<FP8x23W>::new_unscaled(3, false), 'invalid result');
    }

    #[test]
    fn test_sub() {
        let a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, false);
        let c = a - b;
        assert(c == FixedTrait::<FP8x23W>::new_unscaled(3, false), 'false result invalid');
    }

    #[test]
    fn test_sub_eq() {
        let mut a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, false);
        a -= b;
        assert(a == FixedTrait::<FP8x23W>::new_unscaled(3, false), 'invalid result');
    }

    #[test]
    #[available_gas(100000)]
    fn test_mul_pos() {
        let a = FP8x23W { mag: 24326963, sign: false };
        let b = FP8x23W { mag: 24326963, sign: false };
        let c = a * b;
        assert(c.mag == 70548192, 'invalid result');
    }

    #[test]
    fn test_mul_neg() {
        let a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, true);
        let c = a * b;
        assert(c == FixedTrait::<FP8x23W>::new_unscaled(10, true), 'invalid result');
    }

    #[test]
    fn test_mul_eq() {
        let mut a = FixedTrait::new_unscaled(5, false);
        let b = FixedTrait::new_unscaled(2, true);
        a *= b;
        assert(a == FixedTrait::<FP8x23W>::new_unscaled(10, true), 'invalid result');
    }

    #[test]
    fn test_div() {
        let a = FixedTrait::new_unscaled(10, false);
        let b = FixedTrait::<FP8x23W>::new(24326963, false); // 2.9
        let c = a / b;
        assert(c.mag == 28926234, 'invalid pos decimal'); // 3.4482758620689653
    }

    #[test]
    fn test_le() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP8x23W>::new_unscaled(1, true);

        assert(a <= a, 'a <= a');
        assert(!(a <= b), 'a <= b');
        assert(!(a <= c), 'a <= c');

        assert(b <= a, 'b <= a');
        assert(b <= b, 'b <= b');
        assert(!(b <= c), 'b <= c');

        assert(c <= a, 'c <= a');
        assert(c <= b, 'c <= b');
        assert(c <= c, 'c <= c');
    }

    #[test]
    fn test_lt() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP8x23W>::new_unscaled(1, true);

        assert(!(a < a), 'a < a');
        assert(!(a < b), 'a < b');
        assert(!(a < c), 'a < c');

        assert(b < a, 'b < a');
        assert(!(b < b), 'b < b');
        assert(!(b < c), 'b < c');

        assert(c < a, 'c < a');
        assert(c < b, 'c < b');
        assert(!(c < c), 'c < c');
    }

    #[test]
    fn test_ge() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP8x23W>::new_unscaled(1, true);

        assert(a >= a, 'a >= a');
        assert(a >= b, 'a >= b');
        assert(a >= c, 'a >= c');

        assert(!(b >= a), 'b >= a');
        assert(b >= b, 'b >= b');
        assert(b >= c, 'b >= c');

        assert(!(c >= a), 'c >= a');
        assert(!(c >= b), 'c >= b');
        assert(c >= c, 'c >= c');
    }

    #[test]
    fn test_gt() {
        let a = FixedTrait::new_unscaled(1, false);
        let b = FixedTrait::new_unscaled(0, false);
        let c = FixedTrait::<FP8x23W>::new_unscaled(1, true);

        assert(!(a > a), 'a > a');
        assert(a > b, 'a > b');
        assert(a > c, 'a > c');

        assert(!(b > a), 'b > a');
        assert(!(b > b), 'b > b');
        assert(b > c, 'b > c');

        assert(!(c > a), 'c > a');
        assert(!(c > b), 'c > b');
        assert(!(c > c), 'c > c');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_cos() {
        let a = FixedTrait::<FP8x23W>::new(HALF_PI, false);
        assert(a.cos().into() == 0, 'invalid half pi');
    }

    #[test]
    #[available_gas(1000000)]
    fn test_sin() {
        let a = FixedTrait::new(HALF_PI, false);
        assert_precise(a.sin(), ONE.into(), 'invalid half pi', Option::None(()));
    }

    #[test]
    #[available_gas(2000000)]
    fn test_tan() {
        let a = FixedTrait::<FP8x23W>::new(HALF_PI / 2, false);
        assert(a.tan().mag == 8388608, 'invalid quarter pi');
    }

    #[test]
    #[available_gas(2000000)]
    fn test_sign() {
        let a = FixedTrait::<FP8x23W>::new(0, false);
        assert(a.sign().mag == 0 && !a.sign().sign, 'invalid sign (0, true)');

        let a = FixedTrait::<FP8x23W>::new(HALF, true);
        assert(a.sign().mag == ONE && a.sign().sign, 'invalid sign (HALF, true)');

        let a = FixedTrait::<FP8x23W>::new(HALF, false);
        assert(a.sign().mag == ONE && !a.sign().sign, 'invalid sign (HALF, false)');

        let a = FixedTrait::<FP8x23W>::new(ONE, true);
        assert(a.sign().mag == ONE && a.sign().sign, 'invalid sign (ONE, true)');

        let a = FixedTrait::<FP8x23W>::new(ONE, false);
        assert(a.sign().mag == ONE && !a.sign().sign, 'invalid sign (ONE, false)');
    }

    #[test]
    #[should_panic]
    #[available_gas(2000000)]
    fn test_sign_fail() {
        let a = FixedTrait::<FP8x23W>::new(HALF, true);
        assert(a.sign().mag != ONE && !a.sign().sign, 'invalid sign (HALF, true)');
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23wide/math/erf.cairo
use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ONE, FP8x23W, FixedTrait};
use orion::numbers::fixed_point::implementations::fp8x23wide::math::lut::erf_lut;

const ERF_COMPUTATIONAL_ACCURACY: u64 = 100;
const MAX_ERF_COMPUTATIONAL_ACCURACY: u64 = 10;
const ROUND_CHECK_NUMBER: u64 = 1;
// Values > MAX_ERF_NUMBER return 1
const MAX_ERF_NUMBER: u64 = 29360128;
// Values <= ERF_TRUNCATION_NUMBER -> two decimal places, and values > ERF_TRUNCATION_NUMBER -> one decimal place
const ERF_TRUNCATION_NUMBER: u64 = 16777216;

// Error function via lookup table: magnitudes below MAX_ERF_NUMBER (3.5) read
// the table, larger ones saturate at ONE. The input's sign is carried through,
// matching erf's odd symmetry.
fn erf(x: FP8x23W) -> FP8x23W {
    let erf_mag = if x.mag < MAX_ERF_NUMBER {
        erf_lut(x.mag)
    } else {
        ONE
    };

    FP8x23W { mag: erf_mag, sign: x.sign }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo
use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
    HALF, ONE, TWO, FP8x23W, FP8x23WImpl, FP8x23WAdd, FP8x23WAddEq, FP8x23WSub, FP8x23WMul,
    FP8x23WMulEq, FP8x23WTryIntoU128, FP8x23WPartialEq, FP8x23WPartialOrd, FP8x23WSubEq,
    FP8x23WNeg, FP8x23WDiv, FP8x23WIntoFelt252, FixedTrait
};

// Calculates hyperbolic cosine of a (fixed point): cosh(a) = (e^a + e^-a) / 2
fn cosh(a: FP8x23W) -> FP8x23W {
    let ea = a.exp();

    (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false)
}

// Calculates hyperbolic sine of a (fixed point): sinh(a) = (e^a - e^-a) / 2
fn sinh(a: FP8x23W) -> FP8x23W {
    let ea = a.exp();

    (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false)
}

// Calculates hyperbolic tangent of a (fixed point): tanh(a) = (e^a - e^-a) / (e^a + e^-a)
fn tanh(a: FP8x23W) -> FP8x23W {
    let ea = a.exp();
    let ea_i = FixedTrait::ONE() / ea;

    (ea - ea_i) / (ea + ea_i)
}

// Calculates inverse hyperbolic cosine of a (fixed point): acosh(a) = ln(a + sqrt(a^2 - 1))
fn acosh(a: FP8x23W) -> FP8x23W {
    let root = (a * a - FixedTrait::ONE()).sqrt();

    (a + root).ln()
}

// Calculates inverse hyperbolic sine of a (fixed point): asinh(a) = ln(a + sqrt(a^2 + 1))
fn asinh(a: FP8x23W) -> FP8x23W {
    let root = (a * a + FixedTrait::ONE()).sqrt();

    (a + root).ln()
}

// Calculates inverse hyperbolic tangent of a (fixed point): atanh(a) = ln((1 + a) / (1 - a)) / 2
fn atanh(a: FP8x23W) -> FP8x23W {
    let one = FixedTrait::ONE();
    let ln_arg = (one + a) / (one - a);

    ln_arg.ln() / FixedTrait::new(TWO, false)
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use orion::numbers::fixed_point::implementations::fp8x23wide::helpers::assert_precise;

    use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF};

    #[test]
    #[available_gas(10000000)]
    fn test_cosh() {
        let a = FixedTrait::new(TWO, false);
        assert_precise(cosh(a), 31559585, 'invalid two', Option::None(())); // 3.762195691016423

        let a = FixedTrait::ONE();
        assert_precise(cosh(a), 12944299, 'invalid one', Option::None(())); // 1.5430806347841253

        let a = FixedTrait::ZERO();
        assert_precise(cosh(a), ONE.into(), 'invalid zero', Option::None(()));

        // BUG FIX: this case previously constructed positive one again, so the
        // negative branch was never exercised. cosh is even, so cosh(-1) must
        // agree with cosh(1) within precision.
        let a = FixedTrait::new(ONE, true);
        assert_precise(
            cosh(a), 12944299, 'invalid neg one', Option::None(())
        ); // 1.5430806347841253

        let a = FixedTrait::new(TWO, true);
        assert_precise(cosh(a), 31559602, 'invalid neg two', Option::None(())); // 3.762195691016423
    }

    #[test]
    #[available_gas(10000000)]
    fn test_sinh() {
        let a = FixedTrait::new(TWO, false);
        assert_precise(sinh(a), 30424310, 'invalid two', Option::None(())); // 3.6268604077773023

        let a = FixedTrait::ONE();
        assert_precise(sinh(a), 9858302, 'invalid one', Option::None(())); // 1.1752011936029418

        let a = FixedTrait::ZERO();
        assert(sinh(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE, true);
        assert_precise(
            sinh(a), -9858302, 'invalid neg one', Option::None(())
        ); // -1.1752011936029418

        let a = FixedTrait::new(TWO, true);
        assert_precise(
            sinh(a), -30424328, 'invalid neg two', Option::None(())
        ); // -3.6268604077773023
    }

    #[test]
    #[available_gas(10000000)]
    fn test_tanh() {
        let a = FixedTrait::new(TWO, false);
        assert_precise(tanh(a), 8086849, 'invalid two', Option::None(())); // 0.9640275800745076

        let a = FixedTrait::ONE();
        assert_precise(tanh(a), 6388715, 'invalid one', Option::None(())); // 0.7615941559446443

        let a = FixedTrait::ZERO();
        assert(tanh(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE, true);
        assert_precise(
            tanh(a), -6388715, 'invalid neg one', Option::None(())
        ); // -0.7615941559446443

        let a = FixedTrait::new(TWO, true);
        assert_precise(
            tanh(a), -8086849, 'invalid neg two', Option::None(())
        ); // -0.9640275800745076
    }

    #[test]
    #[available_gas(10000000)]
    fn test_acosh() {
        let a = FixedTrait::new(31559585, false); // 3.762195691016423
        assert_precise(acosh(a), 16777257, 'invalid two', Option::None(()));

        let a = FixedTrait::new(12944299, false); // 1.5430806347841253
        assert_precise(acosh(a), ONE.into(), 'invalid one', Option::None(()));

        let a = FixedTrait::ONE(); // 1
        assert(acosh(a).into() == 0, 'invalid zero');
    }

    #[test]
    #[available_gas(10000000)]
    fn test_asinh() {
        let a = FixedTrait::new(30424310, false); // 3.6268604077773023
        assert_precise(asinh(a), 16777257, 'invalid two', Option::None(()));

        let a = FixedTrait::new(9858302, false); // 1.1752011936029418
        assert_precise(asinh(a), ONE.into(), 'invalid one', Option::None(()));

        let a = FixedTrait::ZERO();
        assert(asinh(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(9858302, true); // -1.1752011936029418
        assert_precise(asinh(a), -ONE.into(), 'invalid neg one', Option::None(()));

        let a = FixedTrait::new(30424310, true); // -3.6268604077773023
        assert_precise(asinh(a), -16777238, 'invalid neg two', Option::None(()));
    }

    #[test]
    #[available_gas(10000000)]
    fn test_atanh() {
        let a = FixedTrait::new(7549747, false); // 0.9
        assert_precise(atanh(a), 12349872, 'invalid 0.9', Option::None(())); // 1.4722194895832204

        let a = FixedTrait::new(HALF, false); // 0.5
        assert_precise(atanh(a), 4607914, 'invalid half', Option::None(())); // 0.5493061443340548

        let a = FixedTrait::ZERO();
        assert(atanh(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(HALF, true); // -0.5
        assert_precise(
            atanh(a), -4607914, 'invalid neg half', Option::None(())
        ); // -0.5493061443340548

        let a = FixedTrait::new(7549747, true); // -0.9
        assert_precise(
            atanh(a), -12349872, 'invalid -0.9', Option::None(())
        ); // -1.4722194895832204
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23wide/math/lut.cairo
use orion::numbers::fixed_point::implementations::fp8x23wide::core::ONE;

// Calculates the most significant bit
// Returns (b, 2^b) where b is the index of the most significant set bit of `whole`.
// Every input >= 256 maps to (8, 256), so callers are expected to pass whole parts
// below 2^9 — TODO confirm against the call sites.
fn msb(whole: u64) -> (u64, u64) { if whole < 256 { if whole < 2 { return (0, 1); } if whole < 4 { return (1, 2); } if whole < 8 { return (2, 4); } if whole < 16 { return (3, 8); } if whole < 32 { return (4, 16); } if whole < 64 { return (5, 32); } if whole < 128 { return (6, 64); } if whole < 256 { return (7, 128); } } (8, 256) }

// Integer power-of-two lookup: returns 2^exp for 0 <= exp <= 22; any larger exponent
// falls through to 8388608 (2^23, the raw fp8x23 ONE).
fn exp2(exp: u64) -> u64 { if exp <= 16 { if exp == 0 { return 1; } if exp == 1 { return 2; } if exp == 2 { return 4; } if exp == 3 { return 8; } if exp == 4 { return 16; } if exp == 5 { return 32; } if exp == 6 { return 64; } if exp == 7 { return 128; } if exp == 8 { return 256; } if exp == 9 { return 512; } if exp == 10 { return 1024; } if exp == 11 { return 2048; } if exp == 12 { return 4096; } if exp == 13 { return 8192; } if exp == 14 { return 16384; } if exp == 15 { return 32768; } if exp == 16 { return 65536; } } else if exp <= 32 { if exp == 17 { return 131072; } if exp == 18 { return 262144; } if exp == 19 { return 524288; } if exp == 20 { return 1048576; } if exp == 21 { return 2097152; } if exp == 22 { return 4194304; } } 8388608 }

// sin lookup for interpolation: `a` is a raw fp8x23 angle, bucketed into 256 slots of
// width 51472 raw units (slot = a / 51472). Returns (slot base x, table value at the
// slot start, table value at the slot end) so the caller can interpolate between them.
// The y-values appear to be sin samples scaled by 2^23 over a quarter period — confirm
// against the table generator.
fn sin(a: u64) -> (u64, u64, u64) { let slot = a / 51472; if slot < 128 { if slot < 64 { if slot < 32 { if slot < 16 { if slot == 0 { return (0, 0, 51472); } if slot == 1 { return (51472, 51472, 102941); } if slot == 2 { return (102944, 102941, 154407); } if slot == 3 { return (154416, 154407, 205867); } if slot == 4 { return (205887, 205867, 257319); } if slot == 5 { return (257359, 257319, 308761); } if slot == 6 { return (308831, 308761, 360192); } if slot == 7 { return (360303, 360192, 411609); } if slot == 8 { return (411775, 411609, 463011); } if slot == 9 { return (463247, 463011, 514396); } if slot == 10 { return (514723, 514396, 565761); } if slot == 11 { return (566190, 565761, 617104); } if slot == 12 { return (617662, 617104, 668425); } if slot == 13 { return (669134, 668425,
719720); } if slot == 14 { return (720606, 719720, 770988); } if slot == 15 { return (772078, 770988, 822227); } } else { if slot == 16 { return (823550, 822227, 873436); } if slot == 17 { return (875022, 873436, 924611); } if slot == 18 { return (926493, 924611, 975751); } if slot == 19 { return (977965, 975751, 1026855); } if slot == 20 { return (1029437, 1026855, 1077920); } if slot == 21 { return (1080909, 1077920, 1128945); } if slot == 22 { return (1132381, 1128945, 1179927); } if slot == 23 { return (1183853, 1179927, 1230864); } if slot == 24 { return (1235324, 1230864, 1281756); } if slot == 25 { return (1286796, 1281756, 1332599); } if slot == 26 { return (1338268, 1332599, 1383392); } if slot == 27 { return (1389740, 1383392, 1434132); } if slot == 28 { return (1441212, 1434132, 1484819); } if slot == 29 { return (1492684, 1484819, 1535450); } if slot == 30 { return (1544156, 1535450, 1586023); } if slot == 31 { return (1595627, 1586023, 1636536); } } } else { if slot < 48 { if slot == 32 { return (1647099, 1636536, 1686988); } if slot == 33 { return (1698571, 1686988, 1737376); } if slot == 34 { return (1750043, 1737376, 1787699); } if slot == 35 { return (1801515, 1787699, 1837954); } if slot == 36 { return (1852987, 1837954, 1888141); } if slot == 37 { return (1904459, 1888141, 1938256); } if slot == 38 { return (1955930, 1938256, 1988298); } if slot == 39 { return (2007402, 1988298, 2038265); } if slot == 40 { return (2058871, 2038265, 2088156); } if slot == 41 { return (2110346, 2088156, 2137968); } if slot == 42 { return (2161818, 2137968, 2187700); } if slot == 43 { return (2213290, 2187700, 2237349); } if slot == 44 { return (2264762, 2237349, 2286914); } if slot == 45 { return (2316233, 2286914, 2336392); } if slot == 46 { return (2367705, 2336392, 2385783); } if slot == 47 { return (2419177, 2385783, 2435084); } } else { if slot == 48 { return (2470649, 2435084, 2484294); } if slot == 49 { return (2522121, 2484294, 2533410); } if slot == 50 {
return (2573593, 2533410, 2582430); } if slot == 51 { return (2625065, 2582430, 2631353); } if slot == 52 { return (2676536, 2631353, 2680177); } if slot == 53 { return (2728008, 2680177, 2728901); } if slot == 54 { return (2779480, 2728901, 2777521); } if slot == 55 { return (2830952, 2777521, 2826037); } if slot == 56 { return (2882424, 2826037, 2874446); } if slot == 57 { return (2933896, 2874446, 2922748); } if slot == 58 { return (2985368, 2922748, 2970939); } if slot == 59 { return (3036839, 2970939, 3019018); } if slot == 60 { return (3088311, 3019018, 3066984); } if slot == 61 { return (3139783, 3066984, 3114834); } if slot == 62 { return (3191255, 3114834, 3162567); } if slot == 63 { return (3242727, 3162567, 3210181); } } } } else { if slot < 96 { if slot < 80 { if slot == 64 { return (3294199, 3210181, 3257674); } if slot == 65 { return (3345671, 3257674, 3305045); } if slot == 66 { return (3397142, 3305045, 3352291); } if slot == 67 { return (3448614, 3352291, 3399411); } if slot == 68 { return (3500086, 3399411, 3446402); } if slot == 69 { return (3551558, 3446402, 3493264); } if slot == 70 { return (3603030, 3493264, 3539995); } if slot == 71 { return (3654502, 3539995, 3586592); } if slot == 72 { return (3705973, 3586592, 3633054); } if slot == 73 { return (3757445, 3633054, 3679380); } if slot == 74 { return (3808917, 3679380, 3725567); } if slot == 75 { return (3860389, 3725567, 3771613); } if slot == 76 { return (3911861, 3771613, 3817518); } if slot == 77 { return (3963333, 3817518, 3863279); } if slot == 78 { return (4014805, 3863279, 3908894); } if slot == 79 { return (4066276, 3908894, 3954362); } } else { if slot == 80 { return (4117751, 3954362, 3999682); } if slot == 81 { return (4169220, 3999682, 4044851); } if slot == 82 { return (4220692, 4044851, 4089867); } if slot == 83 { return (4272164, 4089867, 4134730); } if slot == 84 { return (4323636, 4134730, 4179437); } if slot == 85 { return (4375108, 4179437, 4223986); } if slot == 86 {
return (4426579, 4223986, 4268377); } if slot == 87 { return (4478051, 4268377, 4312606); } if slot == 88 { return (4529523, 4312606, 4356674); } if slot == 89 { return (4580995, 4356674, 4400577); } if slot == 90 { return (4632474, 4400577, 4444315); } if slot == 91 { return (4683939, 4444315, 4487885); } if slot == 92 { return (4735411, 4487885, 4531287); } if slot == 93 { return (4786882, 4531287, 4574518); } if slot == 94 { return (4838354, 4574518, 4617576); } if slot == 95 { return (4889826, 4617576, 4660461); } } } else { if slot < 112 { if slot == 96 { return (4941298, 4660461, 4703170); } if slot == 97 { return (4992770, 4703170, 4745702); } if slot == 98 { return (5044242, 4745702, 4788056); } if slot == 99 { return (5095714, 4788056, 4830229); } if slot == 100 { return (5147227, 4830229, 4872221); } if slot == 101 { return (5198657, 4872221, 4914029); } if slot == 102 { return (5250129, 4914029, 4955652); } if slot == 103 { return (5301601, 4955652, 4997088); } if slot == 104 { return (5353073, 4997088, 5038336); } if slot == 105 { return (5404545, 5038336, 5079395); } if slot == 106 { return (5456017, 5079395, 5120262); } if slot == 107 { return (5507488, 5120262, 5160937); } if slot == 108 { return (5558960, 5160937, 5201417); } if slot == 109 { return (5610432, 5201417, 5241701); } if slot == 110 { return (5661904, 5241701, 5281788); } if slot == 111 { return (5713376, 5281788, 5321677); } } else { if slot == 112 { return (5764848, 5321677, 5361364); } if slot == 113 { return (5816320, 5361364, 5400850); } if slot == 114 { return (5867791, 5400850, 5440133); } if slot == 115 { return (5919263, 5440133, 5479211); } if slot == 116 { return (5970735, 5479211, 5518082); } if slot == 117 { return (6022207, 5518082, 5556746); } if slot == 118 { return (6073679, 5556746, 5595201); } if slot == 119 { return (6125151, 5595201, 5633445); } if slot == 120 { return (6176622, 5633445, 5671477); } if slot == 121 { return (6228094, 5671477, 5709295); } if slot ==
122 { return (6279566, 5709295, 5746898); } if slot == 123 { return (6331038, 5746898, 5784285); } if slot == 124 { return (6382510, 5784285, 5821455); } if slot == 125 { return (6433982, 5821455, 5858405); } if slot == 126 { return (6485454, 5858405, 5895134); } if slot == 127 { return (6536925, 5895134, 5931642); } } } } } else { if slot < 192 { if slot < 160 { if slot < 144 { if slot == 128 { return (6588397, 5931642, 5967926); } if slot == 129 { return (6639869, 5967926, 6003985); } if slot == 130 { return (6691345, 6003985, 6039819); } if slot == 131 { return (6742813, 6039819, 6075425); } if slot == 132 { return (6794285, 6075425, 6110802); } if slot == 133 { return (6845757, 6110802, 6145949); } if slot == 134 { return (6897228, 6145949, 6180865); } if slot == 135 { return (6948700, 6180865, 6215549); } if slot == 136 { return (7000172, 6215549, 6249998); } if slot == 137 { return (7051644, 6249998, 6284212); } if slot == 138 { return (7103116, 6284212, 6318189); } if slot == 139 { return (7154588, 6318189, 6351928); } if slot == 140 { return (7206060, 6351928, 6385428); } if slot == 141 { return (7257531, 6385428, 6418688); } if slot == 142 { return (7309003, 6418688, 6451706); } if slot == 143 { return (7360475, 6451706, 6484482); } } else { if slot == 144 { return (7411947, 6484482, 6517013); } if slot == 145 { return (7463419, 6517013, 6549299); } if slot == 146 { return (7514891, 6549299, 6581338); } if slot == 147 { return (7566363, 6581338, 6613129); } if slot == 148 { return (7617834, 6613129, 6644672); } if slot == 149 { return (7669306, 6644672, 6675964); } if slot == 150 { return (7720780, 6675964, 6707005); } if slot == 151 { return (7772250, 6707005, 6737793); } if slot == 152 { return (7823722, 6737793, 6768328); } if slot == 153 { return (7875194, 6768328, 6798608); } if slot == 154 { return (7926666, 6798608, 6828632); } if slot == 155 { return (7978137, 6828632, 6858399); } if slot == 156 { return (8029609, 6858399, 6887907); } if slot ==
157 { return (8081081, 6887907, 6917156); } if slot == 158 { return (8132553, 6917156, 6946145); } if slot == 159 { return (8184025, 6946145, 6974873); } if slot == 160 { return (8235503, 6974873, 7003337); } } } else { if slot < 176 { if slot == 161 { return (8286968, 7003337, 7031538); } if slot == 162 { return (8338440, 7031538, 7059475); } if slot == 163 { return (8389912, 7059475, 7087145); } if slot == 164 { return (8441384, 7087145, 7114549); } if slot == 165 { return (8492856, 7114549, 7141685); } if slot == 166 { return (8544328, 7141685, 7168552); } if slot == 167 { return (8595800, 7168552, 7195149); } if slot == 168 { return (8647271, 7195149, 7221475); } if slot == 169 { return (8698743, 7221475, 7247530); } if slot == 170 { return (8750215, 7247530, 7273311); } if slot == 171 { return (8801687, 7273311, 7298819); } if slot == 172 { return (8853159, 7298819, 7324052); } if slot == 173 { return (8904631, 7324052, 7349009); } if slot == 174 { return (8956103, 7349009, 7373689); } if slot == 175 { return (9007574, 7373689, 7398092); } } else { if slot == 176 { return (9059046, 7398092, 7422216); } if slot == 177 { return (9110518, 7422216, 7446061); } if slot == 178 { return (9161990, 7446061, 7469625); } if slot == 179 { return (9213462, 7469625, 7492909); } if slot == 180 { return (9264934, 7492909, 7515910); } if slot == 181 { return (9316406, 7515910, 7538628); } if slot == 182 { return (9367877, 7538628, 7561062); } if slot == 183 { return (9419349, 7561062, 7583212); } if slot == 184 { return (9470821, 7583212, 7605076); } if slot == 185 { return (9522293, 7605076, 7626654); } if slot == 186 { return (9573765, 7626654, 7647945); } if slot == 187 { return (9625237, 7647945, 7668947); } if slot == 188 { return (9676709, 7668947, 7689661); } if slot == 189 { return (9728180, 7689661, 7710086); } if slot == 190 { return (9779651, 7710086, 7730220); } if slot == 191 { return (9831124, 7730220, 7750063); } } } } else { if slot < 224 { if slot < 208 { if
slot == 192 { return (9882596, 7750063, 7769615); } if slot == 193 { return (9934068, 7769615, 7788874); } if slot == 194 { return (9985540, 7788874, 7807839); } if slot == 195 { return (10037012, 7807839, 7826511); } if slot == 196 { return (10088483, 7826511, 7844888); } if slot == 197 { return (10139955, 7844888, 7862970); } if slot == 198 { return (10191427, 7862970, 7880755); } if slot == 199 { return (10242899, 7880755, 7898244); } if slot == 200 { return (10294373, 7898244, 7915436); } if slot == 201 { return (10345843, 7915436, 7932329); } if slot == 202 { return (10397315, 7932329, 7948924); } if slot == 203 { return (10448786, 7948924, 7965220); } if slot == 204 { return (10500258, 7965220, 7981215); } if slot == 205 { return (10551730, 7981215, 7996911); } if slot == 206 { return (10603202, 7996911, 8012305); } if slot == 207 { return (10654674, 8012305, 8027397); } } else { if slot == 208 { return (10706146, 8027397, 8042188); } if slot == 209 { return (10757617, 8042188, 8056675); } if slot == 210 { return (10809089, 8056675, 8070859); } if slot == 211 { return (10860561, 8070859, 8084740); } if slot == 212 { return (10912033, 8084740, 8098316); } if slot == 213 { return (10963505, 8098316, 8111587); } if slot == 214 { return (11014977, 8111587, 8124552); } if slot == 215 { return (11066449, 8124552, 8137212); } if slot == 216 { return (11117920, 8137212, 8149565); } if slot == 217 { return (11169392, 8149565, 8161612); } if slot == 218 { return (11220864, 8161612, 8173351); } if slot == 219 { return (11272336, 8173351, 8184783); } if slot == 220 { return (11323808, 8184783, 8195906); } if slot == 221 { return (11375280, 8195906, 8206721); } if slot == 222 { return (11426752, 8206721, 8217227); } if slot == 223 { return (11478223, 8217227, 8227423); } } } else { if slot < 240 { if slot == 224 { return (11529695, 8227423, 8237310); } if slot == 225 { return (11581167, 8237310, 8246887); } if slot == 226 { return (11632639, 8246887, 8256153); } if slot
== 227 { return (11684111, 8256153, 8265108); } if slot == 228 { return (11735583, 8265108, 8273752); } if slot == 229 { return (11787055, 8273752, 8282085); } if slot == 230 { return (11838531, 8282085, 8290105); } if slot == 231 { return (11889998, 8290105, 8297814); } if slot == 232 { return (11941470, 8297814, 8305210); } if slot == 233 { return (11992942, 8305210, 8312294); } if slot == 234 { return (12044414, 8312294, 8319064); } if slot == 235 { return (12095886, 8319064, 8325522); } if slot == 236 { return (12147358, 8325522, 8331666); } if slot == 237 { return (12198829, 8331666, 8337496); } if slot == 238 { return (12250301, 8337496, 8343012); } if slot == 239 { return (12301773, 8343012, 8348215); } } else { if slot == 240 { return (12353244, 8348215, 8353102); } if slot == 241 { return (12404717, 8353102, 8357676); } if slot == 242 { return (12456189, 8357676, 8361935); } if slot == 243 { return (12507661, 8361935, 8365879); } if slot == 244 { return (12559132, 8365879, 8369508); } if slot == 245 { return (12610604, 8369508, 8372822); } if slot == 246 { return (12662076, 8372822, 8375820); } if slot == 247 { return (12713548, 8375820, 8378504); } if slot == 248 { return (12765020, 8378504, 8380871); } if slot == 249 { return (12816492, 8380871, 8382924); } if slot == 250 { return (12867964, 8382924, 8384660); } if slot == 251 { return (12919435, 8384660, 8386082); } if slot == 252 { return (12970907, 8386082, 8387187); } if slot == 253 { return (13022379, 8387187, 8387976); } if slot == 254 { return (13073851, 8387976, 8388450); } } } } } (13125323, 8388450, 8388608) }

// atan lookup for interpolation: `a` is a raw fp8x23 value, bucketed into slots of
// width 58720 (slot = a / 58720). Returns (slot base x, table value at the slot start,
// table value at the slot end); slots beyond 98 all fall through to the last entry.
// The y-values appear to be atan samples scaled by 2^23 — confirm against the generator.
fn atan(a: u64) -> (u64, u64, u64) { let slot = a / 58720; if slot == 0 { return (0, 0, 58719); } if slot == 1 { return (58720, 58719, 117433); } if slot == 2 { return (117441, 117433, 176135); } if slot == 3 { return (176161, 176135, 234820); } if slot == 4 { return (234881, 234820, 293481); } if slot == 5 { return (293601, 293481, 352115); } if slot == 6 { return (352322, 352115,
410713); } if slot == 7 { return (411042, 410713, 469272); } if slot == 8 { return (469762, 469272, 527785); } if slot == 9 { return (528482, 527785, 586246); } if slot == 10 { return (587201, 586246, 644651); } if slot == 11 { return (645923, 644651, 702993); } if slot == 12 { return (704643, 702993, 761267); } if slot == 13 { return (763363, 761267, 819467); } if slot == 14 { return (822084, 819467, 877588); } if slot == 15 { return (880804, 877588, 935625); } if slot == 16 { return (939524, 935625, 993572); } if slot == 17 { return (998244, 993572, 1051424); } if slot == 18 { return (1056965, 1051424, 1109175); } if slot == 19 { return (1115685, 1109175, 1166821); } if slot == 20 { return (1174411, 1166821, 1224357); } if slot == 21 { return (1233125, 1224357, 1281776); } if slot == 22 { return (1291846, 1281776, 1339075); } if slot == 23 { return (1350566, 1339075, 1396248); } if slot == 24 { return (1409286, 1396248, 1453290); } if slot == 25 { return (1468006, 1453290, 1510197); } if slot == 26 { return (1526727, 1510197, 1566964); } if slot == 27 { return (1585447, 1566964, 1623585); } if slot == 28 { return (1644167, 1623585, 1680058); } if slot == 29 { return (1702887, 1680058, 1736376); } if slot == 30 { return (1761612, 1736376, 1792537); } if slot == 31 { return (1820328, 1792537, 1848534); } if slot == 32 { return (1879048, 1848534, 1904364); } if slot == 33 { return (1937768, 1904364, 1960024); } if slot == 34 { return (1996489, 1960024, 2015508); } if slot == 35 { return (2055209, 2015508, 2070813); } if slot == 36 { return (2113929, 2070813, 2125935); } if slot == 37 { return (2172649, 2125935, 2180869); } if slot == 38 { return (2231370, 2180869, 2235613); } if slot == 39 { return (2290090, 2235613, 2290163); } if slot == 40 { return (2348813, 2290163, 2344515); } if slot == 41 { return (2407530, 2344515, 2398665); } if slot == 42 { return (2466251, 2398665, 2452611); } if slot == 43 { return (2524971, 2452611, 2506348); } if slot == 44 { return
(2583691, 2506348, 2559875); } if slot == 45 { return (2642412, 2559875, 2613187); } if slot == 46 { return (2701132, 2613187, 2666281); } if slot == 47 { return (2759852, 2666281, 2719156); } if slot == 48 { return (2818572, 2719156, 2771807); } if slot == 49 { return (2877293, 2771807, 2824233); } if slot == 50 { return (2936014, 2824233, 2876431); } if slot == 51 { return (2994733, 2876431, 2928397); } if slot == 52 { return (3053453, 2928397, 2980130); } if slot == 53 { return (3112174, 2980130, 3031628); } if slot == 54 { return (3170894, 3031628, 3082888); } if slot == 55 { return (3229614, 3082888, 3133907); } if slot == 56 { return (3288334, 3133907, 3184685); } if slot == 57 { return (3347055, 3184685, 3235218); } if slot == 58 { return (3405775, 3235218, 3285506); } if slot == 59 { return (3464495, 3285506, 3335545); } if slot == 60 { return (3523224, 3335545, 3385336); } if slot == 61 { return (3581936, 3385336, 3434875); } if slot == 62 { return (3640656, 3434875, 3484161); } if slot == 63 { return (3699376, 3484161, 3533193); } if slot == 64 { return (3758096, 3533193, 3581970); } if slot == 65 { return (3816817, 3581970, 3630491); } if slot == 66 { return (3875537, 3630491, 3678753); } if slot == 67 { return (3934257, 3678753, 3726756); } if slot == 68 { return (3992977, 3726756, 3774499); } if slot == 69 { return (4051698, 3774499, 3821981); } if slot == 70 { return (4110418, 3821981, 3869201); } if slot == 71 { return (4169138, 3869201, 3916159); } if slot == 72 { return (4227858, 3916159, 3962853); } if slot == 73 { return (4286579, 3962853, 4009282); } if slot == 74 { return (4345299, 4009282, 4055447); } if slot == 75 { return (4404019, 4055447, 4101347); } if slot == 76 { return (4462739, 4101347, 4146981); } if slot == 77 { return (4521460, 4146981, 4192350); } if slot == 78 { return (4580180, 4192350, 4237451); } if slot == 79 { return (4638900, 4237451, 4282286); } if slot == 80 { return (4697620, 4282286, 4326855); } if slot == 81 { return
(4756341, 4326855, 4371156); } if slot == 82 { return (4815061, 4371156, 4415191); } if slot == 83 { return (4873781, 4415191, 4458958); } if slot == 84 { return (4932502, 4458958, 4502459); } if slot == 85 { return (4991222, 4502459, 4545693); } if slot == 86 { return (5049942, 4545693, 4588660); } if slot == 87 { return (5108662, 4588660, 4631361); } if slot == 88 { return (5167383, 4631361, 4673795); } if slot == 89 { return (5226103, 4673795, 4715964); } if slot == 90 { return (5284823, 4715964, 4757868); } if slot == 91 { return (5343543, 4757868, 4799506); } if slot == 92 { return (5402264, 4799506, 4840880); } if slot == 93 { return (5460984, 4840880, 4881990); } if slot == 94 { return (5519704, 4881990, 4922837); } if slot == 95 { return (5578424, 4922837, 4963420); } if slot == 96 { return (5637145, 4963420, 5003742); } if slot == 97 { return (5695865, 5003742, 5043802); } if slot == 98 { return (5754585, 5043802, 5083601); } (5813305, 5083601, 5123141) }

// Piecewise-constant lookup for erf(x) on a raw fp8x23 input: steps of 83886 (~0.01)
// up to ~2.0, coarser steps of 838860 (~0.1) above, saturating to ONE for
// x > 28521267 (~3.4). Output is presumably erf(x) scaled by 2^23 — confirm
// against the table generator.
fn erf_lut(x: u64) -> u64 {
// Construct the erf lookup table
if x <= 754974 { if x <= 0 { return 0; } if x <= 83886 { return 94652; } if x <= 167772 { return 189285; } if x <= 251658 { return 283880; } if x <= 335544 { return 378419; } if x <= 419430 { return 472882; } if x <= 503316 { return 567251; } if x <= 587202 { return 661506; } if x <= 671088 { return 755630; } if x <= 754974 { return 849603; } } if x <= 1593835 { if x <= 838860 { return 943407; } if x <= 922746 { return 1037024; } if x <= 1006632 { return 1130434; } if x <= 1090519 { return 1223622; } if x <= 1174405 { return 1316567; } if x <= 1258291 { return 1409252; } if x <= 1342177 { return 1501659; } if x <= 1426063 { return 1593772; } if x <= 1509949 { return 1685571; } if x <= 1593835 { return 1777041; } } if x <= 2432696 { if x <= 1677721 { return 1868164; } if x <= 1761607 { return 1958923; } if x <= 1845493 { return 2049302; } if x <= 1929379 { return 2139284; } if x <= 2013265 { return 2228853; } if x <= 2097152 { return 2317993; }
if x <= 2181038 { return 2406689; } if x <= 2264924 { return 2494924; } if x <= 2348810 { return 2582685; } if x <= 2432696 { return 2669955; } } if x <= 3271557 { if x <= 2516582 { return 2756721; } if x <= 2600468 { return 2842967; } if x <= 2684354 { return 2928681; } if x <= 2768240 { return 3013847; } if x <= 2852126 { return 3098454; } if x <= 2936012 { return 3182487; } if x <= 3019898 { return 3265934; } if x <= 3103784 { return 3348782; } if x <= 3187671 { return 3431019; } if x <= 3271557 { return 3512634; } } if x <= 4110417 { if x <= 3355443 { return 3593615; } if x <= 3439329 { return 3673951; } if x <= 3523215 { return 3753630; } if x <= 3607101 { return 3832643; } if x <= 3690987 { return 3910979; } if x <= 3774873 { return 3988629; } if x <= 3858759 { return 4065584; } if x <= 3942645 { return 4141833; } if x <= 4026531 { return 4217369; } if x <= 4110417 { return 4292184; } } if x <= 4949278 { if x <= 4194304 { return 4366269; } if x <= 4278190 { return 4439617; } if x <= 4362076 { return 4512220; } if x <= 4445962 { return 4584073; } if x <= 4529848 { return 4655167; } if x <= 4613734 { return 4725498; } if x <= 4697620 { return 4795060; } if x <= 4781506 { return 4863847; } if x <= 4865392 { return 4931854; } if x <= 4949278 { return 4999077; } } if x <= 5788139 { if x <= 5033164 { return 5065512; } if x <= 5117050 { return 5131153; } if x <= 5200936 { return 5195999; } if x <= 5284823 { return 5260046; } if x <= 5368709 { return 5323291; } if x <= 5452595 { return 5385732; } if x <= 5536481 { return 5447366; } if x <= 5620367 { return 5508192; } if x <= 5704253 { return 5568208; } if x <= 5788139 { return 5627414; } } if x <= 6627000 { if x <= 5872025 { return 5685808; } if x <= 5955911 { return 5743390; } if x <= 6039797 { return 5800161; } if x <= 6123683 { return 5856120; } if x <= 6207569 { return 5911268; } if x <= 6291456 { return 5965605; } if x <= 6375342 { return 6019134; } if x <= 6459228 { return 6071855; } if x <= 6543114 { return
6123771; } if x <= 6627000 { return 6174883; } } if x <= 7465861 { if x <= 6710886 { return 6225194; } if x <= 6794772 { return 6274706; } if x <= 6878658 { return 6323422; } if x <= 6962544 { return 6371347; } if x <= 7046430 { return 6418482; } if x <= 7130316 { return 6464832; } if x <= 7214202 { return 6510400; } if x <= 7298088 { return 6555192; } if x <= 7381975 { return 6599211; } if x <= 7465861 { return 6642462; } } if x <= 8304721 { if x <= 7549747 { return 6684950; } if x <= 7633633 { return 6726680; } if x <= 7717519 { return 6767658; } if x <= 7801405 { return 6807888; } if x <= 7885291 { return 6847377; } if x <= 7969177 { return 6886131; } if x <= 8053063 { return 6924155; } if x <= 8136949 { return 6961456; } if x <= 8220835 { return 6998041; } if x <= 8304721 { return 7033915; } } if x <= 9143582 { if x <= 8388608 { return 7069086; } if x <= 8472494 { return 7103561; } if x <= 8556380 { return 7137346; } if x <= 8640266 { return 7170449; } if x <= 8724152 { return 7202877; } if x <= 8808038 { return 7234638; } if x <= 8891924 { return 7265739; } if x <= 8975810 { return 7296187; } if x <= 9059696 { return 7325990; } if x <= 9143582 { return 7355157; } } if x <= 9982443 { if x <= 9227468 { return 7383695; } if x <= 9311354 { return 7411612; } if x <= 9395240 { return 7438915; } if x <= 9479127 { return 7465615; } if x <= 9563013 { return 7491717; } if x <= 9646899 { return 7517231; } if x <= 9730785 { return 7542165; } if x <= 9814671 { return 7566527; } if x <= 9898557 { return 7590326; } if x <= 9982443 { return 7613570; } } if x <= 10821304 { if x <= 10066329 { return 7636267; } if x <= 10150215 { return 7658425; } if x <= 10234101 { return 7680054; } if x <= 10317987 { return 7701162; } if x <= 10401873 { return 7721757; } if x <= 10485760 { return 7741847; } if x <= 10569646 { return 7761441; } if x <= 10653532 { return 7780548; } if x <= 10737418 { return 7799175; } if x <= 10821304 { return 7817332; } } if x <= 11660165 { if x <= 10905190 {
return 7835026; } if x <= 10989076 { return 7852266; } if x <= 11072962 { return 7869060; } if x <= 11156848 { return 7885417; } if x <= 11240734 { return 7901344; } if x <= 11324620 { return 7916851; } if x <= 11408506 { return 7931944; } if x <= 11492392 { return 7946632; } if x <= 11576279 { return 7960923; } if x <= 11660165 { return 7974825; } } if x <= 12499025 { if x <= 11744051 { return 7988346; } if x <= 11827937 { return 8001494; } if x <= 11911823 { return 8014276; } if x <= 11995709 { return 8026700; } if x <= 12079595 { return 8038774; } if x <= 12163481 { return 8050505; } if x <= 12247367 { return 8061901; } if x <= 12331253 { return 8072969; } if x <= 12415139 { return 8083716; } if x <= 12499025 { return 8094149; } } if x <= 13337886 { if x <= 12582912 { return 8104277; } if x <= 12666798 { return 8114105; } if x <= 12750684 { return 8123641; } if x <= 12834570 { return 8132891; } if x <= 12918456 { return 8141862; } if x <= 13002342 { return 8150562; } if x <= 13086228 { return 8158996; } if x <= 13170114 { return 8167170; } if x <= 13254000 { return 8175092; } if x <= 13337886 { return 8182768; } } if x <= 14176747 { if x <= 13421772 { return 8190203; } if x <= 13505658 { return 8197405; } if x <= 13589544 { return 8204378; } if x <= 13673431 { return 8211128; } if x <= 13757317 { return 8217663; } if x <= 13841203 { return 8223986; } if x <= 13925089 { return 8230104; } if x <= 14008975 { return 8236022; } if x <= 14092861 { return 8241746; } if x <= 14176747 { return 8247281; } } if x <= 15015608 { if x <= 14260633 { return 8252632; } if x <= 14344519 { return 8257804; } if x <= 14428405 { return 8262802; } if x <= 14512291 { return 8267631; } if x <= 14596177 { return 8272296; } if x <= 14680064 { return 8276801; } if x <= 14763950 { return 8281152; } if x <= 14847836 { return 8285352; } if x <= 14931722 { return 8289405; } if x <= 15015608 { return 8293318; } } if x <= 15854469 { if x <= 15099494 { return 8297092; } if x <= 15183380 { return
8300733; } if x <= 15267266 { return 8304245; } if x <= 15351152 { return 8307631; } if x <= 15435038 { return 8310895; } if x <= 15518924 { return 8314041; } if x <= 15602810 { return 8317074; } if x <= 15686696 { return 8319995; } if x <= 15770583 { return 8322809; } if x <= 15854469 { return 8325519; } } if x <= 16693329 { if x <= 15938355 { return 8328129; } if x <= 16022241 { return 8330642; } if x <= 16106127 { return 8333060; } if x <= 16190013 { return 8335387; } if x <= 16273899 { return 8337626; } if x <= 16357785 { return 8339780; } if x <= 16441671 { return 8341852; } if x <= 16525557 { return 8343844; } if x <= 16609443 { return 8345758; } if x <= 16693329 { return 8347599; } } if x <= 28521267 { if x <= 16777216 { return 8349368; } if x <= 17616076 { return 8363614; } if x <= 18454937 { return 8372981; } if x <= 19293798 { return 8379018; } if x <= 20132659 { return 8382832; } if x <= 20971520 { return 8385194; } if x <= 21810380 { return 8386627; } if x <= 22649241 { return 8387481; } if x <= 23488102 { return 8387978; } if x <= 24326963 { return 8388263; } if x <= 25165824 { return 8388422; } if x <= 26004684 { return 8388510; } if x <= 26843545 { return 8388557; } if x <= 27682406 { return 8388582; } if x <= 28521267 { return 8388595; } } ONE }
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo
use core::integer;

use orion::numbers::fixed_point::implementations::fp8x23wide::math::lut;
use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
    HALF, ONE, TWO, FP8x23W, FP8x23WImpl, FP8x23WAdd, FP8x23WSub, FP8x23WMul, FP8x23WDiv,
    FP8x23WIntoFelt252, FixedTrait
};

// CONSTANTS (fixed-point magnitudes at 2^23 scale)

const TWO_PI: u64 = 52707178;
const PI: u64 = 26353589;
const HALF_PI: u64 = 13176795;

// PUBLIC

// Calculates arccos(a) for -1 <= a <= 1 (fixed point)
// arccos(a) = arcsin(sqrt(1 - a^2)) - arctan identity has discontinuity at zero
fn acos(a: FP8x23W) -> FP8x23W {
    let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    let asin_res = asin(asin_arg);

    // Mirror into the upper half range for negative inputs: acos(-x) = pi - acos(x)
    if (a.sign) {
        FixedTrait::new(PI, false) - asin_res
    } else {
        asin_res
    }
}

// LUT-accelerated variant of `acos`; same range and identity as above.
fn acos_fast(a: FP8x23W) -> FP8x23W {
    let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    let asin_res = asin_fast(asin_arg);

    if (a.sign) {
        FixedTrait::new(PI, false) - asin_res
    } else {
        asin_res
    }
}

// Calculates arcsin(a) for -1 <= a <= 1 (fixed point)
// arcsin(a) = arctan(a / sqrt(1 - a^2))
fn asin(a: FP8x23W) -> FP8x23W {
    // |a| == 1 would divide by zero below; return +/- pi/2 directly
    if (a.mag == ONE) {
        return FixedTrait::new(HALF_PI, a.sign);
    }

    let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    atan(a / div)
}

// LUT-accelerated variant of `asin`; same range and identity as above.
fn asin_fast(a: FP8x23W) -> FP8x23W {
    if (a.mag == ONE) {
        return FixedTrait::new(HALF_PI, a.sign);
    }

    let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1
    atan_fast(a / div)
}

// Calculates arctan(a) (fixed point) via polynomial approximation
// See https://stackoverflow.com/a/50894477 for range adjustments
fn atan(a: FP8x23W) -> FP8x23W {
    let mut at = a.abs();
    let mut shift = false;
    let mut invert = false;

    // Invert value when a > 1
    if (at.mag > ONE) {
        at = FixedTrait::ONE() / at;
        invert = true;
    }

    // Account for lack of precision in polynomaial when a > 0.7
    if (at.mag > 5872026) {
        let sqrt3_3 = FixedTrait::new(4843165, false); // sqrt(3) / 3
        at = (at - sqrt3_3) / (FixedTrait::ONE() + at * sqrt3_3);
        shift = true;
    }

    // Horner evaluation of the degree-10 approximation polynomial
    let r10 = FixedTrait::new(15363, true) * at;
    let r9 = (r10 + FixedTrait::new(392482, true)) * at;
    let r8 = (r9 + FixedTrait::new(1629064, false)) * at;
    let r7 = (r8 + FixedTrait::new(2197820, true)) * at;
    let r6 = (r7 + FixedTrait::new(366693, false)) * at;
    let r5 = (r6 + FixedTrait::new(1594324, false)) * at;
    let r4 = (r5 + FixedTrait::new(11519, false)) * at;
    let r3 = (r4 + FixedTrait::new(2797104, true)) * at;
    let r2 = (r3 + FixedTrait::new(34, false)) * at;
    let mut res = (r2 + FixedTrait::new(8388608, false)) * at;

    // Adjust for sign change, inversion, and shift
    if (shift) {
        res = res + FixedTrait::new(4392265, false); // pi / 6
    }

    if (invert) {
        res = res - FixedTrait::new(HALF_PI, false);
    }

    FixedTrait::new(res.mag, a.sign)
}

// LUT-accelerated arctan: linear interpolation between table entries.
fn atan_fast(a: FP8x23W) -> FP8x23W {
    let mut at = a.abs();
    let mut shift = false;
    let mut invert = false;

    // Invert value when a > 1
    if (at.mag > ONE) {
        at = FixedTrait::ONE() / at;
        invert = true;
    }

    // Account for lack of precision in polynomaial when a > 0.7
    if (at.mag > 5872026) {
        let sqrt3_3 = FixedTrait::new(4843165, false); // sqrt(3) / 3
        at = (at - sqrt3_3) / (FixedTrait::ONE() + at * sqrt3_3);
        shift = true;
    }

    // Interpolate between LUT samples; 58720 is the LUT step size
    let (start, low, high) = lut::atan(at.mag);
    let partial_step = FixedTrait::new(at.mag - start, false) / FixedTrait::new(58720, false);
    let mut res = partial_step * FixedTrait::new(high - low, false) + FixedTrait::new(low, false);

    // Adjust for sign change, inversion, and shift
    if (shift) {
        res = res + FixedTrait::new(4392265, false); // pi / 6
    }

    if (invert) {
        res = res - FixedTrait::<FP8x23W>::new(HALF_PI, false);
    }

    FixedTrait::new(res.mag, a.sign)
}

// Calculates cos(a) with a in radians (fixed point), via cos(a) = sin(pi/2 - a)
fn cos(a: FP8x23W) -> FP8x23W {
    sin(FixedTrait::new(HALF_PI, false) - a)
}

fn cos_fast(a: FP8x23W) -> FP8x23W {
    sin_fast(FixedTrait::new(HALF_PI, false) - a)
}

// Calculates sin(a) with a in radians (fixed point), Taylor-series based
fn sin(a: FP8x23W) -> FP8x23W {
    // Reduce to [0, 2*pi), then to [0, pi) with a sign flip for the second half-turn
    let a1 = a.mag % TWO_PI;
    let (whole_rem, partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI));
    let a2 = FixedTrait::new(partial_rem, false);
    let partial_sign = whole_rem == 1;

    let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE());
    // Zero magnitude is always reported as non-negative
    FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0)
}

// LUT-accelerated sin: range-reduce, then interpolate between table entries.
fn sin_fast(a: FP8x23W) -> FP8x23W {
    let a1 = a.mag % TWO_PI;
    let (whole_rem, mut partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI));
    let partial_sign = whole_rem == 1;

    // Fold [pi/2, pi) onto [0, pi/2] using sin(pi - x) = sin(x)
    if partial_rem >= HALF_PI {
        partial_rem = PI - partial_rem;
    }

    // Interpolate between LUT samples; 51472 is the LUT step size
    let (start, low, high) = lut::sin(partial_rem);
    let partial_step = FixedTrait::new(partial_rem - start, false) / FixedTrait::new(51472, false);
    let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false))
        + FixedTrait::<FP8x23W>::new(low, false);

    FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0)
}

// Calculates tan(a) with a in radians (fixed point); panics where cos(a) == 0
fn tan(a: FP8x23W) -> FP8x23W {
    let sinx = sin(a);
    let cosx = cos(a);
    assert(cosx.mag != 0, 'tan undefined');
    sinx / cosx
}

fn tan_fast(a: FP8x23W) -> FP8x23W {
    let sinx = sin_fast(a);
    let cosx = cos_fast(a);
    assert(cosx.mag != 0, 'tan undefined');
    sinx / cosx
}

// Helper function to calculate Taylor series for sin.
// Accumulates 1 - x^2/((2i+2)(2i+3)) * acc from i down to 0 (Horner form of the series).
fn _sin_loop(a: FP8x23W, i: u64, acc: FP8x23W) -> FP8x23W {
    let div = (2 * i + 2) * (2 * i + 3);
    let term = a * a * acc / FixedTrait::new_unscaled(div, false);
    let new_acc = FixedTrait::ONE() - term;

    if (i == 0) {
        return new_acc;
    }

    _sin_loop(a, i - 1, new_acc)
}

// Tests --------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use orion::numbers::fixed_point::implementations::fp8x23wide::helpers::{
        assert_precise, assert_relative
    };

    use super::{
        FixedTrait, acos, HALF_PI, ONE, acos_fast, PI, atan_fast, atan, asin, cos, cos_fast, sin,
        sin_fast, tan
    };

    #[test]
    #[available_gas(3000000)]
    fn test_acos() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::ONE();
        assert(acos(a).into() == 0, 'invalid one');

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(acos(a), 8784530, 'invalid half', error); // 1.0471975506263043

        let a = FixedTrait::ZERO();
        assert_relative(acos(a), HALF_PI.into(), 'invalid zero', Option::None(())); // PI / 2

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(acos(a), 17569060, 'invalid neg half', error); // 2.094395102963489

        let a = FixedTrait::new(ONE, true);
        assert_relative(acos(a), PI.into(), 'invalid neg one', Option::None(())); // PI
    }

    #[test]
    #[available_gas(3000000)]
    fn test_acos_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::ONE();
        assert(acos_fast(a).into() == 0, 'invalid one');

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(acos_fast(a), 8784530, 'invalid half', error); // 1.0471975506263043

        let a = FixedTrait::ZERO();
        assert_relative(acos_fast(a), HALF_PI.into(), 'invalid zero', Option::None(())); // PI / 2

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(acos_fast(a), 17569060, 'invalid neg half', error); // 2.094395102963489

        let a = FixedTrait::new(ONE, true);
        assert_relative(acos_fast(a), PI.into(), 'invalid neg one', Option::None(())); // PI
    }

    #[test]
    #[should_panic]
    #[available_gas(1000000)]
    fn test_acos_fail() {
        let a = FixedTrait::new(2 * ONE, true);
        acos(a);
    }

    #[test]
    #[available_gas(1400000)]
    fn test_atan_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::new(2 * ONE, false);
        assert_relative(atan_fast(a), 9287437, 'invalid two', error);

        let a = FixedTrait::ONE();
        assert_relative(atan_fast(a), 6588397, 'invalid one', error);

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(atan_fast(a), 3889358, 'invalid half', error);

        let a = FixedTrait::ZERO();
        assert(atan_fast(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(atan_fast(a), -3889358, 'invalid neg half', error);

        let a = FixedTrait::new(ONE, true);
        assert_relative(atan_fast(a), -6588397, 'invalid neg one', error);

        let a = FixedTrait::new(2 * ONE, true);
        assert_relative(atan_fast(a), -9287437, 'invalid neg two', error);
    }

    #[test]
    #[available_gas(2600000)]
    fn test_atan() {
        let a = FixedTrait::new(2 * ONE, false);
        assert_relative(atan(a), 9287437, 'invalid two', Option::None(()));

        let a = FixedTrait::ONE();
        assert_relative(atan(a), 6588397, 'invalid one', Option::None(()));

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(atan(a), 3889358, 'invalid half', Option::None(()));

        let a = FixedTrait::ZERO();
        assert(atan(a).into() == 0, 'invalid zero');

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(atan(a), -3889358, 'invalid neg half', Option::None(()));

        let a = FixedTrait::new(ONE, true);
        assert_relative(atan(a), -6588397, 'invalid neg one', Option::None(()));

        let a = FixedTrait::new(2 * ONE, true);
        assert_relative(atan(a), -9287437, 'invalid neg two', Option::None(()));
    }

    #[test]
    #[available_gas(3000000)]
    fn test_asin() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::ONE();
        assert_relative(asin(a), HALF_PI.into(), 'invalid one', Option::None(())); // PI / 2

        let a = FixedTrait::new(ONE / 2, false);
        assert_relative(asin(a), 4392265, 'invalid half', error);

        let a = FixedTrait::ZERO();
        assert_precise(asin(a), 0, 'invalid zero', Option::None(()));

        let a = FixedTrait::new(ONE / 2, true);
        assert_relative(asin(a), -4392265, 'invalid neg half', error);

        let a = FixedTrait::new(ONE, true);
        assert_relative(asin(a), -HALF_PI.into(), 'invalid neg one', Option::None(())); // -PI / 2
    }

    #[test]
    #[should_panic]
    #[available_gas(1000000)]
    fn test_asin_fail() {
        let a = FixedTrait::new(2 * ONE, false);
        asin(a);
    }

    #[test]
    #[available_gas(6000000)]
    fn test_cos() {
        let a = FixedTrait::new(HALF_PI, false);
        assert(cos(a).into() == 0, 'invalid half pi');

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_relative(
            cos(a), 5931642, 'invalid quarter pi', Option::None(())
        ); // 0.7071067811865475

        let a = FixedTrait::new(PI, false);
        assert_relative(cos(a), -1 * ONE.into(), 'invalid pi', Option::None(()));

        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(cos(a), 0, 'invalid neg half pi', Option::None(()));

        let a = FixedTrait::new_unscaled(17, false);
        assert_relative(cos(a), -2308239, 'invalid 17', Option::None(())); // -0.2751631780463348

        let a = FixedTrait::new_unscaled(17, true);
        assert_relative(cos(a), -2308236, 'invalid -17', Option::None(())); // -0.2751631780463348
    }

    #[test]
    #[available_gas(6000000)]
    fn test_cos_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::new(HALF_PI, false);
        assert(cos_fast(a).into() == 0, 'invalid half pi');

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(cos_fast(a), 5931642, 'invalid quarter pi', error); // 0.7071067811865475

        let a = FixedTrait::new(PI, false);
        assert_precise(cos_fast(a), -1 * ONE.into(), 'invalid pi', error);

        // FIX: this case previously asserted on `cos(a)` instead of `cos_fast(a)`,
        // leaving the fast path untested at -pi/2.
        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(cos_fast(a), 0, 'invalid neg half pi', Option::None(()));

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(cos_fast(a), -2308239, 'invalid 17', error); // -0.2751631780463348
    }

    #[test]
    #[available_gas(6000000)]
    fn test_sin() {
        let a = FixedTrait::new(HALF_PI, false);
        assert_precise(sin(a), ONE.into(), 'invalid half pi', Option::None(()));

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(
            sin(a), 5931642, 'invalid quarter pi', Option::None(())
        ); // 0.7071067811865475

        let a = FixedTrait::new(PI, false);
        assert(sin(a).into() == 0, 'invalid pi');

        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(
            sin(a), -ONE.into(), 'invalid neg half pi', Option::None(())
        ); // 0.9999999999939766

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(sin(a), -8064787, 'invalid 17', Option::None(())); // -0.9613974918793389

        let a = FixedTrait::new_unscaled(17, true);
        assert_precise(sin(a), 8064787, 'invalid -17', Option::None(())); // 0.9613974918793389
    }

    #[test]
    #[available_gas(1000000)]
    fn test_sin_fast() {
        let error = Option::Some(84); // 1e-5

        let a = FixedTrait::new(HALF_PI, false);
        assert_precise(sin_fast(a), ONE.into(), 'invalid half pi', error);

        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(sin_fast(a), 5931642, 'invalid quarter pi', error); // 0.7071067811865475

        let a = FixedTrait::new(PI, false);
        assert(sin_fast(a).into() == 0, 'invalid pi');

        let a = FixedTrait::new(HALF_PI, true);
        assert_precise(
            sin_fast(a), -ONE.into(), 'invalid neg half pi', error
        ); // 0.9999999999939766

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(sin_fast(a), -8064787, 'invalid 17', error); // -0.9613974918793389

        let a = FixedTrait::new_unscaled(17, true);
        assert_precise(sin_fast(a), 8064787, 'invalid -17', error); // 0.9613974918793389
    }

    #[test]
    #[available_gas(8000000)]
    fn test_tan() {
        let a = FixedTrait::new(HALF_PI / 2, false);
        assert_precise(tan(a), ONE.into(), 'invalid quarter pi', Option::None(()));

        let a = FixedTrait::new(PI, false);
        assert_precise(tan(a), 0, 'invalid pi', Option::None(()));

        let a = FixedTrait::new_unscaled(17, false);
        assert_precise(tan(a), 29309069, 'invalid 17', Option::None(())); // 3.493917677159002

        let a = FixedTrait::new_unscaled(17, true);
        assert_precise(tan(a), -29309106, 'invalid -17', Option::None(())); // -3.493917677159002
    }
}
https://github.com/gizatechxyz/orion
src/numbers/fixed_point/utils.cairo
use core::integer;

// Half of the STARK felt252 prime, i.e. (P - 1) / 2. Values above this are
// negative in signed-magnitude interpretation.
const HALF_PRIME: felt252 =
    1809251394333065606848661391547535052811553607665798349986546028067936010240;

// Returns the sign of a signed `felt252` as with signed magnitude representation
// true = negative
// false = positive
fn felt_sign(a: felt252) -> bool {
    integer::u256_from_felt252(a) > integer::u256_from_felt252(HALF_PRIME)
}

// Returns the absolute value of a signed `felt252`.
// Simplified: the old positive branch computed `a * 1`, a redundant multiplication.
fn felt_abs(a: felt252) -> felt252 {
    if felt_sign(a) {
        a * -1
    } else {
        a
    }
}

#[cfg(test)]
mod tests {
    use super::{felt_sign, felt_abs};

    #[test]
    fn test_sign() {
        let min = -1809251394333065606848661391547535052811553607665798349986546028067936010240;
        let max = 1809251394333065606848661391547535052811553607665798349986546028067936010240;
        assert(felt_sign(min), 'invalid result');
        assert(felt_sign(-1), 'invalid result');
        assert(!felt_sign(0), 'invalid result');
        assert(!felt_sign(1), 'invalid result');
        assert(!felt_sign(max), 'invalid result');
    }

    #[test]
    fn test_abs() {
        assert(felt_abs(5) == 5, 'abs of pos should be pos');
        assert(felt_abs(-5) == 5, 'abs of neg should be pos');
        assert(felt_abs(0) == 0, 'abs of 0 should be 0');
    }
}
https://github.com/gizatechxyz/orion
src/operators.cairo
// Sub-modules of the `operators` package.
mod tensor;
mod nn;
mod ml;
mod matrix;
mod vec;
mod sequence;
https://github.com/gizatechxyz/orion
src/operators/matrix.cairo
use orion::numbers::NumberTrait;
use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl};

/// A mutable, dense row-major matrix backed by a `NullableVec<T>`.
/// Element (row, col) lives at flat index `row * cols + col`.
struct MutMatrix<T> {
    data: NullableVec<T>, // flat storage; unset slots read back as Option::None via `get`
    rows: usize,
    cols: usize,
}

impl MutMatrixDestruct<T, +Drop<T>> of Destruct<MutMatrix<T>> {
    fn destruct(self: MutMatrix<T>) nopanic {
        self.data.destruct()
    }
}

#[generate_trait]
impl MutMatrixImpl<
    T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +PartialOrd<T>
> of MutMatrixTrait<T> {
    /// Constructor for the Matrix
    fn new(rows: usize, cols: usize) -> MutMatrix<T> {
        MutMatrix { data: NullableVecImpl::new(), rows: rows, cols: cols }
    }

    /// Get the value at (row, col).
    /// Returns Option::None when the index is out of bounds (the underlying
    /// vec's behavior for unset in-bounds slots is delegated to `self.data.get`).
    fn get(ref self: MutMatrix<T>, row: usize, col: usize) -> Option<T> {
        if row >= self.rows || col >= self.cols {
            Option::None
        } else {
            self.data.get(row * self.cols + col)
        }
    }

    /// Get the value at (row, col), defaulting missing entries to zero.
    fn at(ref self: MutMatrix<T>, row: usize, col: usize) -> T {
        match self.get(row, col) {
            Option::Some(val) => val,
            Option::None => NumberTrait::zero(),
        }
    }

    /// Performs the product between a m x n `MutMatrix<T>` and a n x 1 `NullableVec<T>`.
    /// Returns the result as a `NullableVec<T>`.
    /// Panics with 'wrong matrix shape for dot' when `self.cols != vec.len`.
    fn matrix_vector_product<+Mul<T>, +Add<T>, +Div<T>, +AddEq<T>>(
        ref self: MutMatrix<T>, ref vec: NullableVec<T>
    ) -> NullableVec<T> {
        assert(self.cols == vec.len, 'wrong matrix shape for dot');
        let m = self.rows;
        let n = self.cols;

        let mut result_vec = VecTrait::new();

        let mut i = 0_usize;
        while i != m {
            // result[i] = dot(row i of self, vec); missing entries read as zero via `at`
            let mut sum: T = NumberTrait::zero();
            let mut k = 0_usize;
            while k != n {
                sum += MutMatrixImpl::at(ref self, i, k) * VecTrait::at(ref vec, k);
                k += 1;
            };

            VecTrait::set(ref result_vec, i, sum);
            i += 1;
        };

        result_vec
    }

    /// Set the value at (row, col). Out-of-bounds writes are silently ignored.
    fn set(ref self: MutMatrix<T>, row: usize, col: usize, value: T) {
        if row < self.rows && col < self.cols {
            let index = row * self.cols + col;

            self.data.set(index, value)
        }
    }

    /// Returns the shape of the matrix as (rows, cols)
    fn shape(self: MutMatrix<T>) -> (usize, usize) {
        (self.rows, self.cols)
    }

    /// Returns the index of the maximum value along the specified axis.
    /// axis == 0: per-column argmax over rows; axis == 1: per-row argmax over columns.
    /// Missing (None) entries are treated as `NumberTrait::min_value()`, so they never win
    /// unless every entry is missing (then index 0 is reported).
    fn argmax(ref self: MutMatrix<T>, axis: usize) -> Span<usize> {
        assert(axis < 2, 'Invalid axis');

        let mut result: Array<usize> = ArrayTrait::new();
        if axis == 0 {
            let mut col: usize = 0;
            while col != self.cols {
                let mut max_value = self.get(0, col);
                let mut max_value = match max_value {
                    Option::Some => { max_value.unwrap() },
                    Option::None => { NumberTrait::min_value() }
                };
                let mut max_index = 0;

                let mut row: usize = 1;
                while row != self.rows {
                    let mut value = self.get(row, col);
                    let mut value = match value {
                        Option::Some => { value.unwrap() },
                        Option::None => { NumberTrait::min_value() }
                    };
                    // strict > keeps the FIRST occurrence on ties
                    if value > max_value {
                        max_value = value;
                        max_index = row;
                    }
                    row += 1;
                };

                result.append(max_index);
                col += 1;
            };

            return result.span();
        }
        let mut row: usize = 0;
        while row != self.rows {
            let mut max_value = self.get(row, 0);
            let mut max_value = match max_value {
                Option::Some => { max_value.unwrap() },
                Option::None => { NumberTrait::min_value() }
            };
            let mut max_index = 0;

            let mut col: usize = 1;
            while col != self.cols {
                let mut value = self.get(row, col);
                let mut value = match value {
                    Option::Some => { value.unwrap() },
                    Option::None => { NumberTrait::min_value() }
                };
                if value > max_value {
                    max_value = value;
                    max_index = col;
                }
                col += 1;
            };

            result.append(max_index);
            row += 1;
        };

        result.span()
    }

    /// Apply softmax to the matrix along the specified axis.
    /// Two passes per lane: first accumulate sum of exp, then normalize.
    /// NOTE(review): `.unwrap()` panics on missing entries — assumes a fully
    /// populated matrix; verify against callers.
    fn softmax<+AddEq<T>, +Div<T>>(ref self: MutMatrix<T>, axis: usize) -> MutMatrix<T> {
        assert(axis < 2, 'Invalid axis');
        let mut result = MutMatrixImpl::new(self.rows, self.cols);

        if axis == 0 {
            let mut col: usize = 0;
            while col != self.cols {
                let mut sum_exp = NumberTrait::zero();
                let mut row: usize = 0;
                while row != self.rows {
                    let value = self.get(row, col).unwrap().into();
                    sum_exp += value.exp();
                    row += 1;
                };

                row = 0;
                while row != self.rows {
                    let value = self.get(row, col).unwrap().into();
                    let softmax_value = (value.exp() / sum_exp).into();
                    result.set(row, col, softmax_value);
                    row += 1;
                };

                col += 1;
            };
        } else {
            let mut row: usize = 0;
            while row != self.rows {
                let mut sum_exp = NumberTrait::zero();
                let mut col: usize = 0;
                while col != self.cols {
                    let value = self.get(row, col).unwrap().into();
                    sum_exp += value.exp();
                    col += 1;
                };

                col = 0;
                while col != self.cols {
                    let value = self.get(row, col).unwrap().into();
                    let softmax_value = (value.exp() / sum_exp).into();
                    result.set(row, col, softmax_value);
                    col += 1;
                };

                row += 1;
            };
        }

        result
    }

    /// Apply softmax to the matrix along the specified axis, treating zeros as neutral:
    /// zero entries are excluded from the exp-sum and stay exactly zero in the output.
    fn softmax_zero<+AddEq<T>, +Div<T>, +PartialEq<T>>(
        ref self: MutMatrix<T>, axis: usize
    ) -> MutMatrix<T> {
        assert(axis < 2, 'Invalid axis');
        let mut result = MutMatrixImpl::new(self.rows, self.cols);

        if axis == 0 {
            let mut col: usize = 0;
            while col != self.cols {
                let mut sum_exp = NumberTrait::zero();
                let mut row: usize = 0;
                while row != self.rows {
                    let value = self.get(row, col).unwrap().into();
                    if value != NumberTrait::zero() {
                        sum_exp += value.exp();
                    }
                    row += 1;
                };

                row = 0;
                while row != self.rows {
                    let value = self.get(row, col).unwrap().into();
                    if value != NumberTrait::zero() {
                        let softmax_value = (value.exp() / sum_exp).into();
                        result.set(row, col, softmax_value);
                    } else {
                        result.set(row, col, NumberTrait::zero());
                    }
                    row += 1;
                };

                col += 1;
            };
        } else {
            let mut row: usize = 0;
            while row != self.rows {
                let mut sum_exp = NumberTrait::zero();
                let mut col: usize = 0;
                while col != self.cols {
                    let value = self.get(row, col).unwrap().into();
                    if value != NumberTrait::zero() {
                        sum_exp += value.exp();
                    }
                    col += 1;
                };

                col = 0;
                while col != self.cols {
                    let value = self.get(row, col).unwrap().into();
                    if value != NumberTrait::zero() {
                        let softmax_value = (value.exp() / sum_exp).into();
                        result.set(row, col, softmax_value);
                    } else {
                        result.set(row, col, NumberTrait::zero());
                    }
                    col += 1;
                };

                row += 1;
            };
        }

        result
    }

    /// Apply the sigmoid function 1 / (1 + exp(-x)) to each element of the matrix.
    /// Missing (None) entries are skipped and left unset in the result.
    fn sigmoid<+Mul<T>, +Add<T>, +Div<T>>(ref self: MutMatrix<T>) -> MutMatrix<T> {
        let mut result = MutMatrixImpl::new(self.rows, self.cols);

        let mut row: usize = 0;
        while row != self.rows {
            let mut col: usize = 0;
            while col != self.cols {
                let value = self.get(row, col);

                if value.is_some() {
                    let value = NumberTrait::one()
                        / (NumberTrait::one() + (value.unwrap() * NumberTrait::neg_one()).exp());

                    result.set(row, col, value);
                }

                col += 1;
            };

            row += 1;
        };

        result
    }
}
https://github.com/gizatechxyz/orion
src/operators/ml.cairo
// Sub-modules of the `ml` (classical machine-learning operators) package.
mod tree_ensemble;
mod linear;
mod svm;
mod normalizer;

use orion::operators::ml::tree_ensemble::core::{
    TreeEnsemble, TreeEnsembleAttributes, TreeEnsembleImpl, NODE_MODES
};
use orion::operators::ml::tree_ensemble::tree_ensemble_classifier::{
    TreeEnsembleClassifier, TreeEnsembleClassifierImpl, TreeEnsembleClassifierTrait
};
use orion::operators::ml::tree_ensemble::tree_ensemble_regressor::{
    TreeEnsembleRegressor, TreeEnsembleRegressorImpl, TreeEnsembleRegressorTrait,
    AGGREGATE_FUNCTION
};
use orion::operators::ml::linear::linear_regressor::{
    LinearRegressorTrait, LinearRegressorImpl, LinearRegressor
};
use orion::operators::ml::linear::linear_classifier::{
    LinearClassifierTrait, LinearClassifierImpl, LinearClassifier
};
use orion::operators::ml::normalizer::normalizer::{NormalizerTrait, NORM};

/// Post-processing applied to a model's raw scores before they are returned.
#[derive(Copy, Drop)]
enum POST_TRANSFORM {
    NONE, // return raw scores unchanged
    SOFTMAX, // softmax over the class axis
    LOGISTIC, // element-wise sigmoid
    SOFTMAXZERO, // softmax variant that keeps zero scores at zero
    PROBIT, // probit transform — the classifiers in this package panic on it ('Probit not supported yet')
}
https://github.com/gizatechxyz/orion
src/operators/ml/linear.cairo
// Linear-model operators (ONNX-style LinearRegressor / LinearClassifier).
mod linear_regressor;
mod linear_classifier;
https://github.com/gizatechxyz/orion
src/operators/ml/linear/linear_classifier.cairo
use core::array::ArrayTrait;
use core::array::SpanTrait;

use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd};
use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait};
use orion::operators::nn::{NNTrait, FP16x16NN};
use orion::operators::ml::POST_TRANSFORM;

/// ONNX-style linear classifier parameters.
#[derive(Destruct)]
struct LinearClassifier<T> {
    classlabels: Option<Span<usize>>, // class ids; index i maps to scores column i
    coefficients: Span<T>, // flattened weight matrix, length = n_classes * n_features
    intercepts: Option<Span<T>>, // optional per-class bias added to every row of scores
    multi_class: usize, // NOTE(review): not read by `predict` below — confirm intended use
    post_transform: POST_TRANSFORM, // transform applied to raw scores
}

/// Trait
///
/// predict - Performs the linear classification.
trait LinearClassifierTrait<T> {
    /// # LinearClassifierTrait::predict
    ///
    /// ```rust
    ///    fn predict(classifier: LinearClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>);
    /// ```
    ///
    /// Linear Classifier. Performs the linear classification.
    ///
    /// ## Args
    ///
    /// * `self`: LinearClassifier<T> - A LinearClassifier object.
    /// * `X`:  Input 2D tensor.
    ///
    /// ## Returns
    ///
    /// * Tuple `(labels, scores)`: the predicted label per input row, and the
    ///   (post-transformed) score tensor for the input X.
    ///
    /// ## Type Constraints
    ///
    /// `LinearClassifier` and `X` must be fixed points
    ///
    /// ## Examples
    ///
    /// ```rust
    /// use orion::numbers::FP16x16;
    /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
    ///
    /// use orion::operators::ml::linear::linear_classifier::{
    ///     LinearClassifierTrait, POST_TRANSFORM, LinearClassifier
    /// };
    ///
    /// fn linear_classifier_helper(
    ///     post_transform: POST_TRANSFORM
    /// ) -> (LinearClassifier<FP16x16>, Tensor<FP16x16>) {
    ///
    ///     let classlabels: Span<usize> = array![0, 1, 2].span();
    ///     let classlabels = Option::Some(classlabels);
    ///
    ///     let classlabels_strings: Option<Span<FP16x16>> = Option::None;
    ///
    ///     let coefficients: Span<FP16x16> = array![
    ///         FP16x16 { mag: 38011, sign: true },
    ///         FP16x16 { mag: 19005, sign: true },
    ///         FP16x16 { mag: 5898, sign: true },
    ///         FP16x16 { mag: 38011, sign: false },
    ///         FP16x16 { mag: 19005, sign: false },
    ///         FP16x16 { mag: 5898, sign: false },
    ///     ]
    ///         .span();
    ///
    ///     let intercepts: Span<FP16x16> = array![
    ///         FP16x16 { mag: 176947, sign: false },
    ///         FP16x16 { mag: 176947, sign: true },
    ///         FP16x16 { mag: 32768, sign: false },
    ///     ]
    ///         .span();
    ///     let intercepts = Option::Some(intercepts);
    ///
    ///     let multi_class: usize = 0;
    ///
    ///     let mut classifier: LinearClassifier<FP16x16> = LinearClassifier {
    ///         classlabels,
    ///         coefficients,
    ///         intercepts,
    ///         multi_class,
    ///         post_transform
    ///     };
    ///
    ///     let mut X: Tensor<FP16x16> = TensorTrait::new(
    ///         array![3, 2].span(),
    ///         array![
    ///             FP16x16 { mag: 0, sign: false },
    ///             FP16x16 { mag: 65536, sign: false },
    ///             FP16x16 { mag: 131072, sign: false },
    ///             FP16x16 { mag: 196608, sign: false },
    ///             FP16x16 { mag: 262144, sign: false },
    ///             FP16x16 { mag: 327680, sign: false },
    ///         ]
    ///             .span()
    ///     );
    ///
    ///     (classifier, X)
    /// }
    ///
    /// fn linear_classifier_multi_softmax() -> (Span<usize>, Tensor<FP16x16>) {
    ///     let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX);
    ///
    ///     let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    ///
    ///     (labels, scores)
    /// }
    ///
    /// >>>
    /// ([0, 2, 2],
    ///  [
    ///    [0.852656, 0.009192, 0.138152],
    ///    [0.318722, 0.05216, 0.629118],
    ///    [0.036323, 0.090237, 0.87344]
    ///  ])
    /// ```
    fn predict(classifier: LinearClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>);
}

impl LinearClassifierImpl<
    T,
    MAG,
    +Drop<T>,
    +Copy<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Add<T>,
    +TensorTrait<usize>,
    +TensorTrait<T>,
    +AddEq<T>,
    +Div<T>,
    +Mul<T>,
    +Add<Tensor<T>>,
    +NNTrait<T>
> of LinearClassifierTrait<T> {
    fn predict(classifier: LinearClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>) {
        // n = number of classes, inferred from the flat coefficient count and X's feature dim
        let n: usize = classifier.coefficients.len() / *(X.shape).at(1);
        let mut shape = ArrayTrait::<usize>::new();
        shape.append(n);
        shape.append(*(X.shape).at(1));
        let mut coefficients = TensorTrait::new(shape.span(), classifier.coefficients);
        let coefficients = coefficients.transpose(array![1, 0].span());

        // scores = X @ W^T (+ intercepts, broadcast over rows, if provided)
        let mut scores = X.matmul(@coefficients);
        match classifier.intercepts {
            Option::Some(intercepts) => {
                let mut shape = ArrayTrait::<usize>::new();
                shape.append(1);
                shape.append(intercepts.len());
                let intercepts = TensorTrait::new(shape.span(), intercepts);
                scores = TensorTrait::add(scores, intercepts);
            },
            Option::None => {},
        };

        let (n_classes, classlabels) = match classifier.classlabels {
            Option::Some(classlabels) => { (classlabels.len(), classlabels) },
            Option::None => { (0, ArrayTrait::<usize>::new().span()) },
        };
        // Binary model stored with a single weight column: expand each score s
        // into the pair (-s, s) so downstream code sees one column per class.
        if *coefficients.shape.at(1) == 1 && n_classes == 2 {
            let mut new_scores = array![];

            loop {
                match scores.data.pop_front() {
                    Option::Some(item) => {
                        new_scores.append(NumberTrait::neg(*item));
                        new_scores.append(*item);
                    },
                    Option::None => { break; },
                }
            };
            scores = TensorTrait::new(array![*scores.shape.at(0), 2].span(), new_scores.span());
        }
        // Post Transform
        scores = match classifier.post_transform {
            POST_TRANSFORM::NONE => { scores },
            POST_TRANSFORM::SOFTMAX => { NNTrait::softmax(@scores, Option::Some(1)) },
            POST_TRANSFORM::LOGISTIC => { NNTrait::sigmoid(@scores) },
            POST_TRANSFORM::SOFTMAXZERO => { NNTrait::softmax_zero(@scores, 1) },
            POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
        };

        // Labels
        let mut labels_list = array![];
        if *scores.shape.at(1) > 1 {
            // Multi-class: label = classlabels[argmax over the class axis]
            let mut labels = scores.argmax(1, Option::None, Option::None);
            loop {
                match labels.data.pop_front() {
                    Option::Some(i) => { labels_list.append(*classlabels[(*i).try_into().unwrap()]); },
                    Option::None => { break; }
                };
            };
        } else {
            // Single-column scores: threshold depends on the post-transform —
            // 0 for raw scores, 1/2 for probability-like outputs. A score below
            // the threshold yields label 0.
            let mut i = 0;
            match classifier.post_transform {
                POST_TRANSFORM::NONE => {
                    while i != scores.data.len() {
                        if *scores.data.at(i) >= NumberTrait::zero() {
                            labels_list.append(*classlabels[0]);
                        } else {
                            labels_list.append(0);
                        }
                        i += 1;
                    };
                },
                POST_TRANSFORM::SOFTMAX => {
                    while i != scores.data.len() {
                        if *scores.data.at(i) >= NumberTrait::half() {
                            labels_list.append(*classlabels[0]);
                        } else {
                            labels_list.append(0);
                        }
                        i += 1;
                    };
                },
                POST_TRANSFORM::LOGISTIC => {
                    while i != scores.data.len() {
                        if *scores.data.at(i) >= NumberTrait::half() {
                            labels_list.append(*classlabels[0]);
                        } else {
                            labels_list.append(0);
                        }
                        i += 1;
                    };
                },
                POST_TRANSFORM::SOFTMAXZERO => {
                    while i != scores.data.len() {
                        if *scores.data.at(i) >= NumberTrait::half() {
                            labels_list.append(*classlabels[0]);
                        } else {
                            labels_list.append(0);
                        }
                        i += 1;
                    };
                },
                POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
            };
        }

        (labels_list.span(), scores)
    }
}

// Returns the larger of two usize values.
// NOTE(review): not referenced within this file — confirm external callers before removing.
fn max(a: usize, b: usize) -> usize {
    if a > b {
        a
    } else {
        b
    }
}
https://github.com/gizatechxyz/orion
src/operators/ml/linear/linear_regressor.cairo
// Linear Regressor operator (ONNX ai.onnx.ml LinearRegressor-style):
// score = X @ coefficients^T (+ intercepts), followed by an optional post-transform.
use core::array::ArrayTrait;
use core::clone::Clone;
use core::traits::Into;
use core::array::SpanTrait;
use core::dict::Felt252DictTrait;
use core::dict::Felt252DictEntryTrait;
use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd};
use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait};
use core::debug::PrintTrait;
use orion::operators::nn::{NNTrait, FP16x16NN};
use orion::operators::ml::POST_TRANSFORM;

// Model parameters for the generalized linear regression evaluation.
#[derive(Destruct)]
struct LinearRegressor<T> {
    // Flattened weight matrix, length = target * n_features.
    coefficients: Span<T>,
    // Optional per-target bias terms (length = target when present).
    intercepts: Option<Span<T>>,
    // Number of regression targets (output columns).
    target: usize,
    // Transform applied to the raw scores (NONE/SOFTMAX/LOGISTIC/SOFTMAXZERO/PROBIT).
    post_transform: POST_TRANSFORM,
}

/// Trait
///
/// predict - Performs the generalized linear regression evaluation.
trait LinearRegressorTrait<T> {
    /// # LinearRegressorTrait::predict
    ///
    /// ```rust
    ///    fn predict(regressor: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T>;
    /// ```
    ///
    /// Linear Regressor. Performs the generalized linear regression evaluation.
    ///
    /// ## Args
    ///
    /// * `regressor`: LinearRegressor<T> - A LinearRegressor object.
    /// * `X`:  Input 2D tensor.
    ///
    /// ## Returns
    ///
    /// * Tensor<T> containing the generalized linear regression evaluation of the input X.
    ///
    /// ## Type Constraints
    ///
    /// `LinearRegressor` and `X` must be fixed points
    ///
    /// ## Examples
    ///
    /// ```rust
    /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor, FP16x16TensorAdd};
    /// use orion::operators::ml::linear::linear_regressor::{
    ///     LinearRegressorTrait, POST_TRANSFORM, LinearRegressor
    /// };
    /// use orion::numbers::{FP16x16, FixedTrait};
    /// use orion::operators::nn::{NNTrait, FP16x16NN};
    ///
    /// fn example_linear_regressor() -> Tensor<FP16x16> {
    ///     let mut X: Tensor<FP16x16> = TensorTrait::new(
    ///         array![3, 2].span(),
    ///         array![
    ///             FP16x16 { mag: 0, sign: false },
    ///             FP16x16 { mag: 65536, sign: false },
    ///             FP16x16 { mag: 131072, sign: false },
    ///             FP16x16 { mag: 196608, sign: false },
    ///             FP16x16 { mag: 262144, sign: false },
    ///             FP16x16 { mag: 327680, sign: false },
    ///         ]
    ///             .span()
    ///     );
    ///
    ///     let coefficients: Span<FP16x16> = array![
    ///         FP16x16 { mag: 19661, sign: false },
    ///         FP16x16 { mag: 50463, sign: true },
    ///     ]
    ///         .span();
    ///
    ///     let intercepts: Span<FP16x16> = array![
    ///         FP16x16 { mag: 32768, sign: false },
    ///     ]
    ///         .span();
    ///     let intercepts = Option::Some(intercepts);
    ///
    ///     let target: usize = 1;
    ///     let post_transform = POST_TRANSFORM::NONE;
    ///
    ///     let mut regressor: LinearRegressor<FP16x16> = LinearRegressor {
    ///         coefficients, intercepts, target, post_transform
    ///     };
    ///
    ///     let scores = LinearRegressorTrait::predict(regressor, X);
    ///
    ///     scores
    /// }
    ///
    /// >>> [[-0.27], [-1.21], [-2.15]]
    ///
    ///
    /// fn example_linear_regressor_2() -> Tensor<FP16x16> {
    ///     let mut X: Tensor<FP16x16> = TensorTrait::new(
    ///         array![3, 2].span(),
    ///         array![
    ///             FP16x16 { mag: 0, sign: false },
    ///             FP16x16 { mag: 65536, sign: false },
    ///             FP16x16 { mag: 131072, sign: false },
    ///             FP16x16 { mag: 196608, sign: false },
    ///             FP16x16 { mag: 262144, sign: false },
    ///             FP16x16 { mag: 327680, sign: false },
    ///         ]
    ///             .span()
    ///     );
    ///
    ///     let coefficients: Span<FP16x16> = array![
    ///         FP16x16 { mag: 19661, sign: false },
    ///         FP16x16 { mag: 50463, sign: true },
    ///         FP16x16 { mag: 19661, sign: false },
    ///         FP16x16 { mag: 50463, sign: true },
    ///     ]
    ///         .span();
    ///
    ///     let intercepts: Span<FP16x16> = array![
    ///         FP16x16 { mag: 32768, sign: false },
    ///         FP16x16 { mag: 45875, sign: false },
    ///     ]
    ///         .span();
    ///     let intercepts = Option::Some(intercepts);
    ///
    ///     let target = 2;
    ///     let post_transform = POST_TRANSFORM::NONE;
    ///
    ///     let mut regressor: LinearRegressor<FP16x16> = LinearRegressor {
    ///         coefficients, intercepts, target, post_transform
    ///     };
    ///
    ///     let scores = LinearRegressorTrait::predict(regressor, X);
    ///
    ///     scores
    /// }
    ///
    /// >>> [[-0.27, -0.07], [-1.21, -1.01], [-2.15, -1.95]]
    /// ```
    ///
    fn predict(regressor: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T>;
}

impl LinearRegressorImpl<
    T,
    MAG,
    +Drop<T>,
    +Copy<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Add<T>,
    +TensorTrait<usize>,
    +TensorTrait<T>,
    +PrintTrait<T>,
    +AddEq<T>,
    +Div<T>,
    +Mul<T>,
    +Add<Tensor<T>>,
    +NNTrait<T>,
> of LinearRegressorTrait<T> {
    fn predict(regressor: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T> {
        // Number of features per target; assumes coefficients.len() is a
        // multiple of target — TODO confirm this invariant at construction.
        let n: usize = regressor.coefficients.len() / regressor.target;
        // Reshape the flat coefficients into [target, n] ...
        let mut shape = ArrayTrait::<usize>::new();
        shape.append(regressor.target);
        shape.append(n);
        let mut coefficients = TensorTrait::new(shape.span(), regressor.coefficients);
        // ... then transpose to [n, target] so that X ([batch, n]) @ coefficients
        // yields [batch, target].
        let coefficients = coefficients.transpose(array![1, 0].span());
        let mut score = X.matmul(@coefficients);
        match regressor.intercepts {
            Option::Some(intercepts) => {
                // Intercepts become a [1, target] row that broadcasts over the batch.
                let mut shape: Array<usize> = array![];
                shape.append(1);
                shape.append(intercepts.len());
                let intercepts = TensorTrait::new(shape.span(), intercepts);
                score = TensorTrait::add(score, intercepts);
            },
            Option::None => {},
        };

        // Post Transform
        let score = match regressor.post_transform {
            POST_TRANSFORM::NONE => score, // No action required
            POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@score, Option::Some(1)),
            POST_TRANSFORM::LOGISTIC => NNTrait::sigmoid(@score),
            POST_TRANSFORM::SOFTMAXZERO => NNTrait::softmax_zero(@score, 1),
            POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
        };

        score
    }
}
https://github.com/gizatechxyz/orion
src/operators/ml/normalizer.cairo
mod normalizer;
https://github.com/gizatechxyz/orion
src/operators/ml/normalizer/normalizer.cairo
// Normalizer operator: normalizes each row of a 2D [N, C] tensor independently
// using MAX, L1 or L2 row norms.
use core::array::ArrayTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{TensorTrait, Tensor};

// Supported row-normalization modes.
#[derive(Copy, Drop)]
enum NORM {
    MAX,
    L1,
    L2,
}

/// predict - Returns the normalization of the input, each row of the input is normalized independently.
trait NormalizerTrait<T> {
    /// # Normalizer::predict
    ///
    /// ```rust
    ///    fn predict(X: Tensor<T>, norm: NORM) -> Tensor<T>;
    /// ```
    ///
    /// Returns the normalized input.
    /// Three different types of normalization can be performed and are defined as follows:
    /// MAX: $Y = \frac{X}{max(X)}$
    /// L1: $Y = \frac{X}{sum(X)}$
    /// L2: $Y = \frac{X}{\sqrt{sum(X^2)}}$
    /// For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row of the batch is normalized independently.
    ///
    /// ## Args
    ///
    /// * `X`(`@Tensor<T>`) - Input 2D tensor.
    /// * `norm`(`NORM`) - NORM::MAX, NORM::L1 or NORM::L2
    ///
    /// ## Returns
    ///
    /// * Tensor<T> - output tensor
    ///
    /// ## Examples
    ///
    /// ```rust
    /// use orion::numbers::FP16x16;
    /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorPartialEq};
    ///
    /// use orion::operators::ml::normalizer::normalizer::{
    ///     NormalizerTrait, NORM
    /// };
    ///
    /// fn normalizer_max() -> Tensor<FP16x16> {
    ///     let mut shape = ArrayTrait::<usize>::new();
    ///     shape.append(3);
    ///     shape.append(3);
    ///
    ///     let mut data = ArrayTrait::new();
    ///     data.append(FP16x16 { mag: 65536, sign: true });
    ///     data.append(FP16x16 { mag: 52428, sign: true });
    ///     data.append(FP16x16 { mag: 39321, sign: true });
    ///     data.append(FP16x16 { mag: 26214, sign: true });
    ///     data.append(FP16x16 { mag: 13107, sign: true });
    ///     data.append(FP16x16 { mag: 0, sign: false });
    ///     data.append(FP16x16 { mag: 13107, sign: false });
    ///     data.append(FP16x16 { mag: 26214, sign: false });
    ///     data.append(FP16x16 { mag: 39321, sign: false });
    ///
    ///     let X = TensorTrait::new(shape.span(), data.span());
    ///
    ///     return NormalizerTrait::predict(X, NORM::MAX);
    /// }
    /// >>> [[-1.        -0.8       -0.6      ]
    ///      [-1.        -0.5        0.       ]
    ///      [ 0.3333333  0.6666666  1.       ]]
    ///
    /// ```
    ///
    fn predict(X: Tensor<T>, norm: NORM) -> Tensor<T>;
}

impl NormalizerImpl<
    T,
    MAG,
    +Drop<T>,
    +Copy<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +TensorTrait<T>,
    +AddEq<T>,
    +Div<Tensor<T>>,
    +Mul<T>
> of NormalizerTrait<T> {
    fn predict(X: Tensor<T>, norm: NORM) -> Tensor<T> {
        // Only batched 2D inputs are supported; each row is normalized on its own.
        assert(X.shape.len() == 2, 'input should be 2D: NxC');

        let normalized_tensor = match norm {
            NORM::MAX => { norm_max(X) },
            NORM::L1 => { norm_l1(X) },
            NORM::L2 => { norm_l2(X) },
        };

        return normalized_tensor;
    }
}

// Divides each row of X by the max of the row's absolute values,
// guarding the divisor against zero with a tiny epsilon.
fn norm_max<
    T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +PartialOrd<T>, +Div<Tensor<T>>,
>(
    X: Tensor<T>
) -> Tensor<T> {
    let div_data = reduce_max_2D_axis_1(X.abs());
    // [N, 1] column of per-row divisors, broadcast by the tensor division.
    let div = TensorTrait::new(
        array![*X.shape.at(0), (div_data.len() / *X.shape.at(0))].span(), div_data
    );
    // NOTE(review): from_felt(1) presumably yields the smallest representable
    // positive fixed-point value (raw mag 1), not 1.0 — confirm per impl.
    let epsillon = TensorTrait::new(array![1, 1].span(), array![NumberTrait::from_felt(1)].span());
    let safe_div = TensorTrait::max(tensors: array![div, epsillon].span());
    return X / safe_div;
}

// Divides each row of X by the sum of the row's absolute values (L1 norm),
// guarding the divisor against zero with a tiny epsilon.
fn norm_l1<
    T, MAG, +Drop<T>, +Copy<T>, +AddEq<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +PartialOrd<T>, +Div<Tensor<T>>,
>(
    X: Tensor<T>
) -> Tensor<T> {
    let div_data = reduce_sum_2D_axis_1(X.abs());
    let div = TensorTrait::new(
        array![*X.shape.at(0), (div_data.len() / *X.shape.at(0))].span(), div_data
    );
    let epsillon = TensorTrait::new(array![1, 1].span(), array![NumberTrait::from_felt(1)].span());
    let safe_div = TensorTrait::max(tensors: array![div, epsillon].span());
    return X / safe_div;
}

// Divides each row of X by the square root of the row's sum of squares (L2 norm),
// guarding the divisor against zero with a tiny epsilon.
fn norm_l2<
    T, MAG, +Drop<T>, +Copy<T>, +AddEq<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +PartialOrd<T>, +Div<Tensor<T>>, +Mul<T>
>(
    X: Tensor<T>
) -> Tensor<T> {
    let div_data = reduce_sum_2D_axis_1(square(X));
    let div = TensorTrait::new(
        array![*X.shape.at(0), (div_data.len() / *X.shape.at(0))].span(), div_data
    );
    let epsillon = TensorTrait::new(array![1, 1].span(), array![NumberTrait::from_felt(1)].span());
    // sqrt is taken on the summed squares just before the max-guard.
    let safe_div = TensorTrait::max(tensors: array![div.sqrt(), epsillon].span());
    return X / safe_div;
}

// Row-wise max reduction of a [N, C] tensor; returns N values.
fn reduce_max_2D_axis_1<
    T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +PartialOrd<T>,
>(
    X: Tensor<T>
) -> Span<T> {
    let mut new_data = ArrayTrait::new();
    let N = *X.shape.at(0);
    let C = *X.shape.at(1);
    let mut i = 0;
    while i != N {
        // Slice out row i and reduce it.
        let max = max(SpanTrait::slice(X.data, i * C, C));
        new_data.append(max);
        i += 1;
    };
    return new_data.span();
}

// Maximum element of a non-empty span.
fn max<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +PartialOrd<T>,>(
    mut a: Span<T>
) -> T {
    assert(a.len() > 0, 'span cannot be empty');

    // Element 0 seeds the running max and is compared once more by the loop;
    // redundant but harmless.
    let mut max = *a.at(0);
    loop {
        match a.pop_front() {
            Option::Some(v) => { if *v > max {
                max = *v;
            }; },
            Option::None => { break max; }
        };
    }
}

// Sum of the elements of a non-empty span.
fn sum<T, MAG, +Drop<T>, +Copy<T>, +AddEq<T>, +NumberTrait<T, MAG>,>(mut a: Span<T>) -> T {
    assert(a.len() > 0, 'span cannot be empty');

    let mut sum = NumberTrait::zero();
    loop {
        match a.pop_front() {
            Option::Some(v) => { sum += *v; },
            Option::None => { break sum; }
        };
    }
}

// Element-wise square of a tensor (same shape, each value multiplied by itself).
fn square<
    T, MAG, +Drop<T>, +Copy<T>, +AddEq<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +PartialOrd<T>, +Mul<T>
>(
    mut a: Tensor<T>
) -> Tensor<T> {
    let mut arr = ArrayTrait::new();
    loop {
        match a.data.pop_front() {
            Option::Some(v) => { arr.append(*v * *v); },
            Option::None => { break TensorTrait::new(a.shape, arr.span()); }
        };
    }
}

// Row-wise sum reduction of a [N, C] tensor; returns N values.
fn reduce_sum_2D_axis_1<
    T, MAG, +Drop<T>, +Copy<T>, +AddEq<T>, +NumberTrait<T, MAG>, +TensorTrait<T>,
>(
    X: Tensor<T>
) -> Span<T> {
    let mut new_data = ArrayTrait::new();
    let N = *X.shape.at(0);
    let C = *X.shape.at(1);
    let mut i = 0;
    while i != N {
        let sum = sum(SpanTrait::slice(X.data, i * C, C));
        new_data.append(sum);
        i += 1;
    };
    return new_data.span();
}
https://github.com/gizatechxyz/orion
src/operators/ml/svm.cairo
mod core; mod svm_regressor; mod svm_classifier;
https://github.com/gizatechxyz/orion
src/operators/ml/svm/core.cairo
use orion::numbers::NumberTrait; use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; use orion::utils::get_row; #[derive(Copy, Drop)] enum KERNEL_TYPE { LINEAR, POLY, RBF, SIGMOID, } fn kernel_dot< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>, +Neg<T>, +Sub<T>, >( kernel_params: Span<T>, pA: Span<T>, pB: Span<T>, kernel: KERNEL_TYPE ) -> T { let s = match kernel { KERNEL_TYPE::LINEAR => sv_dot(pA, pB), KERNEL_TYPE::POLY => { let mut s = sv_dot(pA, pB); s = s * *kernel_params.at(0) + *kernel_params.at(1); s.pow(*kernel_params.at(2)) }, KERNEL_TYPE::RBF => { let mut s = squared_diff(pA, pB); NumberTrait::exp(-*kernel_params.at(0) * s) }, KERNEL_TYPE::SIGMOID => { let mut s = sv_dot(pA, pB); s = s * *kernel_params.at(0) + *kernel_params.at(1); NumberTrait::tanh(s) }, }; s } fn sv_dot< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>, >( pA: Span<T>, pB: Span<T> ) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); while i != pA.len() { sum = sum + *pA.at(i) * *pB.at(i); i += 1; }; sum } fn squared_diff< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>, +Sub<T>, >( pA: Span<T>, pB: Span<T> ) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); while i != pA.len() { sum = sum + (*pA.at(i) - *pB.at(i)).pow(NumberTrait::one() + NumberTrait::one()); i += 1; }; sum }
https://github.com/gizatechxyz/orion
src/operators/ml/svm/svm_classifier.cairo
use core::array::ArrayTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; use orion::numbers::{FP64x64, FP64x64Impl}; use orion::operators::tensor::implementations::tensor_fp64x64::{FP64x64Tensor}; use orion::operators::nn::{NNTrait, FP16x16NN, FP64x64NN}; use orion::utils::get_row; use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; use orion::operators::ml::POST_TRANSFORM; #[derive(Copy, Drop, Destruct)] struct SVMClassifier<T> { classlabels: Span<usize>, coefficients: Span<T>, kernel_params: Span<T>, kernel_type: KERNEL_TYPE, post_transform: POST_TRANSFORM, prob_a: Span<T>, prob_b: Span<T>, rho: Span<T>, support_vectors: Span<T>, vectors_per_class: Option<Span<usize>>, } #[derive(Copy, Drop)] enum MODE { SVM_LINEAR, SVM_SVC, } /// /// predict - Returns the top class for each of N inputs. trait SVMClassifierTrait<T> { /// # SVMClassifierTrait::predict /// /// ```rust /// fn predict(ref self: SVMClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>); /// ``` /// /// Support Vector Machine classification. /// /// ## Args /// /// * `self`: SVMClassifier<T> - A SVMClassifier object. /// * `X`: Input 2D tensor. /// /// ## Returns /// /// * N Top class for each point /// * The class score Matrix for each class, for each point. If prob_a and prob_b are provided they are probabilities for each class, otherwise they are raw scores. 
/// /// ## Type Constraints /// /// `SVMClassifier` and `X` must be fixed points /// /// ## Examples /// /// ```rust /// fn example_svm_classifier_noprob_linear_sv_none() -> (Span<usize>, Tensor<FP16x16>) { /// let coefficients: Span<FP16x16> = array![ /// FP16x16 { mag: 50226, sign: false }, /// FP16x16 { mag: 5711, sign: false }, /// FP16x16 { mag: 7236, sign: false }, /// FP16x16 { mag: 63175, sign: true } /// ] /// .span(); /// let kernel_params: Span<FP16x16> = array![ /// FP16x16 { mag: 8025, sign: false }, /// FP16x16 { mag: 0, sign: false }, /// FP16x16 { mag: 196608, sign: false } /// ] /// .span(); /// let kernel_type = KERNEL_TYPE::LINEAR; /// let prob_a: Span<FP16x16> = array![].span(); /// let prob_b: Span<FP16x16> = array![].span(); /// let rho: Span<FP16x16> = array![FP16x16 { mag: 146479, sign: false }].span(); /// /// let support_vectors: Span<FP16x16> = array![ /// FP16x16 { mag: 314572, sign: false }, /// FP16x16 { mag: 222822, sign: false }, /// FP16x16 { mag: 124518, sign: false }, /// FP16x16 { mag: 327680, sign: false }, /// FP16x16 { mag: 196608, sign: false }, /// FP16x16 { mag: 104857, sign: false }, /// FP16x16 { mag: 294912, sign: false }, /// FP16x16 { mag: 150732, sign: false }, /// FP16x16 { mag: 85196, sign: false }, /// FP16x16 { mag: 334233, sign: false }, /// FP16x16 { mag: 163840, sign: false }, /// FP16x16 { mag: 196608, sign: false } /// ] /// .span(); /// let classlabels: Span<usize> = array![0, 1].span(); /// /// let vectors_per_class = Option::Some(array![3, 1].span()); /// /// let post_transform = POST_TRANSFORM::NONE; /// /// let mut classifier: SVMClassifier<FP16x16> = SVMClassifier { /// classlabels, /// coefficients, /// kernel_params, /// kernel_type, /// post_transform, /// prob_a, /// prob_b, /// rho, /// support_vectors, /// vectors_per_class, /// }; /// /// let mut X: Tensor<FP16x16> = TensorTrait::new( /// array![3, 3].span(), /// array![ /// FP16x16 { mag: 65536, sign: true }, /// FP16x16 { mag: 52428, sign: true 
}, /// FP16x16 { mag: 39321, sign: true }, /// FP16x16 { mag: 26214, sign: true }, /// FP16x16 { mag: 13107, sign: true }, /// FP16x16 { mag: 0, sign: false }, /// FP16x16 { mag: 13107, sign: false }, /// FP16x16 { mag: 26214, sign: false }, /// FP16x16 { mag: 39321, sign: false }, /// ] /// .span() /// ); /// /// return SVMClassifierTrait::predict(ref classifier, X); /// /// } /// // >>> ([0, 0, 0], /// // [[-2.662655, 2.662655], /// // [-2.21481, 2.21481], /// // [-1.766964, 1.766964]]) /// /// /// fn example_svm_classifier_binary_softmax_fp64x64() -> (Span<usize>, Tensor<FP64x64>) { /// let coefficients: Span<FP64x64> = array![ /// FP64x64 { mag: 18446744073709551616, sign: false }, /// FP64x64 { mag: 18446744073709551616, sign: false }, /// FP64x64 { mag: 18446744073709551616, sign: false }, /// FP64x64 { mag: 18446744073709551616, sign: false }, /// FP64x64 { mag: 18446744073709551616, sign: true }, /// FP64x64 { mag: 18446744073709551616, sign: true }, /// FP64x64 { mag: 18446744073709551616, sign: true }, /// FP64x64 { mag: 18446744073709551616, sign: true } /// ] /// .span(); /// let kernel_params: Span<FP64x64> = array![ /// FP64x64 { mag: 7054933896252620800, sign: false }, /// FP64x64 { mag: 0, sign: false }, /// FP64x64 { mag: 55340232221128654848, sign: false } /// ] /// .span(); /// let kernel_type = KERNEL_TYPE::RBF; /// let prob_a: Span<FP64x64> = array![FP64x64 { mag: 94799998099962986496, sign: true }].span(); /// let prob_b: Span<FP64x64> = array![FP64x64 { mag: 1180576833385529344, sign: false }].span(); /// let rho: Span<FP64x64> = array![FP64x64 { mag: 3082192501545631744, sign: false }].span(); /// /// let support_vectors: Span<FP64x64> = array![ /// FP64x64 { mag: 3528081300248330240, sign: false }, /// FP64x64 { mag: 19594207602596118528, sign: true }, /// FP64x64 { mag: 9235613999318433792, sign: false }, /// FP64x64 { mag: 10869715877100519424, sign: true }, /// FP64x64 { mag: 5897111318564962304, sign: true }, /// FP64x64 { mag: 
1816720038917308416, sign: false }, /// FP64x64 { mag: 4564890528671334400, sign: false }, /// FP64x64 { mag: 21278987070814027776, sign: true }, /// FP64x64 { mag: 7581529597213147136, sign: false }, /// FP64x64 { mag: 10953113834067329024, sign: true }, /// FP64x64 { mag: 24318984989010034688, sign: true }, /// FP64x64 { mag: 30296187483321270272, sign: true }, /// FP64x64 { mag: 10305112258191032320, sign: false }, /// FP64x64 { mag: 17005441559857987584, sign: true }, /// FP64x64 { mag: 11555205301925838848, sign: false }, /// FP64x64 { mag: 2962701975885447168, sign: true }, /// FP64x64 { mag: 11741665981322231808, sign: true }, /// FP64x64 { mag: 15376232508819505152, sign: false }, /// FP64x64 { mag: 13908474645692022784, sign: false }, /// FP64x64 { mag: 7323415394302033920, sign: true }, /// FP64x64 { mag: 3284258824352956416, sign: true }, /// FP64x64 { mag: 11374683084831064064, sign: true }, /// FP64x64 { mag: 9087138148126818304, sign: false }, /// FP64x64 { mag: 8247488946750095360, sign: false } /// ] /// .span(); /// let classlabels: Span<usize> = array![0, 1].span(); /// /// let vectors_per_class = Option::Some(array![4, 4].span()); /// let post_transform = POST_TRANSFORM::SOFTMAX; /// /// let mut classifier: SVMClassifier<FP64x64> = SVMClassifier { /// classlabels, /// coefficients, /// kernel_params, /// kernel_type, /// post_transform, /// prob_a, /// prob_b, /// rho, /// support_vectors, /// vectors_per_class, /// }; /// /// let mut X: Tensor<FP64x64> = TensorTrait::new( /// array![3, 3].span(), /// array![ /// FP64x64 { mag: 18446744073709551616, sign: true }, /// FP64x64 { mag: 14757395258967642112, sign: true }, /// FP64x64 { mag: 11068046444225730560, sign: true }, /// FP64x64 { mag: 7378697629483821056, sign: true }, /// FP64x64 { mag: 3689348814741910528, sign: true }, /// FP64x64 { mag: 0, sign: false }, /// FP64x64 { mag: 3689348814741910528, sign: false }, /// FP64x64 { mag: 7378697629483821056, sign: false }, /// FP64x64 { mag: 
11068046444225730560, sign: false } /// ] /// .span() /// ); /// /// /// return SVMClassifierTrait::predict(ref classifier, X); /// /// } /// >>> ([0, 1, 1], /// [[0.728411, 0.271589], /// [0.484705, 0.515295], /// [0.274879, 0.725121]]) /// ``` fn predict(ref self: SVMClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>); } impl SVMClassifierImpl< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Into<usize, MAG>, +PartialOrd<T>, +PartialEq<T>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Div<T>, +Mul<T>, +Neg<T>, +Sub<T>, +NNTrait<T>, > of SVMClassifierTrait<T> { fn predict(ref self: SVMClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>) { let mut vector_count_ = 0; let class_count_ = max(self.classlabels.len(), 1); let mut starting_vector_: Array<usize> = array![]; let (vectors_per_class_, starting_vector_) = match self.vectors_per_class { Option::Some(vectors_per_class) => { let mut i = 0; while i != vectors_per_class.len() { starting_vector_.append(vector_count_); vector_count_ += *vectors_per_class.at(i); i += 1; }; (vectors_per_class, starting_vector_.span()) }, Option::None => { (array![].span(), array![].span()) }, }; let (mode, kernel_type_, sv, coefs) = if vector_count_ > 0 { let mode = MODE::SVM_SVC; let kernel_type_ = self.kernel_type; let sv = TensorTrait::new( array![vector_count_, self.support_vectors.len() / vector_count_].span(), self.support_vectors ); let coefs = TensorTrait::new( array![self.coefficients.len() / vector_count_, vector_count_].span(), self.coefficients ); (mode, kernel_type_, sv, coefs) } else { let mode = MODE::SVM_LINEAR; let kernel_type_ = KERNEL_TYPE::LINEAR; let sv = TensorTrait::new( array![self.support_vectors.len()].span(), self.support_vectors ); let coefs = TensorTrait::new( array![class_count_, self.coefficients.len() / class_count_].span(), self.coefficients ); (mode, kernel_type_, sv, coefs) }; let weights_are_all_positive_ = (min(self.coefficients) >= NumberTrait::zero()); // SVM let (res, votes) = match mode { 
MODE::SVM_LINEAR => { let mut res: Array<T> = array![]; let mut n = 0; while n != *X.shape.at(0) { let mut x_n = get_row(@X, n); let scores = run_linear(ref self, x_n, coefs, class_count_, kernel_type_); let mut i = 0; while i != scores.len() { res.append(*scores.at(i)); i += 1; }; n += 1; }; ( TensorTrait::new(array![*X.shape.at(0), class_count_].span(), res.span()), Option::None ) }, MODE::SVM_SVC => { let mut res: Array<T> = array![]; let mut votes: Array<T> = array![]; let mut n = 0; while n != *X.shape.at(0) { let mut x_n = get_row(@X, n); let (scores, mut vote) = run_svm( ref self, x_n, sv, vector_count_, kernel_type_, class_count_, starting_vector_, coefs, vectors_per_class_ ); let mut i = 0; while i != scores.len() { res.append(*scores.at(i)); i += 1; }; let mut i = 0; while i != vote.len() { votes.append(vote.at(i)); i += 1; }; n += 1; }; ( TensorTrait::new( array![*X.shape.at(0), class_count_ * (class_count_ - 1) / 2].span(), res.span() ), Option::Some( TensorTrait::new(array![*X.shape.at(0), class_count_].span(), votes.span()) ) ) }, }; // Proba let (scores, has_proba) = match mode { MODE::SVM_LINEAR => { (res, false) }, MODE::SVM_SVC => { let (scores, has_proba) = if self.prob_a.len() > 0 { let mut scores: Array<T> = array![]; let mut n = 0; while n != *res.shape.at(0) { let res_n = get_row(@res, n); let mut s = probablities(ref self, res_n, class_count_); let mut i = 0; while i != s.len() { scores.append(s.at(i)); i += 1; }; n += 1; }; ( TensorTrait::new( array![*res.shape.at(0), scores.len() / *res.shape.at(0)].span(), scores.span() ), true ) } else { (res, false) }; (scores, has_proba) }, }; // Finalization let mut labels: Array<usize> = array![]; let mut final_scores: Array<T> = array![]; let mut n = 0; while n != *scores.shape.at(0) { let mut scores_n = get_row(@scores, n); match votes { Option::Some(votes) => { let mut votes_n = get_row(@votes, n); let (label, new_scores) = compute_final_scores( ref self, votes_n, scores_n, 
weights_are_all_positive_, has_proba, self.classlabels ); let mut i = 0; while i != new_scores.data.len() { final_scores.append(*new_scores.data.at(i)); i += 1; }; labels.append(label); }, Option::None => { let (label, new_scores) = compute_final_scores( ref self, array![].span(), scores_n, weights_are_all_positive_, has_proba, self.classlabels ); let mut i = 0; while i != new_scores.data.len() { final_scores.append(*new_scores.data.at(i)); i += 1; }; labels.append(label); }, } n += 1; }; let labels = labels.span(); // Labels if self.classlabels.len() > 0 { let mut class_labels: Array<usize> = array![]; let mut i = 0; while i != labels.len() { class_labels.append(*self.classlabels.at(*labels.at(i))); i += 1; }; return ( class_labels.span(), TensorTrait::new( array![*X.shape.at(0), final_scores.len() / *X.shape.at(0)].span(), final_scores.span() ) ); } ( labels, TensorTrait::new( array![*X.shape.at(0), final_scores.len() / *X.shape.at(0)].span(), final_scores.span() ) ) } } fn run_svm< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>, +Neg<T>, +Sub<T>, +PartialOrd<T>, >( ref self: SVMClassifier<T>, X: Span<T>, sv: Tensor<T>, vector_count_: usize, kernel: KERNEL_TYPE, class_count_: usize, starting_vector_: Span<usize>, coefs: Tensor<T>, vectors_per_class_: Span<usize> ) -> (Array<T>, NullableVec<T>) { let mut evals = 0; let mut kernels: Array<T> = array![]; let mut j = 0; while j != vector_count_ { let sv_j = get_row(@sv, j); kernels.append(kernel_dot(self.kernel_params, X, sv_j, kernel)); j += 1; }; let kernels = kernels.span(); let mut scores: Array<T> = array![]; let mut votes = VecTrait::new(); VecTrait::set(ref votes, class_count_ - 1, NumberTrait::zero()); let mut i = 0; while i != class_count_ { let si_i = *starting_vector_.at(i); let class_i_sc = *vectors_per_class_.at(i); let mut j = i + 1; while j != class_count_ { let si_j = *starting_vector_.at(j); let class_j_sc = *vectors_per_class_.at(j); let s1 = 
dot_start_end( coefs.data, kernels, (j - 1) * *coefs.shape.at(0) + si_i, (j - 1) * *coefs.shape.at(0) + si_i + class_i_sc, si_i, si_i + class_i_sc ); let s2 = dot_start_end( coefs.data, kernels, i * *coefs.shape.at(0) + si_j, i * *coefs.shape.at(0) + si_j + class_j_sc, si_j, si_j + class_j_sc ); let s = *self.rho.at(evals) + s1 + s2; scores.append(s); if s > NumberTrait::zero() { VecTrait::set(ref votes, i, VecTrait::at(ref votes, i) + NumberTrait::one()); } else { VecTrait::set(ref votes, j, VecTrait::at(ref votes, j) + NumberTrait::one()); } evals += 1; j += 1; }; i += 1; }; (scores, votes) } fn run_linear< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>, +Neg<T>, +Sub<T>, >( ref self: SVMClassifier<T>, X: Span<T>, coefs: Tensor<T>, class_count_: usize, kernel: KERNEL_TYPE ) -> Array<T> { let mut scores: Array<T> = array![]; let mut j = 0; while j != class_count_ { let coefs_j = get_row(@coefs, j); let d = kernel_dot(self.kernel_params, X, coefs_j, kernel); let score = *self.rho.at(0) + d; scores.append(score); j += 1; }; scores } fn compute_final_scores< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +NNTrait<T>, +Into<usize, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>, +Neg<T>, +Sub<T>, +Div<T>, +PartialOrd<T>, >( ref self: SVMClassifier<T>, votes: Span<T>, scores: Span<T>, weights_are_all_positive_: bool, has_proba: bool, classlabels: Span<usize> ) -> (usize, Tensor<T>) { let (max_class, max_weight) = if votes.len() > 0 { let max_class = argmax_span(votes); let max_weight = *votes.at(max_class); (max_class, max_weight) } else { let max_class = argmax_span(scores); let max_weight = *scores.at(max_class); (max_class, max_weight) }; let (label, write_additional_scores) = if self.rho.len() == 1 { let (label, write_additional_scores) = set_score_svm( max_weight, max_class, weights_are_all_positive_, has_proba, classlabels, 1, 0 ); (label, write_additional_scores) } else if classlabels.len() > 0 { let label = 
*classlabels.at(max_class); (label, 4) } else { (max_class, 4) }; let new_scores = write_scores( scores.len(), TensorTrait::new(array![scores.len()].span(), scores), self.post_transform, write_additional_scores ); (label, new_scores) } fn write_scores< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +PartialOrd<T>, +NNTrait<T>, +Neg<T>, +Sub<T>, >( n_classes: usize, scores: Tensor<T>, post_transform: POST_TRANSFORM, add_second_class: usize ) -> Tensor<T> { let new_scores = if n_classes >= 2 { let new_scores = match post_transform { POST_TRANSFORM::NONE => scores, POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@scores, Option::Some(0)), POST_TRANSFORM::LOGISTIC => NNTrait::sigmoid(@scores), POST_TRANSFORM::SOFTMAXZERO => NNTrait::softmax_zero(@scores, 0), POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), }; new_scores } else { //if n_classes == 1 let new_scores = match post_transform { POST_TRANSFORM::NONE => { let scores = if add_second_class == 0 || add_second_class == 1 { TensorTrait::new( array![2].span(), array![NumberTrait::one() - *scores.data.at(0), *scores.data.at(0)].span() ) } else if add_second_class == 2 || add_second_class == 3 { TensorTrait::new( array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span() ) } else { TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) }; scores }, POST_TRANSFORM::SOFTMAX => { let scores = if add_second_class == 0 || add_second_class == 1 { TensorTrait::new( array![2].span(), array![NumberTrait::one() - *scores.data.at(0), *scores.data.at(0)].span() ) } else if add_second_class == 2 || add_second_class == 3 { // NNTrait::softmax( @TensorTrait::new( array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span() ), Option::Some(0) ) } else { TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) }; scores }, POST_TRANSFORM::LOGISTIC => { let scores = if add_second_class == 0 || add_second_class == 1 { TensorTrait::new( 
array![2].span(), array![NumberTrait::one() - *scores.data.at(0), *scores.data.at(0)].span() ) } else if add_second_class == 2 || add_second_class == 3 { // NNTrait::sigmoid( @TensorTrait::new( array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span() ) ) } else { TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) }; scores }, POST_TRANSFORM::SOFTMAXZERO => { let scores = if add_second_class == 0 || add_second_class == 1 { TensorTrait::new( array![2].span(), array![NumberTrait::one() - *scores.data.at(0), *scores.data.at(0)].span() ) } else if add_second_class == 2 || add_second_class == 3 { // NNTrait::softmax_zero( @TensorTrait::new( array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span() ), 0 ) } else { TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) }; scores }, POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not applicable here.'), }; new_scores }; new_scores } fn set_score_svm< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +PartialOrd<T>, >( max_weight: T, maxclass: usize, weights_are_all_positive_: bool, has_proba: bool, classlabels: Span<usize>, posclass: usize, negclass: usize ) -> (usize, usize) { let mut write_additional_scores = 0; if classlabels.len() == 2 { write_additional_scores = 2; if !has_proba { if weights_are_all_positive_ && max_weight >= NumberTrait::half() { return (*classlabels.at(1), write_additional_scores); }; }; return (*classlabels.at(maxclass), write_additional_scores); } if max_weight >= NumberTrait::zero() { return (posclass, write_additional_scores); }; (negclass, write_additional_scores) } fn argmax_span<T, +Drop<T>, +Copy<T>, +PartialOrd<T>,>(span: Span<T>) -> usize { let mut max = 0; let mut i = 0; while i != span.len() { if *span.at(i) > *span.at(max) { max = i; } i += 1; }; max } fn probablities< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Into<usize, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>, +Neg<T>, 
+Sub<T>, +Div<T>, +PartialOrd<T>,
>(
    ref self: SVMClassifier<T>, scores: Span<T>, class_count_: usize
) -> NullableVec<T> {
    // Pairwise (one-vs-one) probabilities: map each raw decision value through the
    // fitted Platt sigmoid; store r_ij = P(class i | i or j) at (i, j) and its
    // complement 1 - r_ij at (j, i).
    let mut probsp2: MutMatrix<T> = MutMatrixImpl::new(class_count_, class_count_);
    let mut index = 0;
    let mut i = 0;
    while i != class_count_ {
        let mut j = i + 1;
        while j != class_count_ {
            let val1 = sigmoid_probability(
                *scores.at(index), *self.prob_a.at(index), *self.prob_b.at(index)
            );
            let mut val2 = NumberTrait::min(
                val1, NumberTrait::one()
            ); // ONNX : min(val2, (1 - 1.0e-7))
            probsp2.set(i, j, val2);
            probsp2.set(j, i, NumberTrait::one() - val2);
            j += 1;
            index += 1;
        };
        i += 1;
    };
    // Couple the pairwise estimates into one per-class distribution.
    multiclass_probability(class_count_, ref probsp2)
}

/// Couples the pairwise probability matrix `R` (k x k) into a single probability
/// vector of length `k`, following the second method of Wu, Lin & Weng,
/// "Probability Estimates for Multi-class Classification by Pairwise Coupling"
/// (the algorithm used by LIBSVM and the ONNX runtime reference).
///
/// Iteratively minimizes `P^T Q P` subject to `sum(P) = 1`, where
/// `Q[t][t] = sum_{i != t} R[i][t]^2` and `Q[t][u] = -R[u][t] * R[t][u]`.
fn multiclass_probability<
    T,
    MAG,
    +Drop<T>,
    +Copy<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +Add<T>,
    +Mul<T>,
    +Div<T>,
    +Sub<T>,
    +Neg<T>,
    +AddEq<T>,
    +Into<usize, MAG>,
>(
    k: usize, ref R: MutMatrix<T>
) -> NullableVec<T> {
    let max_iter = max(100, k);
    let k_fp = NumberTrait::<T>::new_unscaled(k.into(), false);
    let mut Q: MutMatrix<T> = MutMatrixImpl::new(k, k);
    MutMatrixImpl::set(ref Q, k - 1, k - 1, NumberTrait::zero());
    let mut P = VecTrait::new();
    VecTrait::set(ref P, k - 1, NumberTrait::zero());
    // Convergence threshold: (0.5 / 100) / k = 0.005 / k.
    let a: usize = 100;
    let eps = (NumberTrait::half() / NumberTrait::new_unscaled(a.into(), false)) / k_fp;

    // Build Q from R and initialize P to the uniform distribution 1/k.
    let mut t = 0;
    while t != k {
        VecTrait::set(ref P, t, NumberTrait::one() / k_fp);
        // Q[t][t] = sum_{i < t} R[i][t]^2 ...
        let mut i = 0;
        let mut acc1 = NumberTrait::zero();
        while i != t {
            let r_i = MutMatrixImpl::at(ref R, i, t);
            acc1 += r_i * r_i;
            i += 1;
        };
        MutMatrixImpl::set(ref Q, t, t, acc1);
        // Lower triangle mirrors the upper one: Q[t][i] = Q[i][t] for i < t.
        let mut i = 0;
        while i != t {
            MutMatrixImpl::set(ref Q, t, i, MutMatrixImpl::at(ref Q, i, t));
            i += 1;
        };
        // ... + sum_{i > t} R[i][t]^2.
        let mut i = t + 1;
        let mut acc2 = NumberTrait::zero();
        while i != k {
            let r_i = MutMatrixImpl::at(ref R, i, t);
            acc2 += r_i * r_i;
            i += 1;
        };
        MutMatrixImpl::set(ref Q, t, t, acc1 + acc2);
        // Q[t][i] = -R[i][t] * R[t][i] for i > t.
        // FIX: each off-diagonal entry must carry its own pairwise product. The
        // previous code summed all products into one accumulator and wrote that
        // same total into every Q[t][i], which disagrees with the reference
        // coupling algorithm whenever k > 2 (the binary case was unaffected,
        // since only one entry exists per row there).
        let mut i = t + 1;
        while i != k {
            MutMatrixImpl::set(
                ref Q, t, i, -MutMatrixImpl::at(ref R, i, t) * MutMatrixImpl::at(ref R, t, i)
            );
            i += 1;
        };
        t += 1;
    };

    // Fixed-point iteration until the KKT residual max_t |(QP)[t] - P^T Q P|
    // drops below eps, or max_iter passes elapse.
    let mut i = 0;
    while i != max_iter {
        let mut Qp = MutMatrixImpl::matrix_vector_product(ref Q, ref P);
        let mut pQp = dot(ref P, ref Qp);
        let mut max_error = NumberTrait::zero();
        let mut t = 0;
        while t != k {
            let error = NumberTrait::abs(Qp.at(t) - pQp);
            if error > max_error {
                max_error = error;
            }
            t += 1;
        };
        if max_error < eps {
            break;
        }
        let mut t = 0;
        while t != k {
            // Coordinate update of P[t], then renormalize and refresh Qp
            // incrementally instead of recomputing the full product.
            let diff = (-VecTrait::at(ref Qp, t) + pQp) / MutMatrixImpl::at(ref Q, t, t);
            VecTrait::set(ref P, t, VecTrait::at(ref P, t) + diff);
            pQp =
                (pQp
                    + diff
                        * (diff * MutMatrixImpl::at(ref Q, t, t)
                            + (NumberTrait::one() + NumberTrait::one()) * VecTrait::at(ref Qp, t)))
                / ((NumberTrait::one() + diff) * (NumberTrait::one() + diff));
            div_element_wise(ref P, NumberTrait::one() + diff);
            Qp_computation(ref Q, ref Qp, diff, t);
            t += 1;
        };
        i += 1;
    };
    P
}

/// Computation of the vector Qp in the multiclass_probability computation.
///
/// Qp[:] = (Qp + diff * Q[t, :]) / (1 + diff)
///
fn Qp_computation<
    T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +PartialOrd<T>, +Mul<T>, +Add<T>, +Div<T>, +AddEq<T>
>(
    ref Q: MutMatrix<T>, ref Qp: NullableVec<T>, diff: T, t: usize
) {
    let m = Qp.len;
    let mut i = 0_usize;
    while i != m {
        let elem = (VecTrait::at(ref Qp, i) + diff * MutMatrixImpl::at(ref Q, t, i))
            / (NumberTrait::one() + diff);
        VecTrait::set(ref Qp, i, elem);
        i += 1;
    };
}

/// Platt-scaled pairwise probability: returns 1 - sigmoid(score * prob_a + prob_b),
/// computed via exp(-|val|) so the exponent is always non-positive (numerically
/// stable in fixed point), with the complement flip for negative `val`.
fn sigmoid_probability<
    T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +PartialOrd<T>, +Add<T>, +Mul<T>, +Div<T>, +Sub<T>, +Neg<T>,
>(
    score: T, prob_a: T, prob_b: T
) -> T {
    let val = score * prob_a + prob_b;
    let mut v = NumberTrait::one()
        / (NumberTrait::one() + NumberTrait::exp(-NumberTrait::abs(val)));
    v = if val < NumberTrait::zero() {
        NumberTrait::one() - v
    } else {
        v
    };
    NumberTrait::one() - v
}

/// Maximum of two usize values.
fn max(a: usize, b: usize) -> usize {
    if a > b {
        return a;
    };
    b
}

/// Minimum element of a non-empty span (panics on an empty span via `a.at(0)`).
fn min<T, +Copy<T>, +Drop<T>, +PartialOrd<T>,>(a: Span<T>) -> T {
    let mut min = *a.at(0);
    let mut i = 0;
    while i != a.len() {
        if min > *a.at(i) {
            min = *a.at(i);
        }
        i += 1;
    };
    min
}

/// Dot product of `pA[a_start..a_end]` with `pB[b_start..b_end]`, stopping at
/// whichever sub-range is exhausted first.
fn dot_start_end<
    T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>,
>(
    pA: Span<T>, pB: Span<T>, a_start: usize, a_end: usize, b_start: usize, b_end: usize
) -> T {
    let mut sum = NumberTrait::zero();
    let mut index_a = a_start;
    let mut index_b = b_start;
    while index_a != a_end && index_b != b_end {
        sum = sum + *pA.at(index_a) * *pB.at(index_b);
        index_a += 1;
        index_b += 1;
    };
    sum
}

/// Plain dot product of two spans; iterates over `pA`'s length, so `pB` must be
/// at least as long.
fn sv_dot<
    T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>,
>(
    pA: Span<T>, pB: Span<T>
) -> T {
    let mut i = 0;
    let mut sum = NumberTrait::zero();
    while i != pA.len() {
        sum = sum + *pA.at(i) * *pB.at(i);
        i += 1;
    };
    sum
}

/// Sum of squared element-wise differences, sum_i (pA[i] - pB[i])^2
/// (used by the RBF kernel distance).
fn squared_diff<
    T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>, +Sub<T>,
>(
    pA: Span<T>, pB: Span<T>
) -> T {
    let mut i = 0;
    let mut sum = NumberTrait::zero();
    while i != pA.len() {
        sum = sum + (*pA.at(i) - *pB.at(i)).pow(NumberTrait::one() + NumberTrait::one());
        i += 1;
    };
    sum
}

/// Dot product of two NullableVec of equal length.
fn dot<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Mul<T>, +AddEq<T>, +Add<T>, +Div<T>>(
    ref self: NullableVec<T>, ref vec: NullableVec<T>
) -> T {
    assert(self.len == vec.len, 'wrong vec len for dot prod');
    let n = self.len;
    let mut sum: T = NumberTrait::zero();
    let mut i = 0_usize;
    while i != n {
        sum += self.at(i) * vec.at(i);
        i += 1;
    };
    sum
}

/// In-place element-wise division of the vector by the scalar `elem`.
fn div_element_wise<T, MAG, +Mul<T>, +Add<T>, +Div<T>, +NumberTrait<T, MAG>, +Drop<T>, +Copy<T>>(
    ref self: NullableVec<T>, elem: T
) {
    let m = self.len;
    let mut i = 0_usize;
    while i != m {
        VecTrait::set(ref self, i, VecTrait::at(ref self, i) / elem);
        i += 1;
    };
}
https://github.com/gizatechxyz/orion
src/operators/ml/svm/svm_regressor.cairo
use core::traits::TryInto;
use core::array::ArrayTrait;
use core::array::SpanTrait;
use core::traits::Into;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{
    TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor
};
use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait};
use core::debug::PrintTrait;
use orion::operators::nn::{NNTrait, FP16x16NN};
use orion::utils::get_row;
use orion::operators::ml::POST_TRANSFORM;
use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE};

/// ONNX-ml style SVMRegressor attributes (one regression target).
#[derive(Copy, Drop, Destruct)]
struct SVMRegressor<T> {
    coefficients: Span<T>, // dual coefficients (SVC mode) or weight vector (linear mode)
    kernel_params: Span<T>, // gamma / coef0 / degree, consumed by `kernel_dot`
    kernel_type: KERNEL_TYPE,
    n_supports: usize, // number of support vectors; 0 selects linear mode
    one_class: usize, // 1: emit +1/-1 anomaly labels instead of raw scores
    post_transform: POST_TRANSFORM,
    rho: Span<T>, // intercept, only rho[0] is read
    support_vectors: Span<T>, // flattened [n_supports, n_features]
}

/// Internal evaluation mode derived from `n_supports`.
#[derive(Copy, Drop)]
enum MODE {
    SVM_LINEAR,
    SVM_SVC,
}

/// Trait
///
/// predict - Returns the regressed values for each input in N.
trait SVMRegressorTrait<T> {
    /// # SVMRegressorTrait::predict
    ///
    /// ```rust
    ///    fn predict(ref self: SVMRegressor<T>, X: Tensor<T>) -> Tensor<T>;
    /// ```
    ///
    /// Support Vector Machine regression prediction and one-class SVM anomaly detection.
    ///
    /// ## Args
    ///
    /// * `self`: SVMRegressor<T> - A SVMRegressor object.
    /// * `X`:  Input 2D tensor.
    ///
    /// ## Returns
    ///
    /// * Tensor<T> containing the Support Vector Machine regression prediction and one-class SVM anomaly detection of the input X.
    ///
    /// ## Type Constraints
    ///
    /// `SVMRegressor` and `X` must be fixed points
    ///
    /// ## Examples
    ///
    /// ```rust
    /// use orion::numbers::FP16x16;
    /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
    /// use orion::operators::tensor::FP16x16TensorPartialEq;
    ///
    /// use orion::operators::ml::svm::svm_regressor::{SVMRegressorTrait, POST_TRANSFORM, SVMRegressor};
    /// use orion::operators::ml::svm::core::{KERNEL_TYPE};
    ///
    /// fn example_svm_regressor_linear() -> Tensor<FP16x16> {
    ///     let coefficients: Span<FP16x16> = array![
    ///         FP16x16 { mag: 65536, sign: false },
    ///         FP16x16 { mag: 65536, sign: true },
    ///         FP16x16 { mag: 54959, sign: false },
    ///         FP16x16 { mag: 54959, sign: true },
    ///         FP16x16 { mag: 29299, sign: false },
    ///         FP16x16 { mag: 65536, sign: true },
    ///         FP16x16 { mag: 36236, sign: false }
    ///     ]
    ///         .span();
    ///     let n_supports: usize = 7;
    ///     let one_class: usize = 0;
    ///     let rho: Span<FP16x16> = array![FP16x16 { mag: 35788, sign: false }].span();
    ///     let support_vectors: Span<FP16x16> = array![
    ///         FP16x16 { mag: 8421, sign: true },
    ///         FP16x16 { mag: 5842, sign: false },
    ///         FP16x16 { mag: 4510, sign: false },
    ///         FP16x16 { mag: 5202, sign: true },
    ///         FP16x16 { mag: 14783, sign: true },
    ///         FP16x16 { mag: 17380, sign: true },
    ///         FP16x16 { mag: 60595, sign: false },
    ///         FP16x16 { mag: 1674, sign: true },
    ///         FP16x16 { mag: 38669, sign: true },
    ///         FP16x16 { mag: 63803, sign: false },
    ///         FP16x16 { mag: 87720, sign: true },
    ///         FP16x16 { mag: 22236, sign: false },
    ///         FP16x16 { mag: 61816, sign: false },
    ///         FP16x16 { mag: 34267, sign: true },
    ///         FP16x16 { mag: 36418, sign: false },
    ///         FP16x16 { mag: 27471, sign: false },
    ///         FP16x16 { mag: 28421, sign: false },
    ///         FP16x16 { mag: 69270, sign: true },
    ///         FP16x16 { mag: 152819, sign: false },
    ///         FP16x16 { mag: 4065, sign: false },
    ///         FP16x16 { mag: 62274, sign: true }
    ///     ]
    ///         .span();
    ///     let post_transform = POST_TRANSFORM::NONE;
    ///     let kernel_params: Span<FP16x16> = array![
    ///         FP16x16 { mag: 27812, sign: false },
    ///         FP16x16 { mag: 0, sign: false },
    ///         FP16x16 { mag: 196608, sign: false }
    ///     ]
    ///         .span();
    ///     let kernel_type = KERNEL_TYPE::LINEAR;
    ///
    ///     let mut regressor: SVMRegressor<FP16x16> = SVMRegressor {
    ///         coefficients,
    ///         kernel_params,
    ///         kernel_type,
    ///         n_supports,
    ///         one_class,
    ///         post_transform,
    ///         rho,
    ///         support_vectors,
    ///     };
    ///
    ///     let mut X: Tensor<FP16x16> = TensorTrait::new(
    ///         array![3, 3].span(),
    ///         array![
    ///             FP16x16 { mag: 32768, sign: true },
    ///             FP16x16 { mag: 26214, sign: true },
    ///             FP16x16 { mag: 19660, sign: true },
    ///             FP16x16 { mag: 13107, sign: true },
    ///             FP16x16 { mag: 6553, sign: true },
    ///             FP16x16 { mag: 0, sign: false },
    ///             FP16x16 { mag: 6553, sign: false },
    ///             FP16x16 { mag: 13107, sign: false },
    ///             FP16x16 { mag: 19660, sign: false },
    ///         ]
    ///             .span()
    ///     );
    ///
    ///     return SVMRegressorTrait::predict(ref regressor, X);
    /// }
    ///
    /// >>> [[-0.468206], [0.227487], [0.92318]]
    /// ```
    ///
    ///
    fn predict(ref self: SVMRegressor<T>, X: Tensor<T>) -> Tensor<T>;
}

impl SVMRegressorImpl<
    T,
    MAG,
    +Drop<T>,
    +Copy<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Add<T>,
    +TensorTrait<T>,
    +PrintTrait<T>,
    +AddEq<T>,
    +Div<T>,
    +Mul<T>,
    +Neg<T>,
    +Sub<T>,
    +NNTrait<T>,
> of SVMRegressorTrait<T> {
    fn predict(ref self: SVMRegressor<T>, X: Tensor<T>) -> Tensor<T> {
        // Choose the evaluation mode: with support vectors present this is a
        // kernelized SVC-style sum; otherwise `coefficients` is treated as a
        // linear weight vector.
        let (mode_, kernel_type_, sv) = if self.n_supports > 0 {
            let mode_ = MODE::SVM_SVC;
            let kernel_type_ = self.kernel_type;
            let sv = TensorTrait::new(
                array![self.n_supports, self.support_vectors.len() / self.n_supports].span(),
                self.support_vectors
            ); //self.atts.support_vectors.reshape((self.atts.n_supports, -1))
            (mode_, kernel_type_, sv)
        } else {
            let mode_ = MODE::SVM_LINEAR;
            let kernel_type_ = KERNEL_TYPE::LINEAR;
            let sv = TensorTrait::new(
                array![self.support_vectors.len()].span(), self.support_vectors
            );
            (mode_, kernel_type_, sv)
        };

        // One score per input row (X is assumed [N, n_features] — `get_row`
        // indexes the first dimension).
        let mut z: Array<T> = array![];
        let mut n = 0;
        while n != *X.shape.at(0) {
            let mut s = NumberTrait::zero();
            match mode_ {
                MODE::SVM_LINEAR => {
                    let mut x_n = get_row(@X, n);
                    s = kernel_dot(self.kernel_params, x_n, self.coefficients, kernel_type_);
                    s += *self.rho.at(0);
                },
                MODE::SVM_SVC => {
                    // s = rho + sum_j coefficients[j] * K(x_n, sv_j)
                    let mut x_n = get_row(@X, n);
                    let mut j = 0;
                    while j != self.n_supports {
                        let mut sv_j = get_row(@sv, j);
                        let d = kernel_dot(self.kernel_params, x_n, sv_j, kernel_type_);
                        s += *self.coefficients.at(j) * d;
                        j += 1;
                    };
                    s += *self.rho.at(0);
                },
            }
            if self.one_class == 1 {
                // One-class SVM: emit +1 (inlier) / -1 (outlier) from the score sign.
                let elem = if s > NumberTrait::zero() {
                    NumberTrait::one()
                } else {
                    -NumberTrait::one()
                };
                z.append(elem);
            } else {
                z.append(s);
            };
            n += 1;
        };

        // Post Transform
        // NOTE(review): `score` is rank-1 (shape [N]) yet SOFTMAX/SOFTMAXZERO are
        // applied along axis 1 — confirm against NNTrait's axis semantics.
        let mut score = TensorTrait::new(array![*X.shape.at(0)].span(), z.span());
        score = match self.post_transform {
            POST_TRANSFORM::NONE => score,
            POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@score, Option::Some(1)),
            POST_TRANSFORM::LOGISTIC => NNTrait::sigmoid(@score),
            POST_TRANSFORM::SOFTMAXZERO => NNTrait::softmax_zero(@score, 1),
            POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
        };

        score
    }
}
https://github.com/gizatechxyz/orion
src/operators/ml/tree_ensemble.cairo
mod core; mod tree_ensemble_classifier; mod tree_ensemble_regressor;
https://github.com/gizatechxyz/orion
src/operators/ml/tree_ensemble/core.cairo
use alexandria_data_structures::array_ext::SpanTraitExt;
use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl};
use alexandria_data_structures::array_ext::ArrayTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor};
use orion::utils::get_row;

/// Flat, column-oriented node attributes for every tree in the ensemble
/// (same layout as the ONNX-ml TreeEnsemble* attributes): entry `i` of each
/// span describes one node; the nodes of all trees are concatenated.
#[derive(Copy, Drop, Destruct)]
struct TreeEnsembleAttributes<T> {
    nodes_falsenodeids: Span<usize>, // child node id taken when the test fails
    nodes_featureids: Span<usize>, // input feature tested at this node
    nodes_missing_value_tracks_true: Span<usize>, // >= 1: NaN input follows the "true" branch
    nodes_modes: Span<NODE_MODES>, // comparison kind, or LEAF
    nodes_nodeids: Span<usize>, // node id, unique within its tree
    nodes_treeids: Span<usize>, // id of the tree this node belongs to
    nodes_truenodeids: Span<usize>, // child node id taken when the test succeeds
    nodes_values: Span<T>, // threshold the feature value is compared against
}

#[derive(Destruct)]
struct TreeEnsemble<T> {
    atts: TreeEnsembleAttributes<T>,
    tree_ids: Span<usize>,
    root_index: Felt252Dict<usize>, // tree_id -> flat index of that tree's root node
    node_index: Felt252Dict<usize>, // index is pedersen hash of tree_id and nid.
}

/// Decision test applied at a node; LEAF terminates traversal.
#[derive(Copy, Drop)]
enum NODE_MODES {
    BRANCH_LEQ,
    BRANCH_LT,
    BRANCH_GTE,
    BRANCH_GT,
    BRANCH_EQ,
    BRANCH_NEQ,
    LEAF
}

#[generate_trait]
impl TreeEnsembleImpl<
    T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +PartialOrd<T>, +PartialEq<T>
> of TreeEnsembleTrait<T> {
    /// Walks tree `tree_id` for one input row `x` and returns the flat index
    /// of the leaf node the row falls into.
    fn leaf_index_tree(ref self: TreeEnsemble<T>, x: Span<T>, tree_id: usize) -> usize {
        let mut index: usize = self.root_index.get(tree_id.into());

        loop {
            // Loop breaker: stop as soon as the current node is a leaf.
            match *self.atts.nodes_modes.at(index) {
                NODE_MODES::BRANCH_LEQ => {},
                NODE_MODES::BRANCH_LT => {},
                NODE_MODES::BRANCH_GTE => {},
                NODE_MODES::BRANCH_GT => {},
                NODE_MODES::BRANCH_EQ => {},
                NODE_MODES::BRANCH_NEQ => {},
                NODE_MODES::LEAF => { break; },
            };

            let x_value = *x.at(*(self.atts.nodes_featureids).at(index));
            // NaN inputs are routed by the missing-value policy, not the comparison.
            let r = if x_value.is_nan() {
                *self.atts.nodes_missing_value_tracks_true.at(index) >= 1
            } else {
                match *self.atts.nodes_modes.at(index) {
                    NODE_MODES::BRANCH_LEQ => x_value <= *self.atts.nodes_values[index],
                    NODE_MODES::BRANCH_LT => x_value < *self.atts.nodes_values[index],
                    NODE_MODES::BRANCH_GTE => x_value >= *self.atts.nodes_values[index],
                    NODE_MODES::BRANCH_GT => x_value > *self.atts.nodes_values[index],
                    NODE_MODES::BRANCH_EQ => x_value == *self.atts.nodes_values[index],
                    NODE_MODES::BRANCH_NEQ => x_value != *self.atts.nodes_values[index],
                    NODE_MODES::LEAF => {
                        panic(array!['Unexpected rule for node index ', index.into()])
                    },
                }
            };

            let nid = if r {
                *self.atts.nodes_truenodeids[index]
            } else {
                *self.atts.nodes_falsenodeids[index]
            };

            // key of TreeEnsemble.node_index is pedersen hash of tree_id and nid.
            let mut key = PedersenHasherImpl::new();
            let key: felt252 = key.hash(tree_id.into(), nid.into());
            index = self.node_index.get(key);
        };

        index
    }

    /// Runs every tree on every row of `x` and returns an
    /// `[n_rows, n_trees]` tensor of leaf indices.
    fn leave_index_tree(ref self: TreeEnsemble<T>, x: Tensor<T>) -> Tensor<usize> {
        let mut outputs: Array<usize> = array![];

        let mut i: usize = 0;
        let breaker: usize = *x.shape[0];
        while i != breaker {
            let row_data: Span<T> = get_row(@x, i);
            let mut outs: Array<usize> = array![];
            // `self.tree_ids` is a Span, so this local copy can be consumed
            // by pop_front without affecting later iterations.
            let mut tree_ids = self.tree_ids;
            loop {
                match tree_ids.pop_front() {
                    Option::Some(tree_id) => {
                        outs
                            .append(
                                TreeEnsembleImpl::<T>::leaf_index_tree(ref self, row_data, *tree_id)
                            )
                    },
                    Option::None => { break; }
                };
            };
            outputs.append_all(ref outs);
            i += 1;
        };

        TensorTrait::new(array![*x.shape[0], self.tree_ids.len()].span(), outputs.span())
    }
}
https://github.com/gizatechxyz/orion
src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo
use core::array::ArrayTrait;
use core::clone::Clone;
use core::box::BoxTrait;
use core::traits::Into;
use core::option::OptionTrait;
use orion::operators::matrix::MutMatrixTrait;
use core::array::SpanTrait;
use core::nullable::NullableTrait;
use core::dict::Felt252DictTrait;
use core::dict::Felt252DictEntryTrait;
use core::nullable::{match_nullable, FromNullableResult};

use orion::operators::tensor::{Tensor, TensorTrait};
use orion::operators::ml::tree_ensemble::core::{TreeEnsemble, TreeEnsembleImpl, TreeEnsembleTrait};
use orion::numbers::NumberTrait;
use orion::utils::get_row;

use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl};
use alexandria_data_structures::array_ext::{SpanTraitExt};

use orion::operators::matrix::{MutMatrix, MutMatrixImpl};
use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl};
use orion::operators::ml::POST_TRANSFORM;

use core::debug::PrintTrait;

/// ONNX-ml style TreeEnsembleClassifier: the traversal core plus the
/// per-leaf class weight mapping and post-transform configuration.
#[derive(Destruct)]
struct TreeEnsembleClassifier<T> {
    ensemble: TreeEnsemble<T>,
    class_ids: Span<usize>, // class each (tree, leaf) weight contributes to
    class_nodeids: Span<usize>, // leaf node id of each weight entry
    class_treeids: Span<usize>, // tree id of each weight entry
    class_weights: Span<T>, // weight added to the class score
    classlabels: Span<usize>,
    base_values: Option<Span<T>>, // optional per-cell initial scores, row-major
    post_transform: POST_TRANSFORM,
}

/// Trait
///
/// predict - Returns the top class for each of N inputs.
trait TreeEnsembleClassifierTrait<T> {
    /// # TreeEnsembleClassifier::predict
    ///
    /// ```rust
    ///    fn predict(classifier: TreeEnsembleClassifier<T>, X: Tensor<T>) -> (Span<usize>, MutMatrix::<T>);
    /// ```
    ///
    /// Tree Ensemble classifier. Returns the top class for each of N inputs.
    ///
    /// ## Args
    ///
    /// * `self`: TreeEnsembleClassifier<T> - A TreeEnsembleClassifier object.
    /// * `X`:  Input 2D tensor.
    ///
    /// ## Returns
    ///
    /// * N Top class for each point
    /// * The class score Matrix for each class, for each point.
    ///
    /// ## Type Constraints
    ///
    /// `TreeEnsembleClassifier` and `X` must be fixed points
    ///
    /// ## Examples
    ///
    /// ```rust
    /// use orion::numbers::FP16x16;
    /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
    /// use orion::operators::ml::{NODE_MODES, TreeEnsembleAttributes, TreeEnsemble};
    /// use orion::operators::ml::{
    ///     TreeEnsembleClassifier, POST_TRANSFORM, TreeEnsembleClassifierTrait
    /// };
    /// use orion::operators::matrix::{MutMatrix, MutMatrixImpl};
    ///
    /// fn tree_ensemble_classifier_helper(
    ///     post_transform: POST_TRANSFORM
    /// ) -> (TreeEnsembleClassifier<FP16x16>, Tensor<FP16x16>) {
    ///     let class_ids: Span<usize> = array![0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2]
    ///         .span();
    ///
    ///     let class_nodeids: Span<usize> = array![2, 2, 2, 3, 3, 3, 4, 4, 4, 1, 1, 1, 3, 3, 3, 4, 4, 4]
    ///         .span();
    ///
    ///     let class_treeids: Span<usize> = array![0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    ///         .span();
    ///
    ///     let class_weights: Span<FP16x16> = array![
    ///         FP16x16 { mag: 30583, sign: false },
    ///         FP16x16 { mag: 0, sign: false },
    ///         FP16x16 { mag: 2185, sign: false },
    ///         FP16x16 { mag: 13107, sign: false },
    ///         FP16x16 { mag: 15729, sign: false },
    ///         FP16x16 { mag: 3932, sign: false },
    ///         FP16x16 { mag: 0, sign: false },
    ///         FP16x16 { mag: 32768, sign: false },
    ///         FP16x16 { mag: 0, sign: false },
    ///         FP16x16 { mag: 32768, sign: false },
    ///         FP16x16 { mag: 0, sign: false },
    ///         FP16x16 { mag: 0, sign: false },
    ///         FP16x16 { mag: 29491, sign: false },
    ///         FP16x16 { mag: 0, sign: false },
    ///         FP16x16 { mag: 3277, sign: false },
    ///         FP16x16 { mag: 6746, sign: false },
    ///         FP16x16 { mag: 12529, sign: false },
    ///         FP16x16 { mag: 13493, sign: false },
    ///     ]
    ///         .span();
    ///
    ///     let classlabels: Span<usize> = array![0, 1, 2].span();
    ///
    ///     let nodes_falsenodeids: Span<usize> = array![4, 3, 0, 0, 0, 2, 0, 4, 0, 0].span();
    ///
    ///     let nodes_featureids: Span<usize> = array![1, 0, 0, 0, 0, 1, 0, 0, 0, 0].span();
    ///
    ///     let nodes_missing_value_tracks_true: Span<usize> = array![0, 0, 0, 0, 0, 0, 0, 0, 0, 0].span();
    ///
    ///     let nodes_modes: Span<NODE_MODES> = array![
    ///         NODE_MODES::BRANCH_LEQ,
    ///         NODE_MODES::BRANCH_LEQ,
    ///         NODE_MODES::LEAF,
    ///         NODE_MODES::LEAF,
    ///         NODE_MODES::LEAF,
    ///         NODE_MODES::BRANCH_LEQ,
    ///         NODE_MODES::LEAF,
    ///         NODE_MODES::BRANCH_LEQ,
    ///         NODE_MODES::LEAF,
    ///         NODE_MODES::LEAF,
    ///     ]
    ///         .span();
    ///
    ///     let nodes_nodeids: Span<usize> = array![0, 1, 2, 3, 4, 0, 1, 2, 3, 4].span();
    ///
    ///     let nodes_treeids: Span<usize> = array![0, 0, 0, 0, 0, 1, 1, 1, 1, 1].span();
    ///
    ///     let nodes_truenodeids: Span<usize> = array![1, 2, 0, 0, 0, 1, 0, 3, 0, 0].span();
    ///
    ///     let nodes_values: Span<FP16x16> = array![
    ///         FP16x16 { mag: 81892, sign: false },
    ///         FP16x16 { mag: 19992, sign: true },
    ///         FP16x16 { mag: 0, sign: false },
    ///         FP16x16 { mag: 0, sign: false },
    ///         FP16x16 { mag: 0, sign: false },
    ///         FP16x16 { mag: 110300, sign: true },
    ///         FP16x16 { mag: 0, sign: false },
    ///         FP16x16 { mag: 44245, sign: true },
    ///         FP16x16 { mag: 0, sign: false },
    ///         FP16x16 { mag: 0, sign: false },
    ///     ]
    ///         .span();
    ///
    ///     let tree_ids: Span<usize> = array![0, 1].span();
    ///
    ///     let mut root_index: Felt252Dict<usize> = Default::default();
    ///     root_index.insert(0, 0);
    ///     root_index.insert(1, 5);
    ///
    ///     let mut node_index: Felt252Dict<usize> = Default::default();
    ///     node_index
    ///         .insert(2089986280348253421170679821480865132823066470938446095505822317253594081284, 0);
    ///     node_index
    ///         .insert(2001140082530619239661729809084578298299223810202097622761632384561112390979, 1);
    ///     node_index
    ///         .insert(2592670241084192212354027440049085852792506518781954896144296316131790403900, 2);
    ///     node_index
    ///         .insert(2960591271376829378356567803618548672034867345123727178628869426548453833420, 3);
    ///     node_index
    ///         .insert(458933264452572171106695256465341160654132084710250671055261382009315664425, 4);
    ///     node_index
    ///         .insert(1089549915800264549621536909767699778745926517555586332772759280702396009108, 5);
    ///     node_index
    ///         .insert(1321142004022994845681377299801403567378503530250467610343381590909832171180, 6);
    ///     node_index
    ///         .insert(2592987851775965742543459319508348457290966253241455514226127639100457844774, 7);
    ///     node_index
    ///         .insert(2492755623019086109032247218615964389726368532160653497039005814484393419348, 8);
    ///     node_index
    ///         .insert(1323616023845704258113538348000047149470450086307731200728039607710316625916, 9);
    ///
    ///     let atts = TreeEnsembleAttributes {
    ///         nodes_falsenodeids,
    ///         nodes_featureids,
    ///         nodes_missing_value_tracks_true,
    ///         nodes_modes,
    ///         nodes_nodeids,
    ///         nodes_treeids,
    ///         nodes_truenodeids,
    ///         nodes_values
    ///     };
    ///
    ///     let mut ensemble: TreeEnsemble<FP16x16> = TreeEnsemble {
    ///         atts, tree_ids, root_index, node_index
    ///     };
    ///
    ///     let base_values: Option<Span<FP16x16>> = Option::None;
    ///
    ///     let mut classifier: TreeEnsembleClassifier<FP16x16> = TreeEnsembleClassifier {
    ///         ensemble,
    ///         class_ids,
    ///         class_nodeids,
    ///         class_treeids,
    ///         class_weights,
    ///         classlabels,
    ///         base_values,
    ///         post_transform
    ///     };
    ///
    ///     let mut X: Tensor<FP16x16> = TensorTrait::new(
    ///         array![3, 3].span(),
    ///         array![
    ///             FP16x16 { mag: 65536, sign: true },
    ///             FP16x16 { mag: 52429, sign: true },
    ///             FP16x16 { mag: 39322, sign: true },
    ///             FP16x16 { mag: 26214, sign: true },
    ///             FP16x16 { mag: 13107, sign: true },
    ///             FP16x16 { mag: 0, sign: false },
    ///             FP16x16 { mag: 13107, sign: false },
    ///             FP16x16 { mag: 26214, sign: false },
    ///             FP16x16 { mag: 39322, sign: false },
    ///         ]
    ///             .span()
    ///     );
    ///
    ///     (classifier, X)
    /// }
    ///
    /// fn test_tree_ensemble_classifier_multi_pt_softmax() -> (Span<usize>, MutMatrix::<FP16x16>) {
    ///     let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAX);
    ///
    ///     let (labels, scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
    ///     (labels, scores)
    /// }
    ///
    /// >>>
    /// ([0, 0, 1],
    ///  [
    ///    [0.545123, 0.217967, 0.23691],
    ///    [0.416047, 0.284965, 0.298988],
    ///    [0.322535, 0.366664, 0.310801],
    ///  ])
    /// ```
    ///
    fn predict(
        classifier: TreeEnsembleClassifier<T>, X: Tensor<T>
    ) -> (Span<usize>, MutMatrix::<T>);
}

impl TreeEnsembleClassifierImpl<
    T,
    MAG,
    +Drop<T>,
    +Copy<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Add<T>,
    +TensorTrait<usize>,
    +TensorTrait<T>,
    +PrintTrait<T>,
    +AddEq<T>,
    +Div<T>,
    +Mul<T>
> of TreeEnsembleClassifierTrait<T> {
    fn predict(
        classifier: TreeEnsembleClassifier<T>, X: Tensor<T>
    ) -> (Span<usize>, MutMatrix::<T>) {
        let mut classifier = classifier;
        // One leaf index per (row, tree): shape [n_rows, n_trees].
        let leaves_index = classifier.ensemble.leave_index_tree(X);
        let n_classes = classifier.classlabels.len();
        // Score accumulator: one row per input, one column per class.
        let mut res: MutMatrix<T> = MutMatrixImpl::new(*leaves_index.shape.at(0), n_classes);

        // Set base values: initialize every score cell either from the
        // row-major `base_values` span or to zero.
        if classifier.base_values.is_some() {
            let mut base_values = classifier.base_values.unwrap();
            let mut row: usize = 0;
            loop {
                if row == res.rows {
                    break;
                }

                let mut col: usize = 0;
                loop {
                    if col == res.cols {
                        break;
                    }

                    let value = *base_values.pop_front().unwrap();
                    res.set(row, col, value);

                    col += 1
                };

                row += 1;
            }
        } else {
            let mut row: usize = 0;
            loop {
                if row == res.rows {
                    break;
                }

                let mut col: usize = 0;
                loop {
                    if col == res.cols {
                        break;
                    }

                    res.set(row, col, NumberTrait::zero());

                    col += 1
                };

                row += 1;
            }
        }

        // Index the weight entries by (tree id, leaf node id) so each reached
        // leaf can be mapped to all its (class, weight) contributions.
        let mut class_index: Felt252Dict<Nullable<Span<usize>>> = Default::default();
        let mut i: usize = 0;
        loop {
            if i == classifier.class_treeids.len() {
                break;
            }

            let tid = *classifier.class_treeids[i];
            let nid = *classifier.class_nodeids[i];

            let mut key = PedersenHasherImpl::new();
            let key: felt252 = key.hash(tid.into(), nid.into());

            match match_nullable(class_index.get(key)) {
                FromNullableResult::Null(()) => {
                    class_index.insert(key, NullableTrait::new(array![i].span()));
                },
                FromNullableResult::NotNull(val) => {
                    let mut new_val = val.unbox();
                    let new_val = new_val.concat(array![i].span());
                    class_index.insert(key, NullableTrait::new(new_val));
                },
            }

            i += 1;
        };

        // Accumulate, per row, the class weights of every leaf it reached.
        let mut i: usize = 0;
        loop {
            if i == res.rows {
                break;
            }

            let mut indices = get_row(@leaves_index, i);
            let mut t_index: Array<Span<core::integer::u32>> = ArrayTrait::new();
            loop {
                match indices.pop_front() {
                    Option::Some(index) => {
                        let mut key = PedersenHasherImpl::new();
                        let key: felt252 = key
                            .hash(
                                (*classifier.ensemble.atts.nodes_treeids[*index]).into(),
                                (*classifier.ensemble.atts.nodes_nodeids[*index]).into()
                            );
                        t_index.append(class_index.get(key).deref());
                    },
                    Option::None => { break; }
                };
            };

            let mut t_index = t_index.span();
            loop {
                match t_index.pop_front() {
                    Option::Some(its) => {
                        let mut its = *its;
                        loop {
                            match its.pop_front() {
                                Option::Some(it) => {
                                    // res[i][class_ids[it]] += class_weights[it]
                                    // (None means the cell was never set; start from the weight).
                                    match res.get(i, *classifier.class_ids[*it]) {
                                        Option::Some(val) => {
                                            res
                                                .set(
                                                    i,
                                                    *classifier.class_ids[*it],
                                                    val + *classifier.class_weights[*it]
                                                );
                                        },
                                        Option::None => {
                                            res
                                                .set(
                                                    i,
                                                    *classifier.class_ids[*it],
                                                    *classifier.class_weights[*it]
                                                );
                                        },
                                    };
                                },
                                Option::None => { break; }
                            };
                        };
                    },
                    Option::None => { break; }
                };
            };

            i += 1;
        };

        // Binary class: detect the degenerate case where every weight entry
        // targets the same class id.
        let mut binary = false;
        let mut i: usize = 0;
        let mut class_ids = classifier.class_ids;
        let mut class_id: usize = 0;
        // Get first class_id in class_ids
        match class_ids.pop_front() {
            Option::Some(c_id) => { class_id = *c_id; },
            Option::None => { class_id = 0; }
        };
        // NOTE(review): `i` is never incremented in this loop; termination
        // relies on pop_front() returning None (or a mismatching id). The
        // `i == len` guard only fires for an empty class_ids span — confirm
        // this is intentional.
        loop {
            if i == classifier.class_ids.len() {
                break;
            }
            match class_ids.pop_front() {
                Option::Some(c_id) => {
                    if *c_id == class_id {
                        binary = true;
                        continue;
                    } else {
                        binary = false;
                        break;
                    }
                },
                Option::None => { break; }
            };
        };

        // Clone res: in the binary case expand the single accumulated score
        // into a two-column matrix [f(score), score], where column 0 depends
        // on the post transform (1 - s for NONE/PROBIT, -s for the others so
        // the subsequent sigmoid/softmax produces the complement).
        if binary {
            let mut new_res: MutMatrix<T> = MutMatrixImpl::new(res.rows, res.cols);
            let mut i: usize = 0;
            loop {
                if i == res.rows {
                    break;
                }
                // Exchange
                match res.get(i, 0) {
                    Option::Some(res_0) => { new_res.set(i, 1, res_0); },
                    Option::None => { new_res.set(i, 1, NumberTrait::zero()); },
                };
                i += 1;
            };
            match classifier.post_transform {
                POST_TRANSFORM::NONE => {
                    let mut i: usize = 0;
                    loop {
                        if i == res.rows {
                            break;
                        }
                        // Exchange
                        match new_res.get(i, 1) {
                            Option::Some(res_1) => {
                                let value = NumberTrait::sub(NumberTrait::one(), res_1);
                                new_res.set(i, 0, value);
                            },
                            Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
                        };
                        i += 1;
                    };
                },
                POST_TRANSFORM::SOFTMAX => {
                    let mut i: usize = 0;
                    loop {
                        if i == res.rows {
                            break;
                        }
                        // Exchange
                        match new_res.get(i, 1) {
                            Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); },
                            Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
                        };
                        i += 1;
                    };
                },
                POST_TRANSFORM::LOGISTIC => {
                    let mut i: usize = 0;
                    loop {
                        if i == res.rows {
                            break;
                        }
                        // Exchange
                        match new_res.get(i, 1) {
                            Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); },
                            Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
                        };
                        i += 1;
                    };
                },
                POST_TRANSFORM::SOFTMAXZERO => {
                    let mut i: usize = 0;
                    loop {
                        if i == res.rows {
                            break;
                        }
                        // Exchange
                        match new_res.get(i, 1) {
                            Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); },
                            Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
                        };
                        i += 1;
                    };
                },
                POST_TRANSFORM::PROBIT => {
                    let mut i: usize = 0;
                    loop {
                        if i == res.rows {
                            break;
                        }
                        // Exchange
                        match new_res.get(i, 1) {
                            Option::Some(res_1) => {
                                let value = NumberTrait::sub(NumberTrait::one(), res_1);
                                new_res.set(i, 0, value);
                            },
                            Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
                        };
                        i += 1;
                    };
                },
            };
            res = new_res;
        }

        // Post Transform: normalize scores row-wise (axis 1).
        let mut new_scores = match classifier.post_transform {
            POST_TRANSFORM::NONE => res, // No action required
            POST_TRANSFORM::SOFTMAX => res.softmax(1),
            POST_TRANSFORM::LOGISTIC => res.sigmoid(),
            POST_TRANSFORM::SOFTMAXZERO => res.softmax_zero(1),
            POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
        };

        // Labels: per-row argmax mapped through `classlabels`.
        let mut labels = new_scores.argmax(1);
        let mut labels_list = ArrayTrait::new();
        loop {
            match labels.pop_front() {
                Option::Some(i) => { labels_list.append(*classifier.classlabels[*i]); },
                Option::None => { break; }
            };
        };

        return (labels_list.span(), new_scores);
    }
}
https://github.com/gizatechxyz/orion
src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo
use core::array::ArrayTrait; use core::clone::Clone; use core::box::BoxTrait; use core::traits::Into; use core::option::OptionTrait; use orion::operators::matrix::MutMatrixTrait; use core::array::SpanTrait; use core::nullable::NullableTrait; use core::dict::Felt252DictTrait; use core::dict::Felt252DictEntryTrait; use core::nullable::{match_nullable, FromNullableResult}; use orion::operators::tensor::{Tensor, TensorTrait}; use orion::operators::ml::tree_ensemble::core::{TreeEnsemble, TreeEnsembleImpl, TreeEnsembleTrait}; use orion::numbers::NumberTrait; use orion::utils::get_row; use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl}; use alexandria_data_structures::array_ext::{SpanTraitExt}; use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; use orion::operators::ml::POST_TRANSFORM; use core::debug::PrintTrait; #[derive(Destruct)] struct TreeEnsembleRegressor<T> { ensemble: TreeEnsemble<T>, target_ids: Span<usize>, target_nodeids: Span<usize>, target_treeids: Span<usize>, target_weights: Span<T>, base_values: Option<Span<T>>, n_targets: usize, aggregate_function: AGGREGATE_FUNCTION, post_transform: POST_TRANSFORM, } #[derive(Copy, Drop)] enum AGGREGATE_FUNCTION { SUM, AVERAGE, MIN, MAX, } /// Trait /// /// predict - Returns the regressed values for each input in N. trait TreeEnsembleRegressorTrait<T> { /// # TreeEnsembleRegressor::predict /// /// ```rust /// fn predict(regressor: TreeEnsembleRegressor<T>, X: Tensor<T>) -> (Span<usize>, MutMatrix::<T>); /// ``` /// /// Tree Ensemble regressor. Returns the regressed values for each input in N. /// /// ## Args /// /// * `self`: TreeEnsembleRegressor<T> - A TreeEnsembleRegressor object. /// * `X`: Input 2D tensor. 
/// /// ## Returns /// /// * Regressed values for each input in N /// /// ## Type Constraints /// /// `TreeEnsembleRegressor` and `X` must be fixed points /// /// ## Examples /// /// ```rust /// use orion::numbers::FP16x16; /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor}; /// use orion::operators::ml::{NODE_MODES, TreeEnsembleAttributes, TreeEnsemble}; /// use orion::operators::ml::tree_ensemble::tree_ensemble_regressor::{ /// TreeEnsembleRegressor, POST_TRANSFORM, TreeEnsembleRegressorTrait, AGGREGATE_FUNCTION /// }; /// use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; /// /// /// fn tree_ensemble_regressor_helper( /// agg: AGGREGATE_FUNCTION /// ) -> (TreeEnsembleRegressor<FP16x16>, Tensor<FP16x16>) { /// let n_targets: usize = 1; /// let aggregate_function = agg; /// let nodes_falsenodeids: Span<usize> = array![4, 3, 0, 0, 0, 2, 0, 4, 0, 0].span(); /// let nodes_featureids: Span<usize> = array![0, 2, 0, 0, 0, 0, 0, 2, 0, 0].span(); /// let nodes_missing_value_tracks_true: Span<usize> = array![0, 0, 0, 0, 0, 0, 0, 0, 0, 0].span(); /// let nodes_modes: Span<NODE_MODES> = array![ /// NODE_MODES::BRANCH_LEQ, /// NODE_MODES::BRANCH_LEQ, /// NODE_MODES::LEAF, /// NODE_MODES::LEAF, /// NODE_MODES::LEAF, /// NODE_MODES::BRANCH_LEQ, /// NODE_MODES::LEAF, /// NODE_MODES::BRANCH_LEQ, /// NODE_MODES::LEAF, /// NODE_MODES::LEAF /// ] /// .span(); /// let nodes_nodeids: Span<usize> = array![0, 1, 2, 3, 4, 0, 1, 2, 3, 4].span(); /// let nodes_treeids: Span<usize> = array![0, 0, 0, 0, 0, 1, 1, 1, 1, 1].span(); /// let nodes_truenodeids: Span<usize> = array![1, 2, 0, 0, 0, 1, 0, 3, 0, 0].span(); /// let nodes_values: Span<FP16x16> = array![ /// FP16x16 { mag: 17462, sign: false }, /// FP16x16 { mag: 40726, sign: false }, /// FP16x16 { mag: 0, sign: false }, /// FP16x16 { mag: 0, sign: false }, /// FP16x16 { mag: 0, sign: false }, /// FP16x16 { mag: 47240, sign: true }, /// FP16x16 { mag: 0, sign: false }, /// FP16x16 { mag: 36652, sign: 
true }, /// FP16x16 { mag: 0, sign: false }, /// FP16x16 { mag: 0, sign: false } /// ] /// .span(); /// let target_ids: Span<usize> = array![0, 0, 0, 0, 0, 0].span(); /// let target_nodeids: Span<usize> = array![2, 3, 4, 1, 3, 4].span(); /// let target_treeids: Span<usize> = array![0, 0, 0, 1, 1, 1].span(); /// let target_weights: Span<FP16x16> = array![ /// FP16x16 { mag: 5041, sign: false }, /// FP16x16 { mag: 32768, sign: false }, /// FP16x16 { mag: 32768, sign: false }, /// FP16x16 { mag: 0, sign: false }, /// FP16x16 { mag: 18724, sign: false }, /// FP16x16 { mag: 32768, sign: false } /// ] /// .span(); /// /// let base_values: Option<Span<FP16x16>> = Option::None; /// let post_transform = POST_TRANSFORM::NONE; /// /// let tree_ids: Span<usize> = array![0, 1].span(); /// /// let mut root_index: Felt252Dict<usize> = Default::default(); /// root_index.insert(0, 0); /// root_index.insert(1, 5); /// /// let mut node_index: Felt252Dict<usize> = Default::default(); /// node_index /// .insert(2089986280348253421170679821480865132823066470938446095505822317253594081284, 0); /// node_index /// .insert(2001140082530619239661729809084578298299223810202097622761632384561112390979, 1); /// node_index /// .insert(2592670241084192212354027440049085852792506518781954896144296316131790403900, 2); /// node_index /// .insert(2960591271376829378356567803618548672034867345123727178628869426548453833420, 3); /// node_index /// .insert(458933264452572171106695256465341160654132084710250671055261382009315664425, 4); /// node_index /// .insert(1089549915800264549621536909767699778745926517555586332772759280702396009108, 5); /// node_index /// .insert(1321142004022994845681377299801403567378503530250467610343381590909832171180, 6); /// node_index /// .insert(2592987851775965742543459319508348457290966253241455514226127639100457844774, 7); /// node_index /// .insert(2492755623019086109032247218615964389726368532160653497039005814484393419348, 8); /// node_index /// 
///     .insert(1323616023845704258113538348000047149470450086307731200728039607710316625916, 9);
///
///     let atts = TreeEnsembleAttributes {
///         nodes_falsenodeids,
///         nodes_featureids,
///         nodes_missing_value_tracks_true,
///         nodes_modes,
///         nodes_nodeids,
///         nodes_treeids,
///         nodes_truenodeids,
///         nodes_values
///     };
///
///     let mut ensemble: TreeEnsemble<FP16x16> = TreeEnsemble {
///         atts, tree_ids, root_index, node_index
///     };
///
///     let mut regressor: TreeEnsembleRegressor<FP16x16> = TreeEnsembleRegressor {
///         ensemble,
///         target_ids,
///         target_nodeids,
///         target_treeids,
///         target_weights,
///         base_values,
///         n_targets,
///         aggregate_function,
///         post_transform
///     };
///
///     let mut X: Tensor<FP16x16> = TensorTrait::new(
///         array![3, 3].span(),
///         array![
///             FP16x16 { mag: 32768, sign: true },
///             FP16x16 { mag: 26214, sign: true },
///             FP16x16 { mag: 19660, sign: true },
///             FP16x16 { mag: 13107, sign: true },
///             FP16x16 { mag: 6553, sign: true },
///             FP16x16 { mag: 0, sign: false },
///             FP16x16 { mag: 6553, sign: false },
///             FP16x16 { mag: 13107, sign: false },
///             FP16x16 { mag: 19660, sign: false },
///         ]
///             .span()
///     );
///
///     (regressor, X)
/// }
///
/// fn test_tree_ensemble_regressor_SUM() -> MutMatrix::<FP16x16> {
///     let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::SUM);
///     let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);
///     res
/// }
/// >>>
///
/// [0.5769, 0.5769, 0.5769]
///
/// ```
///
    fn predict(regressor: TreeEnsembleRegressor<T>, X: Tensor<T>) -> MutMatrix::<T>;
}

impl TreeEnsembleRegressorImpl<
    T,
    MAG,
    +Drop<T>,
    +Copy<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Add<T>,
    +TensorTrait<usize>,
    +TensorTrait<T>,
    +PrintTrait<T>,
    +AddEq<T>,
    +Div<T>,
    +Mul<T>,
> of TreeEnsembleRegressorTrait<T> {
    // Regress each row of X: find the leaf each tree routes the row to,
    // aggregate the matching target weights, add base_values, then apply
    // the configured post-transform.
    fn predict(regressor: TreeEnsembleRegressor<T>, X: Tensor<T>) -> MutMatrix::<T> {
        let mut regressor = regressor;
        // One leaf index per (row, tree); first shape dim is the row count.
        let leaves_index = regressor.ensemble.leave_index_tree(X);
        let n_targets = regressor.n_targets;
        let mut res: MutMatrix<T> = MutMatrixImpl::new(*leaves_index.shape.at(0), n_targets);
        let n_trees = regressor.ensemble.tree_ids.len();

        // Build a map from Pedersen-hash(tree_id, node_id) to the list of
        // indices into the parallel target_* spans for that leaf.
        let mut target_index: Felt252Dict<Nullable<Span<usize>>> = Default::default();
        let mut i: usize = 0;
        loop {
            if i == regressor.target_treeids.len() {
                break;
            }

            let tid = *regressor.target_treeids[i];
            let nid = *regressor.target_nodeids[i];

            let mut key = PedersenHasherImpl::new();
            let key: felt252 = key.hash(tid.into(), nid.into());
            match match_nullable(target_index.get(key)) {
                FromNullableResult::Null(()) => {
                    // First target entry for this leaf.
                    target_index.insert(key, NullableTrait::new(array![i].span()));
                },
                FromNullableResult::NotNull(val) => {
                    // Leaf already has entries: append this index to the list.
                    let mut new_val = val.unbox();
                    let new_val = new_val.concat(array![i].span());
                    target_index.insert(key, NullableTrait::new(new_val));
                },
            }

            i += 1;
        };

        // For each input row: resolve every reached leaf to its target
        // entries, then fold them with the configured aggregate function.
        let mut i: usize = 0;
        loop {
            if i == res.rows {
                break;
            }

            let mut indices = get_row(@leaves_index, i);
            let mut t_index: Array<Span<core::integer::u32>> = ArrayTrait::new();
            loop {
                match indices.pop_front() {
                    Option::Some(index) => {
                        // Re-derive the (tree_id, node_id) key for this leaf.
                        let mut key = PedersenHasherImpl::new();
                        let key: felt252 = key
                            .hash(
                                (*regressor.ensemble.atts.nodes_treeids[*index]).into(),
                                (*regressor.ensemble.atts.nodes_nodeids[*index]).into()
                            );
                        t_index.append(target_index.get(key).deref());
                    },
                    Option::None => { break; }
                };
            };
            let mut t_index = t_index.span();

            match regressor.aggregate_function {
                AGGREGATE_FUNCTION::SUM => { compute_res_SUM(ref regressor, ref res, ref t_index, i); },
                AGGREGATE_FUNCTION::AVERAGE => {
                    compute_res_AVERAGE(ref regressor, ref res, ref t_index, n_trees, i);
                },
                AGGREGATE_FUNCTION::MIN => { compute_res_MIN(ref regressor, ref res, ref t_index, i); },
                AGGREGATE_FUNCTION::MAX => { compute_res_MAX(ref regressor, ref res, ref t_index, i); },
            };
            i += 1;
        };

        // Convention is to add base_values after aggregate function.
        // base_values is consumed row-major: one value per (row, col) cell.
        if regressor.base_values.is_some() {
            let mut base_values = regressor.base_values.unwrap();
            let mut row: usize = 0;
            loop {
                if row == res.rows {
                    break;
                }

                let mut col: usize = 0;
                loop {
                    if col == res.cols {
                        break;
                    }

                    let value = *base_values.pop_front().unwrap();
                    match res.get(row, col) {
                        Option::Some(val) => { res.set(row, col, val + value); },
                        Option::None => { res.set(row, col, value); },
                    };

                    col += 1
                };

                row += 1;
            }
        }

        // Post Transform
        let mut new_scores = match regressor.post_transform {
            POST_TRANSFORM::NONE => res, // No action required
            POST_TRANSFORM::SOFTMAX => res.softmax(1),
            POST_TRANSFORM::LOGISTIC => res.sigmoid(),
            POST_TRANSFORM::SOFTMAXZERO => res.softmax_zero(1),
            POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
        };

        return new_scores;
    }
}

// SUM aggregation: add every matching target weight into res[i, target_id].
// Cells never written stay unset; the first write seeds the cell.
fn compute_res_SUM<
    T,
    MAG,
    +Drop<T>,
    +Copy<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Add<T>,
    +TensorTrait<usize>,
    +TensorTrait<T>,
    +PrintTrait<T>,
    +AddEq<T>,
    +Div<T>,
    +Mul<T>,
>(
    ref self: TreeEnsembleRegressor<T>,
    ref res: MutMatrix<T>,
    ref t_index: Span<Span<core::integer::u32>>,
    i: usize
) {
    loop {
        match t_index.pop_front() {
            Option::Some(its) => {
                let mut its = *its;
                loop {
                    match its.pop_front() {
                        Option::Some(it) => {
                            match res.get(i, *self.target_ids[*it]) {
                                Option::Some(val) => {
                                    res
                                        .set(
                                            i,
                                            *self.target_ids[*it],
                                            val + *self.target_weights[*it]
                                        );
                                },
                                Option::None => {
                                    res.set(i, *self.target_ids[*it], *self.target_weights[*it]);
                                },
                            };
                        },
                        Option::None => { break; }
                    };
                };
            },
            Option::None => { break; }
        };
    };
}

// AVERAGE aggregation: like SUM but each weight is divided by the tree count.
fn compute_res_AVERAGE<
    T,
    MAG,
    +Drop<T>,
    +Copy<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Add<T>,
    +TensorTrait<usize>,
    +TensorTrait<T>,
    +PrintTrait<T>,
    +AddEq<T>,
    +Div<T>,
    +Mul<T>
>(
    ref self: TreeEnsembleRegressor<T>,
    ref res: MutMatrix<T>,
    ref t_index: Span<Span<core::integer::u32>>,
    n_trees: usize,
    i: usize
) {
    // NOTE(review): the 65536 (2^16) factor assumes T is an FP16x16-style
    // 16.16 fixed point; for other T representations this scaling would be
    // wrong — confirm against NumberTrait::from_felt for each supported T.
    let n_trees_felt: felt252 = (n_trees * 65536).into();
    let n_trees: T = NumberTrait::from_felt(n_trees_felt);
    loop {
        match t_index.pop_front() {
            Option::Some(its) => {
                let mut its = *its;
                loop {
                    match its.pop_front() {
                        Option::Some(it) => {
                            match res.get(i, *self.target_ids[*it]) {
                                Option::Some(val) => {
                                    res
                                        .set(
                                            i,
                                            *self.target_ids[*it],
                                            val + (*self.target_weights[*it]) / n_trees
                                        );
                                },
                                Option::None => {
                                    res
                                        .set(
                                            i,
                                            *self.target_ids[*it],
                                            *self.target_weights[*it] / n_trees
                                        );
                                },
                            };
                        },
                        Option::None => { break; }
                    };
                };
            },
            Option::None => { break; }
        };
    };
}

// MIN aggregation: seed row i with max_value (the identity for min),
// then fold every matching weight with NumberTrait::min.
fn compute_res_MIN<
    T,
    MAG,
    +Drop<T>,
    +Copy<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Add<T>,
    +TensorTrait<usize>,
    +TensorTrait<T>,
    +PrintTrait<T>,
    +AddEq<T>,
    +Div<T>,
    +Mul<T>,
>(
    ref self: TreeEnsembleRegressor<T>,
    ref res: MutMatrix<T>,
    ref t_index: Span<Span<core::integer::u32>>,
    i: usize
) {
    let mut j = 0;
    loop {
        if j == res.cols {
            break;
        }
        res.set(i, j, NumberTrait::max_value());
        j += 1;
    };
    loop {
        match t_index.pop_front() {
            Option::Some(its) => {
                let mut its = *its;
                loop {
                    match its.pop_front() {
                        Option::Some(it) => {
                            match res.get(i, *self.target_ids[*it]) {
                                Option::Some(val) => {
                                    res
                                        .set(
                                            i,
                                            *self.target_ids[*it],
                                            NumberTrait::min(val, *self.target_weights[*it])
                                        );
                                },
                                Option::None => {
                                    res.set(i, *self.target_ids[*it], *self.target_weights[*it]);
                                },
                            };
                        },
                        Option::None => { break; }
                    };
                };
            },
            Option::None => { break; }
        };
    };
}

// MAX aggregation: seed row i with min_value (the identity for max),
// then fold every matching weight with NumberTrait::max.
fn compute_res_MAX<
    T,
    MAG,
    +Drop<T>,
    +Copy<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Add<T>,
    +TensorTrait<usize>,
    +TensorTrait<T>,
    +PrintTrait<T>,
    +AddEq<T>,
    +Div<T>,
    +Mul<T>,
>(
    ref self: TreeEnsembleRegressor<T>,
    ref res: MutMatrix<T>,
    ref t_index: Span<Span<core::integer::u32>>,
    i: usize
) {
    let mut j = 0;
    loop {
        if j == res.cols {
            break;
        }
        res.set(i, j, NumberTrait::min_value());
        j += 1;
    };
    loop {
        match t_index.pop_front() {
            Option::Some(its) => {
                let mut its = *its;
                loop {
                    match its.pop_front() {
                        Option::Some(it) => {
                            match res.get(i, *self.target_ids[*it]) {
                                Option::Some(val) => {
                                    res
                                        .set(
                                            i,
                                            *self.target_ids[*it],
                                            NumberTrait::max(val, *self.target_weights[*it])
                                        );
                                },
                                Option::None => {
                                    res.set(i, *self.target_ids[*it], *self.target_weights[*it]);
                                },
                            };
                        },
                        Option::None => { break; }
                    };
                };
            },
            Option::None => { break; }
        };
    };
}
https://github.com/gizatechxyz/orion
src/operators/nn.cairo
// Neural-network operator module: declares the NNTrait interface (core),
// its shared functional kernels, and re-exports one NNTrait implementation
// per supported numeric type (fixed-point and integer).
mod core;
mod implementations;
mod functional;

use orion::operators::nn::core::NNTrait;

use orion::operators::nn::implementations::nn_fp8x23::FP8x23NN;
use orion::operators::nn::implementations::nn_fp16x16::FP16x16NN;
use orion::operators::nn::implementations::nn_fp32x32::FP32x32NN;
use orion::operators::nn::implementations::nn_fp64x64::FP64x64NN;
use orion::operators::nn::implementations::nn_i8::I8NN;
use orion::operators::nn::implementations::nn_i32::I32NN;
use orion::operators::nn::implementations::nn_u32::U32NN;
https://github.com/gizatechxyz/orion
src/operators/nn/core.cairo
use orion::operators::tensor::core::Tensor; /// Trait /// /// relu - Applies the rectified linear unit function element-wise. /// leaky_relu - Applies the leaky rectified linear unit (Leaky ReLU) activation function element-wise. /// sigmoid - Applies the Sigmoid function to an n-dimensional input tensor. /// softmax - Computes softmax activations. /// softmax_zero - Computes softmax zero. /// logsoftmax - Applies the natural log to Softmax function to an n-dimensional input Tensor. /// softsign - Applies the Softsign function element-wise. /// softplus - Applies the Softplus function element-wise. /// linear - Performs a linear transformation of the input tensor using the provided weights and bias. /// hard_sigmoid - Applies the Hard Sigmoid function to an n-dimensional input tensor. /// thresholded_relu - Performs the thresholded relu activation function element-wise. /// gemm - Performs General Matrix multiplication. /// grid_sample - Computes the grid sample of the input tensor and input grid. /// col2im - Rearranges column blocks back into a multidimensional image /// conv_transpose - Performs the convolution transpose of the input data tensor and weight tensor. /// conv - Performs the convolution of the input data tensor and weight tensor. trait NNTrait<T> { /// # NNTrait::relu /// /// ```rust /// fn relu(tensor: @Tensor<T>) -> Tensor<T>; /// ``` /// /// Applies the rectified linear unit function element-wise /// /// $$ /// ReLU(x)=(x)^+=max(0,x) /// $$ /// /// ## Args /// /// * `tensor`(`@Tensor<T>`) - The input tensor. /// /// ## Returns /// /// A `Tensor<T>` with the same shape as the input tensor. 
/// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// /// use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor}; /// use orion::operators::nn::{NNTrait, I32NN}; /// /// fn relu_example() -> Tensor<i32> { /// let tensor = TensorTrait::<i32>::new( /// shape: array![2, 2].span(), /// data: array![1, 2, -1, -2].span(), /// ); /// /// return NNTrait::relu(@tensor); /// } /// >>> [[1,2],[0,0]] /// ``` /// fn relu(tensor: @Tensor<T>) -> Tensor<T>; /// # NNTrait::softmax /// /// ```rust /// fn softmax(tensor: @Tensor<T>, axis: Option<i32>) -> Tensor<T>; /// ``` /// /// Applies the Softmax function to an n-dimensional input Tensor rescaling them so that the elements of the n-dimensional output Tensor lie in the range \[0,1] and sum to 1. /// /// $$ /// \text{softmax}(x_i) = \frac{e^{x_i}}{\sum_{j=1}^n e^{x_j}} /// $$ /// /// ## Args /// /// * `tensor`(`@Tensor<T>`) - The input tensor. /// * `axis`(`Option<i32>`) - Describes the dimension Softmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). /// /// ## Returns /// /// A Tensor of fixed point numbers with the same shape than the input Tensor. /// /// ## Type Constraints /// /// Constrain input and output types to fixed point tensors. 
/// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// /// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor}; /// use orion::operators::nn::{NNTrait, FP8x23NN}; /// use orion::numbers::{FP8x23, FixedTrait}; /// /// fn softmax_example() -> Tensor<FP8x23> { /// let tensor = TensorTrait::<FP8x23>::new( /// shape: array![2, 2].span(), /// data: array![ /// FixedTrait::new(0, false), /// FixedTrait::new(1, false), /// FixedTrait::new(2, false), /// FixedTrait::new(3, false), /// ] /// .span(), /// ); /// /// return NNTrait::softmax(@tensor, Option::Some(1)); /// } /// >>> [[2255697,6132911],[2255697,6132911]] /// // The fixed point representation of /// // [[0.2689, 0.7311],[0.2689, 0.7311]] /// ``` /// fn softmax(tensor: @Tensor<T>, axis: Option<i32>) -> Tensor<T>; /// # NNTrait::softmax_zero /// /// ```rust /// fn softmax_zero(tensor: @Tensor<T>, axis: usize) -> Tensor<T>; /// ``` /// /// Applies the Softmax zero function to an n-dimensional input Tensor rescaling them so that the elements of the n-dimensional output Tensor lie in the range \[0,1] and sum to 1 while keeping the zero elements to zero. /// /// The softmax zero on the set $\mathbf{x} = (x_1, ..., x_n)$ is given by : /// /// $$ /// \text{softmax zero}(x_i) = \begin{cases} /// 0 & \qquad x_i = 0 \\ /// \frac{e^{x_i}}{ \sum_{x \in {S}} e^{x}} & \qquad \text{otherwise} /// \end{cases} /// $$ /// where $S$ in a subset of $\mathbf{x}$ given by /// /// $$ /// \ S = \{ (x_1, \ldots, x_k) \mid 1 \leq k \leq n, x_j \neq 0 \text{ for } 1 \leq j \leq k \} /// $$ /// /// ## Args /// /// * `tensor`(`@Tensor<T>`) - The input tensor. /// * `axis`(`usize`) - The axis along which to compute the softmax zero. /// /// ## Returns /// /// A Tensor of fixed point numbers with the same shape than the input Tensor. /// /// ## Type Constraints /// /// Constrain input and output types to fixed point tensors. 
/// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// /// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor}; /// use orion::operators::nn::{NNTrait, FP8x23NN}; /// use orion::numbers::{FP8x23, FixedTrait}; /// /// use core::debug::PrintTrait; /// /// fn softmax_zero_example() -> Tensor<FP8x23> { /// let tensor = TensorTrait::<FP8x23>::new( /// shape: array![2, 2].span(), /// data: array![ /// FixedTrait::new(0, false), /// FixedTrait::new(8388608, false), /// FixedTrait::new(16777216, false), /// FixedTrait::new(25165824, false), /// ] /// .span(), /// ); /// /// return NNTrait::softmax_zero(@tensor, 1); /// } /// >>> [[0,0x800000],[2256043,6132564]] /// // The fixed point representation of /// // [[0, 1],[0.2689, 0.7311]] /// ``` /// fn softmax_zero(tensor: @Tensor<T>, axis: usize) -> Tensor<T>; /// # NNTrait::logsoftmax /// /// ```rust /// fn logsoftmax(tensor: @Tensor<T>, axis: usize) -> Tensor<T> /// ``` /// /// Applies the natural log to Softmax function to an n-dimensional input Tensor consisting of values in the range \[0,1]. /// /// $$ /// \text{log softmax}(x_i) = \log(frac{e^{x_i}}{\sum_{j=1}^n e^{x_j}}) /// $$ /// /// ## Args /// /// * `tensor`(`@Tensor<T>`) - The input tensor. /// * `axis`(`usize`) - The axis along which to compute the natural lof softmax outputs. /// /// ## Returns /// /// A Tensor of fixed point numbers with the same shape than the input Tensor. /// /// ## Type Constraints /// /// Constrain input and output types to fixed point tensors. 
/// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// /// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23}; /// use orion::operators::nn::{NNTrait, FP8x23NN}; /// use orion::numbers::{FP8x23, FixedTrait}; /// /// fn logsoftmax_example() -> Tensor<FP8x23> { /// let tensor = TensorTrait::<FP8x23>::new( /// shape: array![2, 2].span(), /// data: array![ /// FixedTrait::new(0, false), /// FixedTrait::new(1, false), /// FixedTrait::new(2, false), /// FixedTrait::new(3, false), /// ] /// .span(), /// ); /// /// return NNTrait::logsoftmax(@tensor, 1); /// } /// This will first generate the softmax output tensor /// >>> [[2255697,6132911],[2255697,6132911]] /// // The fixed point representation of /// // [[0.2689, 0.7311],[0.2689, 0.7311]] /// /// Applying the natural log to this tensor yields /// >>> /// // The fixed point representation of: /// // [[-1.3134, -0.3132],[-1.3134, -0.3132]] /// ``` /// fn logsoftmax(tensor: @Tensor<T>, axis: usize) -> Tensor<T>; /// # NNTrait::sigmoid /// /// ```rust /// fn sigmoid(tensor: @Tensor<T>) -> Tensor<T>; /// ``` /// /// Applies the Sigmoid function to an n-dimensional input tensor rescaling them so that the elements of the n-dimensional output Tensor lie in the range \[0,1]. /// /// $$ /// \text{sigmoid}(x_i) = \frac{1}{1 + e^{-x_i}} /// $$ /// /// ## Args /// /// * `tensor`(`@Tensor<T>`) - The input tensor. /// /// ## Returns /// /// A Tensor of fixed point numbers with the same shape than the input Tensor. /// /// ## Type Constraints /// /// Constrain input and output types to fixed point tensors. 
/// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// /// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23}; /// use orion::operators::nn::{NNTrait, FP8x23NN}; /// use orion::numbers::{FP8x23, FixedTrait}; /// /// fn sigmoid_example() -> Tensor<FP8x23> { /// let tensor = TensorTrait::<FP8x23>::new( /// shape: array![2, 2].span(), /// data: array![ /// FixedTrait::new(0, false), /// FixedTrait::new(1, false), /// FixedTrait::new(2, false), /// FixedTrait::new(3, false), /// ] /// .span(), /// ); /// /// return NNTrait::sigmoid(@tensor); /// } /// >>> [[4194304,6132564],[7388661,7990771]] /// // The fixed point representation of /// // [[0.5, 0.7310586],[0.88079703, 0.95257413]] /// ``` /// fn sigmoid(tensor: @Tensor<T>) -> Tensor<T>; /// # NNTrait::softsign /// /// ```rust /// fn softsign(tensor: @Tensor<T>) -> Tensor<T>; /// ``` /// /// Applies the Softsign function to an n-dimensional input Tensor such that the elements of the n-dimensional output Tensor lie in the range \[-1,1]. /// /// $$ /// \text{softsign}(x_i) = \frac{x_i}{1 + |x_i|} /// $$ /// /// ## Args /// /// * `tensor`(`@Tensor<T>`) - The input tensor. /// /// ## Returns /// /// A Tensor of fixed point numbers with the same shape than the input Tensor. /// /// ## Type Constraints /// /// Constrain input and output types to fixed point tensors. 
/// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// /// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23}; /// use orion::operators::nn::{NNTrait, FP8x23NN}; /// use orion::numbers::{FP8x23, FixedTrait}; /// /// fn softsign_example() -> Tensor<FP8x23> { /// let tensor = TensorTrait::<FP8x23>::new( /// shape: array![2, 2].span(), /// data: array![ /// FixedTrait::new(0, false), /// FixedTrait::new(1, false), /// FixedTrait::new(2, false), /// FixedTrait::new(3, false), /// ] /// .span(), /// ); /// /// return NNTrait::softsign(@tensor); /// } /// >>> [[0,4194304],[5592405,6291456]] /// // The fixed point representation of /// // [[0, 0.5],[0.67, 0.75]] /// ``` /// fn softsign(tensor: @Tensor<T>) -> Tensor<T>; /// # NNTrait::softplus /// /// ```rust /// fn softplus(tensor: @Tensor<T>) -> Tensor<T>; /// ``` /// /// Applies the Softplus function to an n-dimensional input Tensor such that the elements of the n-dimensional output Tensor lie in the range \[-1,1]. /// /// $$ /// \text{softplus}(x_i) = log({1 + e^{x_i}}) /// $$ /// /// ## Args /// /// * `tensor`(`@Tensor<T>`) - The input tensor. /// /// ## Returns /// /// A Tensor of fixed point numbers with the same shape than the input Tensor. /// /// ## Type Constraints /// /// Constrain input and output types to fixed point tensors. 
/// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// /// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23}; /// use orion::operators::nn::{NNTrait, FP8x23NN}; /// use orion::numbers::{FP8x23, FixedTrait}; /// /// fn softplus_example() -> Tensor<FP8x23> { /// let tensor = TensorTrait::<FP8x23>::new( /// shape: array![2, 2].span(), /// data: array![ /// FixedTrait::new(0, false), /// FixedTrait::new(1, false), /// FixedTrait::new(2, false), /// FixedTrait::new(3, false), /// ] /// .span(), /// ); /// /// return NNTrait::softplus(@tensor); /// } /// >>> [[5814540,11016447],[17841964,25573406]] /// // The fixed point representation of /// // [[0.6931452, 1.31326096],[2.12692796, 3.04858728]] /// ``` /// fn softplus(tensor: @Tensor<T>) -> Tensor<T>; /// # NNTrait::linear /// /// ```rust /// fn linear(inputs: Tensor<T>, weights: Tensor<T>, bias: Tensor<T>) -> Tensor<T> /// ``` /// /// Performs a linear transformation of the input tensor using the provided weights and bias. /// /// ## Args /// /// * `tensor`(`@Tensor<T>`) - A 1D tensor representing the input tensor. /// * `weights`(`@Tensor<T>`) - A 2D tensor representing the weights. /// * `bias`(`@Tensor<T>`) - A 1D tensor representing the bias. /// /// ## Panics /// /// * This function asserts that the input tensor `inputs` must be 1D, weights tensor must be 2D, and bias tensor must be 1D. /// /// ## Returns /// /// A `Tensor<T>` representing the result of the linear transformation. /// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// /// use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor}; /// use orion::operators::nn::{NNTrait, I32NN}; /// /// fn linear_example() -> Tensor<i32> { /// // We instantiate inputs here. /// let inputs = TensorTrait::<i32>::new( /// shape: array![3].span(), /// data: array![ /// -71, 38, 62, /// ] /// .span(), /// ); /// /// // We instantiate weights here. 
/// let weights = TensorTrait::<i32>::new( /// shape: array![2, 3].span(), /// data: array![ /// -8, /// 64, /// 40, /// -33, /// -34, /// -20, /// ] /// .span(), /// ); /// /// // We instantiate bias here. /// let bias = TensorTrait::<i32>::new( /// shape: array![2].span(), /// data: array![61, -61].span(), /// ); /// /// return NNTrait::linear(inputs, weights, bias); /// } /// >>> [5541, -250] /// ```` /// fn linear(inputs: Tensor<T>, weights: Tensor<T>, bias: Tensor<T>) -> Tensor<T>; /// # NNTrait::leaky_relu /// /// ```rust /// fn leaky_relu(inputs: @Tensor<T>, alpha: @T) -> Tensor<T> /// ``` /// /// Applies the leaky rectified linear unit (Leaky ReLU) activation function element-wise to a given tensor. /// /// The Leaky ReLU function is defined as f(x) = alpha * x if x < 0, f(x) = x otherwise, where x is the input element. /// /// ## Args /// * `inputs`(`@Tensor<T>`) - A snapshot of a tensor to which the Leaky ReLU function will be applied. /// * `alpha`(`@T`) - A snapshot of a fixed point scalar that defines the alpha value of the Leaky ReLU function. /// /// ## Returns /// A new fixed point tensor with the same shape as the input tensor and the Leaky ReLU function applied element-wise. /// /// ## Type Constraints /// /// Constrain input and output types to fixed point tensors. 
/// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// /// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23}; /// use orion::operators::nn::{NNTrait, FP8x23NN}; /// use orion::numbers::{FP8x23, FixedTrait}; /// /// fn leaky_relu_example() -> Tensor<FP8x23> { /// let tensor = TensorTrait::<FP8x23>::new( /// shape: array![2, 3].span(), /// data: array![ /// FixedTrait::new(1, false), /// FixedTrait::new(2, false), /// FixedTrait::new(1, true), /// FixedTrait::new(2, true), /// FixedTrait::new(0, false), /// FixedTrait::new(0, false), /// ] /// .span(), /// ); /// let alpha = FixedTrait::from_felt(838861); // 0.1 /// /// return NNTrait::leaky_relu(@tensor, @alpha); /// } /// >>> [[8388608, 16777216, 838861], [1677722, 0, 0]] /// // The fixed point representation of /// [[1, 2, 0.1], [0.2, 0, 0]] /// ``` /// fn leaky_relu(inputs: @Tensor<T>, alpha: @T) -> Tensor<T>; /// # NNTrait::hard_sigmoid /// /// ```rust /// fn hard_sigmoid(tensor: @Tensor<T>, alpha: @T, beta: @T) -> Tensor<T>; /// ``` /// /// Applies the HardSigmoid function to an n-dimensional input tensor. /// /// $$ /// \text{HardSigmoid}(x_i) = \text{max}(0, \text{min}(alpha * x + beta, 1)) /// $$ /// /// ## Args /// /// * `tensor`(`@Tensor<T>`) - The input tensor. /// * `alpha`(`@T`) - value of alpha. /// * `beta`(`@T`) - value of beta. /// /// ## Returns /// /// A Tensor of fixed point numbers with the same shape than the input Tensor. /// /// ## Type Constraints /// /// Constrain input and output types to fixed point tensors. 
/// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// /// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23}; /// use orion::operators::nn::{NNTrait, FP8x23NN}; /// use orion::numbers::{FP16x16, FixedTrait}; /// /// fn hard_sigmoid_example() -> Tensor<FP16x16> { /// let tensor = TensorTrait::<FP16x16>::new( /// shape: array![2, 2].span(), /// data: array![ /// FixedTrait::new(0, false), /// FixedTrait::new(13107, false), /// FixedTrait::new(32768, false), /// FixedTrait::new(65536, false), /// ] /// .span(), /// ); /// let alpha = FixedTrait::new(13107, false); /// let beta = FixedTrait::new(32768, false); /// /// return NNTrait::hard_sigmoid(@tensor, @alpha, @beta); /// } /// >>> [[32768, 35389],[39321, 45875]] /// ``` /// fn hard_sigmoid(tensor: @Tensor<T>, alpha: @T, beta: @T) -> Tensor<T>; /// # NNTrait::thresholded_relu /// /// ```rust /// fn thresholded_relu(tensor: @Tensor<T>, alpha: @T) -> Tensor<T> /// ``` /// /// Applies the thresholded rectified linear unit (Thresholded ReLU) activation function element-wise to a given tensor. /// /// The Thresholded ReLU function is defined as f(x) = x if x > alpha, f(x) = 0 otherwise, where x is the input element. /// /// ## Args /// * `tensor`(`@Tensor<T>`) - A snapshot of a tensor to which the Leaky ReLU function will be applied. /// * `alpha`(`@T`) - A snapshot of a fixed point scalar that defines the alpha value of the Thresholded ReLU function. /// /// ## Returns /// A new fixed point tensor with the same shape as the input tensor and the Thresholded ReLU function applied element-wise. /// /// ## Type Constraints /// /// Constrain input and output types to fixed point tensors. 
/// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// /// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23}; /// use orion::operators::nn::{NNTrait, FP8x23NN}; /// use orion::numbers::{FP8x23, FixedTrait}; /// /// fn thresholded_relu_example() -> Tensor<FP8x23> { /// let tensor = TensorTrait::<FP8x23>::new( /// shape: array![2, 2].span(), /// data: array![ /// FixedTrait::new(0, false), /// FixedTrait::new(256, false), /// FixedTrait::new(512, false), /// FixedTrait::new(257, false), /// ] /// .span(), /// ); /// let alpha = FixedTrait::from_felt(256); // 1.0 /// /// return NNTrait::leaky_relu(@tensor, @alpha); /// } /// >>> [[0, 0], [512, 257]] /// ``` /// fn thresholded_relu(tensor: @Tensor<T>, alpha: @T) -> Tensor<T>; /// # NNTrait::space_to_depth /// /// ```rust /// fn space_to_depth(tensor: @Tensor<T>, blocksize: usize) -> Tensor<T>; /// ``` /// /// SpaceToDepth rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the depth dimension. /// /// ## Args /// /// * `tensor`(`@Tensor<T>`) - The input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width. /// * `blocksize`(`usize`) - The size of the blocks to move along [blocksize, blocksize]. /// /// ## Returns /// /// A `Tensor<T>` of [N, C * blocksize * blocksize, H/blocksize, W/blocksize]. 
/// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// use orion::operators::tensor::{TensorTrait, Tensor}; /// use orion::operators::tensor::{I8Tensor, I8TensorAdd}; /// use orion::numbers::NumberTrait; /// use orion::operators::nn::NNTrait; /// use orion::operators::nn::I8NN; /// use orion::numbers::FixedTrait; /// /// fn space_to_depth_example() -> Tensor<i8> { /// let mut shape = ArrayTrait::<usize>::new(); /// shape.append(1); /// shape.append(2); /// shape.append(2); /// shape.append(4); /// /// let mut data = ArrayTrait::new(); /// data.append(-3); /// data.append(0); /// data.append(0); /// data.append(0); /// data.append(-1); /// data.append(1); /// data.append(-2); /// data.append(-3); /// data.append(2); /// data.append(-2); /// data.append(-3); /// data.append(-3); /// data.append(-1); /// data.append(0); /// data.append(1); /// data.append(-3); /// let tensor = TensorTrait::new(shape.span(), data.span()); /// return NNTrait::space_to_depth(@tensor, 2); /// } /// >>> [[[[-3, 0]], [[2, -3]], [[0, 0]], [[-2, -3]], [[-1, -2]], [[-1, 1]], [[1, -3]], [[0, -3]]]] /// ``` /// fn space_to_depth(tensor: @Tensor<T>, blocksize: usize) -> Tensor<T>; /// # NNTrait::depth_to_space /// /// ```rust /// fn depth_to_space(tensor: @Tensor<T>, blocksize: usize) -> Tensor<T>; /// ``` /// /// DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. By default, mode = DCR. In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the following order: depth, column, and then row. /// /// ## Args /// /// * `tensor`(`@Tensor<T>`) - The input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width. 
/// * `blocksize`(`usize`) - The size of the blocks to move along [blocksize, blocksize]. /// * `mode`(felt252) - DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order. /// /// ## Returns /// /// A `Tensor<T>` of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]. /// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// use orion::operators::tensor::{TensorTrait, Tensor}; /// use orion::operators::tensor::{I8Tensor, I8TensorAdd}; /// use orion::numbers::NumberTrait; /// use orion::operators::nn::NNTrait; /// use orion::operators::nn::I8NN; /// use orion::numbers::FixedTrait; /// /// fn depth_to_space_example() -> Tensor<i8> { /// let mut shape = ArrayTrait::<usize>::new(); /// shape.append(1); /// shape.append(4); /// shape.append(2); /// shape.append(2); /// /// let mut data = ArrayTrait::new(); /// data.append(-2); /// data.append(0); /// data.append(-1); /// data.append(0); /// data.append(0); /// data.append(-3); /// data.append(2); /// data.append(1); /// data.append(-2); /// data.append(-2); /// data.append(0); /// data.append(-2); /// data.append(-1); /// data.append(-1); /// data.append(2); /// data.append(2); /// let tensor = TensorTrait::new(shape.span(), data.span()); /// return NNTrait::depth_to_space(@tensor, 2, 'DCR'); /// } /// >>> [[[[-2, 0, 0, -3], [-2, -1, -2, -1], [-1, 2, 0, 1], [0, 2, -2, 2]]]] /// ``` /// fn depth_to_space(tensor: @Tensor<T>, blocksize: usize, mode: felt252) -> Tensor<T>; /// # NNTrait::gemm /// /// ```rust /// fn gemm( /// A: Tensor<T>, /// B: Tensor<T>, /// C: Option<Tensor<T>>, /// alpha: Option<T>, /// beta: Option<T>, /// transA: bool, /// transB: bool /// ) -> Tensor<T>; /// ``` /// /// Performs General Matrix multiplication: https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 /// /// * A' = transpose(A) if transA else A /// * B' = transpose(B) if transB else B /// /// Compute `Y = alpha * A' * B' + beta * C`, where input 
tensor A has shape (M, K) or (K, M), input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N). /// `A` will be transposed before doing the computation if attribute `transA` is `true`, same for `B` and `transB`. /// /// ## Args /// /// * `A`(`Tensor<T>`) - Input tensor A. The shape of `A` should be (M, K) if `transA` is `false`, or (K, M) if `transA` is `true`. /// * `B`(`Tensor<T>`) - Input tensor B. The shape of `B` should be (K, N) if `transB` is `false`, or (N, K) if `transB` is `true`. /// * `C`(`Option<Tensor<T>>`) - Optional input tensor C. The shape of C should be unidirectional broadcastable to (M, N). /// * `alpha`(`Option<T>`) - Optional scalar multiplier for the product of input tensors `A * B`. /// * `beta`(`Option<T>`) - Optional scalar multiplier for input tensor `C`. /// * `transA`(`bool`) - Whether `A` should be transposed. /// * `transB`(`bool`) - Whether `B` should be transposed. /// /// ## Returns /// /// A `Tensor<T>` of shape (M, N). 
/// /// ## Examples /// /// ```rust /// mod input_0; /// mod input_1; /// mod input_2; /// /// use orion::operators::nn::NNTrait; /// use orion::numbers::FixedTrait; /// use orion::operators::nn::FP16x16NN; /// use orion::operators::tensor::FP16x16TensorPartialEq; /// /// fn gemm_all_attributes_example() -> Tensor<FP16x16> { /// let input_0 = input_0::input_0(); // shape [4;3] /// let input_1 = input_1::input_1(); // shape [5;4] /// let input_2 = input_2::input_2(); // shape [1;5] /// /// let y = NNTrait::gemm( /// input_0, /// input_1, /// Option::Some(input_2), /// Option::Some(FixedTrait::new(16384, false)), // 0.25 /// Option::Some(FixedTrait::new(22938, false)), // 0.35 /// true, /// true /// ); /// /// return y; /// } /// >>> tensor of shape [3;5] /// ```` /// fn gemm( A: Tensor<T>, B: Tensor<T>, C: Option<Tensor<T>>, alpha: Option<T>, beta: Option<T>, transA: bool, transB: bool ) -> Tensor<T>; /// /// # NNTrait::conv /// /// ```rust /// conv( /// X: @Tensor<T>, /// W: @Tensor<T>, /// B: Option<Span<T>>, /// auto_pad: Option<AUTO_PAD>, /// dilations: Option<Span<usize>>, /// group: Option<usize>, /// kernel_shape: Option<Span<usize>>, /// pads: Option<Span<usize>>, /// strides: Option<Span<usize>>, /// ) -> Tensor<T> /// ``` /// /// The convolution operator consumes an input tensor and a filter (input weight tensor), and computes the output. /// /// ## Args /// /// * `X`(`@Tensor<T>`) - Input data tensor, has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W if 2D, otherwise the size is (N x C x D1 x D2 ... x Dn). /// * `W`(`@Tensor<T>`) - The weight tensor, has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps if 2D, for more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x ... x kn). /// * `B`(`Option<@Tensor<T>>`) - Optional 1D bias to be added to the convolution, has size of M. 
/// * `auto_pad`(`Option<AUTO_PAD>`) - Default is NOTSET, auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. /// * `dilations`(`Option<Span<usize>>`) - Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis. /// * `group`(`Option<usize>`) - Default is 1, number of groups input channels and output channels are divided into. /// * `kernel_shape`(`Option<Span<usize>>`) - The shape of the convolution kernel. If not present, should be inferred from input W. /// * `pads`(`Option<Span<usize>>`) - Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis. /// * `strides`(`Option<Span<usize>>`) - Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. /// /// ## Returns /// /// A `Tensor<T>` that contains the result of the convolution. 
/// /// ## Examples /// /// ```rust /// use orion::operators::nn::NNTrait; /// use orion::numbers::FixedTrait; /// use orion::operators::nn::FP16x16NN; /// use orion::numbers::FP16x16; /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; /// /// /// fn example_conv() -> Tensor<FP16x16> { /// let mut shape = ArrayTrait::<usize>::new(); /// shape.append(1); /// shape.append(1); /// shape.append(3); /// shape.append(3); /// /// let mut data = ArrayTrait::new(); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// let W = TensorTrait::new(shape.span(), data.span()); /// /// let mut shape = ArrayTrait::<usize>::new(); /// shape.append(1); /// shape.append(1); /// shape.append(5); /// shape.append(5); /// /// let mut data = ArrayTrait::new(); /// data.append(FP16x16 { mag: 0, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 131072, sign: false }); /// data.append(FP16x16 { mag: 196608, sign: false }); /// data.append(FP16x16 { mag: 262144, sign: false }); /// data.append(FP16x16 { mag: 327680, sign: false }); /// data.append(FP16x16 { mag: 393216, sign: false }); /// data.append(FP16x16 { mag: 458752, sign: false }); /// data.append(FP16x16 { mag: 524288, sign: false }); /// data.append(FP16x16 { mag: 589824, sign: false }); /// data.append(FP16x16 { mag: 655360, sign: false }); /// data.append(FP16x16 { mag: 720896, sign: false }); /// data.append(FP16x16 { mag: 786432, sign: false }); /// data.append(FP16x16 { mag: 851968, sign: false }); /// data.append(FP16x16 { mag: 
917504, sign: false }); /// data.append(FP16x16 { mag: 983040, sign: false }); /// data.append(FP16x16 { mag: 1048576, sign: false }); /// data.append(FP16x16 { mag: 1114112, sign: false }); /// data.append(FP16x16 { mag: 1179648, sign: false }); /// data.append(FP16x16 { mag: 1245184, sign: false }); /// data.append(FP16x16 { mag: 1310720, sign: false }); /// data.append(FP16x16 { mag: 1376256, sign: false }); /// data.append(FP16x16 { mag: 1441792, sign: false }); /// data.append(FP16x16 { mag: 1507328, sign: false }); /// data.append(FP16x16 { mag: 1572864, sign: false }); /// let mut X = TensorTrait::new(shape.span(), data.span()); /// /// return NNTrait::conv( /// @X, /// @W, /// Option::None, /// Option::None, /// Option::None, /// Option::None, /// Option::Some(array![3, 3].span()), /// Option::Some(array![1, 1, 1, 1].span()), /// Option::None, /// ); /// } /// /// >>> [ /// [ /// [ /// [12.0, 21.0, 27.0, 33.0, 24.0], /// [33.0, 54.0, 63.0, 72.0, 51.0], /// [63.0, 99.0, 108.0, 117.0, 81.0], /// [93.0, 144.0, 153.0, 162.0, 111.0], /// [72.0, 111.0, 117.0, 123.0, 84.0], /// ] /// ] /// ] /// /// ```` /// fn conv( X: @Tensor<T>, W: @Tensor<T>, B: Option<Span<T>>, auto_pad: Option<orion::operators::nn::functional::conv::AUTO_PAD>, dilations: Option<Span<usize>>, group: Option<usize>, kernel_shape: Option<Span<usize>>, pads: Option<Span<usize>>, strides: Option<Span<usize>>, ) -> Tensor<T>; /// # NNTrait::conv_transpose /// /// ```rust /// conv_transpose( /// X: @Tensor<T>, /// W: @Tensor<T>, /// B: Option<@Tensor<T>>, /// auto_pad: Option<AUTO_PAD>, /// dilations: Option<Span<usize>>, /// group: Option<usize>, /// kernel_shape: Option<Span<usize>>, /// output_padding: Option<Span<usize>>, /// output_shape: Option<Span<usize>>, /// pads: Option<Span<usize>>, /// strides: Option<Span<usize>>, /// ) -> Tensor<T> /// ``` /// /// The convolution transpose operator consumes an input tensor and a input weight tensor, and computes the output. 
/// /// ## Args /// /// * `X`(`@Tensor<T>`) - Input data tensor, has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W if 2D, otherwise the size is (N x C x D1 x D2 ... x Dn). /// * `W`(`@Tensor<T>`) - The weight tensor, has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps if 2D, for more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x ... x kn). /// * `B`(`Option<@Tensor<T>>`) - Optional 1D bias to be added to the convolution, has size of M. /// * `auto_pad`(`Option<AUTO_PAD>`) - Default is NOTSET, auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = input_shape[i] * strides[i]` for each axis `i`. /// * `dilations`(`Option<Span<usize>>`) - Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis. /// * `group`(`Option<usize>`) - Default is 1, number of groups input channels and output channels are divided into. /// * `kernel_shape`(`Option<Span<usize>>`) - The shape of the convolution kernel. If not present, should be inferred from input W. /// * `output_padding`(`Option<Span<usize>>`) - Additional elements added to the side with higher coordinate indices in the output. Each padding value in "output_padding" must be less than the corresponding stride/dilation dimension. By default, this attribute is a zero vector. /// * `output_shape`(`Option<Span<usize>>`) - The shape of the output can be explicitly set which will cause pads values to be auto generated. If output_shape is specified pads values are ignored. See doc for details for equations to generate pads. /// * `pads`(`Option<Span<usize>>`) - Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. 
The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis. /// * `strides`(`Option<Span<usize>>`) - Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. /// /// ## Returns /// /// A `Tensor<T>` that contains the result of the convolution transpose. /// /// ## Examples /// /// ```rust /// use orion::operators::nn::NNTrait; /// use orion::numbers::FixedTrait; /// use orion::operators::nn::FP16x16NN; /// use orion::numbers::FP16x16; /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; /// /// fn example_conv_transpose() -> Tensor<FP16x16> { /// let mut shape = ArrayTrait::<usize>::new(); /// shape.append(1); /// shape.append(2); /// shape.append(3); /// shape.append(3); /// /// let mut data = ArrayTrait::new(); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false 
}); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// let W = TensorTrait::new(shape.span(), data.span()); /// /// let mut shape = ArrayTrait::<usize>::new(); /// shape.append(1); /// shape.append(1); /// shape.append(3); /// shape.append(3); /// /// let mut data = ArrayTrait::new(); /// data.append(FP16x16 { mag: 0, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 131072, sign: false }); /// data.append(FP16x16 { mag: 196608, sign: false }); /// data.append(FP16x16 { mag: 262144, sign: false }); /// data.append(FP16x16 { mag: 327680, sign: false }); /// data.append(FP16x16 { mag: 393216, sign: false }); /// data.append(FP16x16 { mag: 458752, sign: false }); /// data.append(FP16x16 { mag: 524288, sign: false }); /// let mut X = TensorTrait::new(shape.span(), data.span()); /// /// return NNTrait::conv_transpose( /// @X, /// @W, /// Option::None, /// Option::None, /// Option::None, /// Option::None, /// Option::None, /// Option::None, /// Option::None, /// Option::None, /// Option::None, /// ); /// /// } /// >>> [ /// [ /// [ /// [0.0, 1.0, 3.0, 3.0, 2.0], /// [3.0, 8.0, 15.0, 12.0, 7.0], /// [9.0, 21.0, 36.0, 27.0, 15.0], /// [9.0, 20.0, 33.0, 24.0, 13.0], /// [6.0, 13.0, 21.0, 15.0, 8.0], /// ], /// [ /// [0.0, 1.0, 3.0, 3.0, 2.0], /// [3.0, 8.0, 15.0, 12.0, 7.0], /// [9.0, 21.0, 36.0, 27.0, 15.0], /// [9.0, 20.0, 33.0, 24.0, 13.0], /// [6.0, 13.0, 21.0, 15.0, 8.0], /// ], /// ] /// ] /// /// ```` /// fn conv_transpose( X: @Tensor<T>, W: @Tensor<T>, B: Option<@Tensor<T>>, auto_pad: Option<orion::operators::nn::functional::conv_transpose::AUTO_PAD>, dilations: Option<Span<usize>>, group: Option<usize>, kernel_shape: Option<Span<usize>>, output_padding: Option<Span<usize>>, output_shape: Option<Span<usize>>, pads: Option<Span<usize>>, strides: Option<Span<usize>>, ) -> Tensor<T>; /// # NNTrait::col2im 
/// /// ```rust /// col2im( /// data: @Tensor<T>, /// image_shape: Span<usize>, /// block_shape: Span<usize>, /// dilations: Option<Span<usize>>, /// pads: Option<Span<usize>>, /// strides: Option<Span<usize>>, /// ) -> Tensor<T> /// ``` /// /// The operator rearranges column blocks back into a multidimensional image /// /// Col2Im behaves similarly to PyTorch's fold https://pytorch.org/docs/stable/generated/torch.nn.Fold.html, but it only supports batched multi-dimensional image tensors. Another implementation in Python with N-dimension support can be found at https://github.com/f-dangel/unfoldNd/. /// /// ## Args /// /// * `data`(`@Tensor<T>`) - Input data tensor to be rearranged from column blocks back into an image. This is a 3-dimensional tensor containing [N, C * n-ary-product(block_shape), L], where N is batch dimension, C is image channel dimension and L is number of blocks. /// * `image_shape`(`Span<usize>`) - The shape of the spatial dimensions of the image after rearranging the column blocks.This is a 1-dimensional tensor with size of at least 2, containing the value [H_img, W_img] for a 2-D image or [dim_i1, dim_i2, ..., dim_iN] for a N-D image. /// * `block_shape`(`Span<usize>`) - The shape of the block to apply on the input.This is a 1-dimensional tensor of size of at least 2, containing the value [H_block, W_block] for a 2-D image or [dim_b1, dim_b2, ..., dim_bN] for a N-D block.This is the block-shape before dilation is applied to it. /// * `dilations`(`Option<Span<usize>>`) - 1-dimensional tensor with dilation value along each spatial axis of the image. If not present, the dilation defaults to 1 along each spatial axis of the image. /// * `pads`(`Option<Span<usize>>`) - 1-dimensional tensor with padding value for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. 
`pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. If not present, the padding defaults to 0 along start and end of each spatial axis. /// * `strides`(`Option<Span<usize>>`) - 1-dimensional tensor with stride value along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. /// /// ## Returns /// /// A `Tensor<T>` output tensor produced by rearranging blocks into an image. /// /// ## Examples /// /// ```rust /// use orion::operators::nn::NNTrait; /// use orion::numbers::FixedTrait; /// use orion::operators::nn::FP16x16NN; /// use orion::numbers::FP16x16; /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; /// /// fn example_col2im() -> Tensor<FP16x16> { /// let mut shape = ArrayTrait::<usize>::new(); /// shape.append(1); /// shape.append(5); /// shape.append(5); /// /// let mut data = ArrayTrait::new(); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 393216, sign: false }); /// data.append(FP16x16 { mag: 720896, sign: false }); /// data.append(FP16x16 { mag: 1048576, sign: false }); /// data.append(FP16x16 { mag: 1376256, sign: false }); /// data.append(FP16x16 { mag: 131072, sign: false }); /// data.append(FP16x16 { mag: 458752, sign: false }); /// data.append(FP16x16 { mag: 786432, sign: false }); /// data.append(FP16x16 { mag: 1114112, sign: false }); /// data.append(FP16x16 { mag: 1441792, sign: false }); /// data.append(FP16x16 { mag: 196608, sign: false }); /// data.append(FP16x16 { mag: 524288, sign: false }); /// data.append(FP16x16 { mag: 851968, sign: false }); /// data.append(FP16x16 { mag: 1179648, sign: false }); /// data.append(FP16x16 { mag: 1507328, sign: false }); /// data.append(FP16x16 { mag: 262144, sign: false }); /// data.append(FP16x16 { mag: 589824, sign: false }); /// data.append(FP16x16 { 
mag: 917504, sign: false }); /// data.append(FP16x16 { mag: 1245184, sign: false }); /// data.append(FP16x16 { mag: 1572864, sign: false }); /// data.append(FP16x16 { mag: 327680, sign: false }); /// data.append(FP16x16 { mag: 0, sign: false }); /// data.append(FP16x16 { mag: 983040, sign: false }); /// data.append(FP16x16 { mag: 1310720, sign: false }); /// data.append(FP16x16 { mag: 1638400, sign: false }); /// let mut X = TensorTrait::new(shape.span(), data.span()); /// /// let image_shape = array![5, 5].span(); /// let block_shape = array![1, 5].span(); /// /// return NNTrait::col2im( /// @X, image_shape, block_shape, Option::None, Option::None, Option::None, /// ); /// /// /// } /// >>> [ /// [ /// [ /// [1.0, 2.0, 3.0, 4.0, 5.0], /// [6.0, 7.0, 8.0, 9.0, 0.0], /// [11.0, 12.0, 13.0, 14.0, 15.0], /// [16.0, 17.0, 18.0, 19.0, 20.0], /// [21.0, 22.0, 23.0, 24.0, 25.0], /// ] /// ] /// ] /// /// ```` /// /// fn col2im( data: @Tensor<T>, image_shape: Span<usize>, block_shape: Span<usize>, dilations: Option<Span<usize>>, pads: Option<Span<usize>>, strides: Option<Span<usize>>, ) -> Tensor<T>; /// # NNTrait::grid_sample /// /// ```rust /// fn grid_sample( /// X: @Tensor<T>, /// grid: @Tensor<T>, /// align_corner: Option<usize>, /// mode: Option<MODE>, /// padding_mode: Option<PADDING_MODE>, /// ) -> Tensor<T>; /// ``` /// /// Given an input X and a flow-field grid, computes the output Y using X values and pixel locations from the grid. /// /// ## Args /// /// * `X`(`@Tensor<T>`) - Input tensor of shape (N, C, D1, D2, ..., Dr), where N is the batch size, C is the number of channels, D1, D2, ..., Dr are the spatial dimensions. /// * `grid`(`@Tensor<T>`) - Input offset of shape (N, D1_out, D2_out, ..., Dr_out, r), where D1_out, D2_out, ..., Dr_out are the spatial dimensions of the grid and output, and r is the number of spatial dimensions. Grid specifies the sampling locations normalized by the input spatial dimensions. 
/// * `align_corners`(`Option<usize>`) - default is 0. If align_corners=1, the extrema are considered as referring to the center points of the input's corner pixels. If align_corners=0, they are instead considered as referring to the corner points of the input's corner pixels /// * `mode`(`Option<MODE>`) - default is linear. Three interpolation modes: linear (default), nearest and cubic. /// * `padding_mode`(`Option<PADDING_MODE>`) - default is zeros. Support padding modes for outside grid values: `zeros`(default), `border`, `reflection`. /// /// ## Returns /// /// A `Tensor<T>` of shape (N, C, D1_out, D2_out, ..., Dr_out) of the sampled values. /// /// ## Example /// /// ```rust /// use orion::operators::nn::NNTrait; /// use orion::numbers::FixedTrait; /// use orion::operators::nn::FP16x16NN; /// use orion::numbers::FP16x16; /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; /// /// fn example_grid_sample() -> Tensor<FP16x16> { /// /// let mut shape = ArrayTrait::<usize>::new(); /// shape.append(1); /// shape.append(2); /// shape.append(4); /// shape.append(2); /// /// let mut data = ArrayTrait::new(); /// data.append(FP16x16 { mag: 655360, sign: true }); /// data.append(FP16x16 { mag: 655360, sign: true }); /// data.append(FP16x16 { mag: 327680, sign: true }); /// data.append(FP16x16 { mag: 327680, sign: true }); /// data.append(FP16x16 { mag: 13107, sign: true }); /// data.append(FP16x16 { mag: 13107, sign: true }); /// data.append(FP16x16 { mag: 655360, sign: false }); /// data.append(FP16x16 { mag: 655360, sign: false }); /// data.append(FP16x16 { mag: 655360, sign: false }); /// data.append(FP16x16 { mag: 655360, sign: false }); /// data.append(FP16x16 { mag: 13107, sign: true }); /// data.append(FP16x16 { mag: 13107, sign: true }); /// data.append(FP16x16 { mag: 327680, sign: false }); /// data.append(FP16x16 { mag: 327680, sign: false }); /// data.append(FP16x16 { mag: 655360, sign: false }); /// data.append(FP16x16 { mag: 655360, sign: 
false }); /// /// let mut grid = TensorTrait::new(shape.span(), data.span()); /// /// /// let mut shape = ArrayTrait::<usize>::new(); /// shape.append(1); /// shape.append(1); /// shape.append(3); /// shape.append(2); /// /// let mut data = ArrayTrait::new(); /// data.append(FP16x16 { mag: 0, sign: false }); /// data.append(FP16x16 { mag: 65536, sign: false }); /// data.append(FP16x16 { mag: 131072, sign: false }); /// data.append(FP16x16 { mag: 196608, sign: false }); /// data.append(FP16x16 { mag: 262144, sign: false }); /// data.append(FP16x16 { mag: 327680, sign: false }); /// let mut X = TensorTrait::new(shape.span(), data.span()); /// /// /// return NNTrait::grid_sample( /// @X, @grid, Option::None, Option::None, Option::None, /// ); /// /// } /// ///} /// >>> [ /// [ /// [ /// [0.0000, 0.0000, 1.7000, 0.0000], /// [0.0000, 1.7000, 0.0000, 0.0000] /// ] /// ] /// ] /// /// ```` fn grid_sample( X: @Tensor<T>, grid: @Tensor<T>, align_corner: Option<usize>, mode: Option<orion::operators::nn::functional::grid_sample::MODE>, padding_mode: Option<orion::operators::nn::functional::grid_sample::PADDING_MODE>, ) -> Tensor<T>; }
https://github.com/gizatechxyz/orion
src/operators/nn/functional.cairo
// Module declarations for the functional (free-function) implementations
// backing the NNTrait operators.
mod relu;
mod leaky_relu;
mod sigmoid;
mod softmax;
mod softmax_zero;
mod softsign;
mod softplus;
mod linear;
mod logsoftmax;
mod thresholded_relu;
mod hard_sigmoid;
mod gemm;
mod grid_sample;
mod col2im;
mod conv_transpose;
mod depth_to_space;
mod space_to_depth;
mod conv;
https://github.com/gizatechxyz/orion
src/operators/nn/functional/col2im.cairo
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{stride};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,};
use orion::operators::vec::{NullableVec, NullableVecImpl};

/// Col2Im: rearranges column blocks (the output of an Im2Col-style transform)
/// back into a batched multidimensional image, summing overlapping patches.
///
/// * `data` - tensor laid out as (N, C * prod(block_shape), L) — see the reshape below.
/// * `image_shape` - spatial shape of the reconstructed image.
/// * `block_shape` - spatial shape of each block (kernel).
/// * `dilations`/`pads`/`strides` - optional; default to 1 / (0, 0) / 1 per spatial dim.
///
/// Returns a tensor of shape (N, C, *image_shape).
fn col2im<T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>, +Add<T>, +Mul<T>,>(
    data: @Tensor<T>, image_shape: Span<usize>, block_shape: Span<usize>, dilations: Option<Span<usize>>, pads: Option<Span<usize>>, strides: Option<Span<usize>>,
) -> Tensor<T> {
    // Default dilations: 1 for every image dimension.
    let dilations = match dilations { Option::Some(dilations) => dilations, Option::None => { let mut dilations: Array<usize> = array![]; let mut i = 0; while i != image_shape.len() { dilations.append(1); i += 1; }; dilations.span() }, };
    // Default pads: (begin, end) = (0, 0) for every image dimension.
    let pads = match pads { Option::Some(pads) => pads, Option::None => { let mut pads: Array<usize> = array![]; let mut i = 0; while i != image_shape.len() { pads.append(0); pads.append(0); i += 1; }; pads.span() }, };
    // Default strides: 1 for every image dimension.
    let strides = match strides { Option::Some(strides) => strides, Option::None => { let mut strides: Array<usize> = array![]; let mut i = 0; while i != image_shape.len() { strides.append(1); i += 1; }; strides.span() }, };
    // bl = elements per block; C = channel count recovered from axis 1.
    let bl = prod(block_shape, 0); let C = *(*data).shape.at(1) / bl;
    // Reshape to (N, C, bl, ...trailing dims...) so each (n, c) pair can be sliced out below.
    let mut new_shape: Array<i32> = array![ (*(*data).shape.at(0)).try_into().unwrap(), C.try_into().unwrap(), bl.try_into().unwrap() ]; let mut i = 2; while i != (*data) .shape .len() { new_shape.append((*(*data).shape.at(i)).try_into().unwrap()); i += 1; }; let data = data.reshape(new_shape.span(), false);
    let mut res: Array<T> = array![]; let data_stride = stride(data.shape);
    // For every (batch, channel) slice, scatter the columns back into image layout
    // and concatenate the reconstructed images into the flat result buffer.
    let mut n = 0; while n != *data .shape .at(0) { let mut c = 0; while c != *data .shape .at(1) { let data_n_c = TensorTrait::new( SpanTrait::slice(data.shape, 2, data.shape.len() - 2), SpanTrait::slice( data.data, n * *data_stride.at(0) + c * *data_stride.at(1), *data_stride.at(1) ) ); let mut out = col2im_naive_implementation( @data_n_c, image_shape, block_shape, dilations, pads, strides ); let mut i = 0; while i != out.len() { res.append(out.at(i)); i += 1; }; c += 1; }; n += 1; };
    // Final shape: (N, C, *image_shape).
    let mut new_shape = array![*data.shape.at(0), *data.shape.at(1)]; let mut i = 0; while i != image_shape.len() { new_shape.append(*image_shape.at(i)); i += 1; };
    TensorTrait::new(new_shape.span(), res.span())
}

// Returns row `row` of a 2D tensor as a span over the underlying data (no copy).
fn get_image<T, +Drop<T>, +Copy<T>>(self: @Tensor<T>, row: usize) -> Span<T> { assert((*self).shape.len() == 2, 'Expected a 2D tensor'); let row_length = *self.shape[1]; let start = row * row_length; (*self).data.slice(start, row_length) }

/// Reference col2im for a single slice: for every kernel offset and every column
/// position, computes the destination image coordinate and accumulates the column
/// value there, skipping coordinates that land in the padding region.
fn col2im_naive_implementation<
    T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>, +Add<T>,
>(
    data: @Tensor<T>, image_shape: Span<usize>, kernel_shape: Span<usize>, dilations: Span<usize>, pads: Span<usize>, strides: Span<usize>,
) -> NullableVec<T> {
    let n_dims = pads.len() / 2;
    col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides);
    // dim_col[i] = number of sliding-window positions along image dimension i.
    let mut dim_col: Array<usize> = array![]; let mut i = 0; while i != n_dims { dim_col .append( (*image_shape.at(i) + (*pads.at(i) + *pads.at(i + n_dims)) - (*dilations.at(i) * (*kernel_shape.at(i) - 1) + 1)) / *strides.at(i) + 1 ); i += 1; }; let dim_col = dim_col.span();
    let stride_img = stride(image_shape);
    // Touch the last index once so every unset entry of the vec reads back as zero.
    let mut data_im = NullableVecImpl::new(); data_im.set(*image_shape.at(0) * *stride_img.at(0) - 1, NumberTrait::zero());
    let kernel_size = prod(kernel_shape, 0); let col_size = prod(dim_col, 0);
    // Coordinates that would be negative (inside the left pad) are wrapped to
    // image_shape + |neg| — always >= image_shape — so is_out() rejects them,
    // since usize cannot represent a negative index directly.
    let mut c_col = 0; while c_col != kernel_size { let offset = get_indices(c_col, kernel_shape).span(); let mut col = 0; while col != col_size { let ind_col = get_indices(col, dim_col).span(); let mut ind_im: Array<usize> = array![]; let mut i = 0; while i != n_dims { if (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)) < *pads .at(i) { let neg_index = *pads.at(i) - (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)); ind_im.append(*image_shape.at(i) + neg_index); } else { ind_im .append( *ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i) - *pads.at(i) ); } i += 1; }; let ind_im = ind_im.span(); if !is_out(ind_im, image_shape) { let mut index = 0; let mut i = 0; while i != image_shape.len() { index += *stride_img.at(i) * *ind_im.at(i); i += 1; }; data_im.set(index, data_im.at(index) + *(*data).data.at(c_col * col_size + col)); } col += 1; }; c_col += 1; };
    data_im
}

/// Validates X against the requested output: axis 0 must be a multiple of the
/// kernel size, and axis 1 must equal the total number of sliding-window blocks
/// implied by output_shape / pads / dilations / strides.
fn col2im_shape_check<T, +TensorTrait<T>, +Copy<T>, +Drop<T>,>(
    X: @Tensor<T>, output_shape: Span<usize>, kernel_shape: Span<usize>, dilations: Span<usize>, pads: Span<usize>, strides: Span<usize>,
) { let n_input_plane = *(*X).shape.at(0); let kernel_size = prod(kernel_shape, 0); assert(n_input_plane % kernel_size == 0, 'wrong input dimension'); let input_length = *(*X).shape.at(1); let n_dims = output_shape.len(); let mut n_blocks: Array<usize> = array![]; let mut i = 0; while i != n_dims { n_blocks .append( (*output_shape.at(i) + (*pads.at(i) + *pads.at(i + n_dims)) - *dilations.at(i) * (*kernel_shape.at(i) - 1) - 1) / *strides.at(i) + 1 ); i += 1; }; let block_size = prod(n_blocks.span(), 0); assert(input_length == block_size, 'input_length != block_size'); }

// Unravels a flat row-major index into per-dimension indices for `shape`.
// Remainders are collected from the innermost dimension outwards, then reversed.
fn get_indices(index: usize, shape: Span<usize>,) -> Array<usize> { let mut i = index; let mut res: Array<usize> = array![]; let mut k = shape.len() - 1; while k != 0 { let m = i % *shape.at(k); res.append(m); i -= m; i /= *shape.at(k); k -= 1; }; let mut new_res: Array<usize> = array![]; new_res.append(i); let mut i = shape.len() - 1; while i != 0 { new_res.append(*res.at(i - 1)); i -= 1; }; new_res }

// True when any coordinate in `ind` falls outside `shape`.
// NOTE(review): the `i < 0` branch can never fire for usize — dead code, kept as-is;
// negative positions are encoded as >= shape by the caller and caught by `i >= s`.
fn is_out(ind: Span<usize>, shape: Span<usize>,) -> bool { let mut n = 0; let is_out = loop { if n == ind.len() { break false; } let s = *shape.at(n); let i = *ind.at(n); if i < 0 { break true; } if i >= s { break true; } n += 1; }; is_out }

// Product of the elements of `pA` from index `start` to the end (1 for an empty tail).
fn prod<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>,>(
    pA: Span<T>, start: usize
) -> T { let mut i = start; let mut prod = NumberTrait::one(); while i != pA.len() { prod = prod * (*pA.at(i)); i += 1; }; prod }
https://github.com/gizatechxyz/orion
src/operators/nn/functional/conv.cairo
use core::debug::PrintTrait;
use orion::numbers::NumberTrait;
use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,};
use orion::operators::vec::{NullableVec, NullableVecImpl};
use orion::operators::tensor::core::{stride};

// ONNX-style auto-padding policies for the convolution.
#[derive(Copy, Drop)] enum AUTO_PAD { NOTSET, SAME_UPPER, SAME_LOWER, VALID }

/// N-dimensional convolution of X with kernel W.
///
/// * `X` - input of shape (N, C, d1, ..., dn); at least 3 dimensions.
/// * `W` - kernel of shape (M, C/group, k1, ..., kn).
/// * `B` - optional bias of length M, added per output channel.
/// * `auto_pad` - padding policy (default NOTSET, i.e. use `pads`).
/// * `dilations`/`kernel_shape`/`pads`/`strides` - optional; defaulted from X/W shapes.
/// * `group` - number of channel groups (default 1); group > 1 recurses per group.
///
/// Returns a tensor of shape (N, M, o1, ..., on).
///
/// Dedicated fast paths exist for 1D/2D/3D inputs (shape.len() == 3/4/5); a
/// generic index-arithmetic path handles higher ranks.
fn conv<
    T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>, +Add<T>, +Mul<T>, +AddEq<T>, +PrintTrait<T>,
>(
    X: @Tensor<T>, W: @Tensor<T>, B: Option<Span<T>>, auto_pad: Option<AUTO_PAD>, dilations: Option<Span<usize>>, group: Option<usize>, kernel_shape: Option<Span<usize>>, pads: Option<Span<usize>>, strides: Option<Span<usize>>,
) -> Tensor<T> {
    let nd = (*X).shape.len() - 2; assert((*X).shape.len() >= 3, 'X must have at least 3 dim');
    // Default dilations: 1 per spatial dimension.
    let dilations = match dilations { Option::Some(dilations) => dilations, Option::None => { let mut dilations: Array<usize> = array![]; let mut i = 2; while i != (*X).shape.len() { dilations.append(1); i += 1; }; dilations.span() }, };
    // Default kernel_shape: taken from W's spatial dimensions.
    let kernel_shape = match kernel_shape { Option::Some(kernel_shape) => kernel_shape, Option::None => { let mut kernel_shape: Array<usize> = array![]; let mut i = 2; while i != (*W).shape.len() { kernel_shape.append(*(*W).shape.at(i)); i += 1; }; kernel_shape.span() }, };
    // Default pads: (begin, end) = (0, 0) per spatial dimension.
    let pads = match pads { Option::Some(pads) => pads, Option::None => { let mut pads: Array<usize> = array![]; let mut i = 2; while i != (*X).shape.len() { pads.append(0); pads.append(0); i += 1; }; pads.span() }, };
    // Default strides: 1 per spatial dimension.
    let strides = match strides { Option::Some(strides) => strides, Option::None => { let mut strides: Array<usize> = array![]; let mut i = 2; while i != (*X).shape.len() { strides.append(1); i += 1; }; strides.span() }, };
    let group = match group { Option::Some(group) => group, Option::None => { 1 }, };
    let auto_pad = match auto_pad { Option::Some(auto_pad) => auto_pad, Option::None => { AUTO_PAD::NOTSET }, };
    // Grouped convolution: slice X and W per group, recurse with group == 1,
    // then interleave the per-group results (and add bias once at the end).
    if group > 1 { let sN = *(*X).shape.at(0); let mut res_b: Array<usize> = array![]; let mut res_cv = array![]; let mut td = 0; let mg = *(*W).shape.at(0) / group; let dw = *(*W).shape.at(1); let X_stride = stride((*X).shape); let mut gx_shape = array![1, dw]; let mut i = 2; while i != (*X).shape.len() { gx_shape.append(*(*X).shape.at(i)); i += 1; }; let gx_shape = gx_shape.span(); let W_stride = stride((*W).shape); let mut gw_shape = array![mg]; let mut i = 1; while i != (*W).shape.len() { gw_shape.append(*(*W).shape.at(i)); i += 1; }; let gw_shape = gw_shape.span(); let mut b = 0; while b != sN { let mut g = 0; while g != group { let gx = TensorTrait::new( gx_shape, SpanTrait::slice( (*X).data, b * *X_stride.at(0) + (g * dw) * *X_stride.at(1), *X_stride.at(1) * dw ) ); let gw = TensorTrait::new( gw_shape, SpanTrait::slice((*W).data, (g * mg) * *W_stride.at(0), *W_stride.at(0) * mg) ); let cv = conv( @gx, @gw, Option::None, Option::Some(auto_pad), Option::Some(dilations), Option::Some(1), Option::Some(kernel_shape), Option::Some(pads), Option::Some(strides) ); if b == 0 { td += *cv.shape.at(1); } res_b.append(b); res_cv.append(cv); g += 1; }; b += 1; }; let res_b = res_b.span(); let res_cv = res_cv.span(); let mut final_shape = array![sN, td]; let mut cv = *res_cv.at(0); let mut i = 2; while i != cv.shape.len() { final_shape.append(*cv.shape.at(i)); i += 1; }; let final_shape = final_shape.span(); let mut final: Array<T> = array![]; let mut p = 0; let mut i = 0; while i != res_b.len() { let cv = *res_cv.at(i); let mut n = 0; while n != cv.data.len() { final.append(*cv.data.at(n)); n += 1; }; p += *cv.shape.at(1); if p >= td { p = 0; } i += 1; }; let final = final.span(); let final = match B { Option::Some(B) => { let mut final_b: Array<T> = array![]; let final_stride = stride(final_shape); let mut i = 0; while i != *final_shape.at(0) { let mut j = 0; while j != B.len() { let mut k = 0; while k != *final_stride.at(1) { final_b .append( *final.at(i * *final_stride.at(0) + j * *final_stride.at(1) + k) + *B.at(j) ); k += 1; }; j += 1; }; i += 1; }; final_b.span() }, Option::None => { final }, }; return TensorTrait::new(final_shape, final); }
    // group == 1
    if *dilations.at(0) != 1 || min(dilations.clone()) != max(dilations.clone()) {
        // computation of the dilated kernel
        // NOTE(review): new_w / new_w_arr are built here but never consumed below —
        // W is not replaced by the dilated kernel in this chunk; confirm against upstream.
        let nd = dilations.len(); let mut new_kernel_shape: Array<usize> = array![]; let mut new_shape: Array<usize> = array![]; new_shape.append_span(SpanTrait::slice((*W).shape, 0, (*W).shape.len() - nd)); let mut i = 0; while i != dilations.len() { let d = *dilations.at(i); let di = (*W).shape.len() - nd + i; new_shape.append(*(*W).shape.at(di) + (*(*W).shape.at(di) - 1) * (d - 1)); new_kernel_shape.append(*kernel_shape.at(i) + (*kernel_shape.at(i) - 1) * (d - 1)); i += 1; }; let new_shape = new_shape.span(); let new_w_strides = stride(new_shape); let mut new_w = NullableVecImpl::new(); new_w.set(*new_shape.at(0) * *new_w_strides.at(0) - 1, NumberTrait::zero()); let mut indices = array![]; indices.append(arange(0, *new_shape.at(0), 1)); indices.append(arange(0, *new_shape.at(1), 1)); let mut i = 0; while i != dilations.len() { let d = *dilations.at(i); let di = (*W).shape.len() - nd + i; indices.append(arange(0, *new_shape.at(di), d)); i += 1; }; let set_of_all_indices = cartesian(indices.span()); let mut new_w_arr: Array<T> = array![]; let mut i = 0; let mut prev = 0; while i != (*W).data.len() { let nd_index = *set_of_all_indices.at(i); let mut flatten_index = 0; let mut j = 0; while j != nd_index.len() { flatten_index += *nd_index.at(j) * *new_w_strides.at(j); j += 1; };
        // FIX: increment j INSIDE the zero-filling loop. Previously `j += 1` sat
        // outside the while body, so any gap (flatten_index > prev + 1) looped forever.
        if flatten_index > prev + 1 { let mut j = prev + 1; while j != flatten_index { new_w_arr.append(NumberTrait::zero()); j += 1; }; } new_w_arr.append(*(*W).data.at(i)); new_w.set(flatten_index, *(*W).data.at(i)); prev = flatten_index; i += 1; }; }
    // Resolve the effective pads from the auto_pad policy.
    // NOTE(review): these branches read shape.at(i) (batch/channel axes) rather than
    // shape.at(i + 2) (spatial axes), and VALID computes the same pads as SAME_UPPER —
    // both look suspicious; confirm against ONNX auto_pad semantics.
    let pads = match auto_pad { AUTO_PAD::NOTSET => { pads }, AUTO_PAD::SAME_UPPER => { let mut head: Array<usize> = array![]; let mut tail: Array<usize> = array![]; let mut i = 0; while i != nd { let d = *(*X).shape.at(i); let target_size = (d + *strides.at(i) - 1) / *strides.at(i); let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d; let pad_head = pad_needed / 2; let pad_tail = pad_needed - pad_head; head.append(pad_head); tail.append(pad_tail); i += 1; }; head.append_span(tail.span()); let pads = head.span(); pads }, AUTO_PAD::SAME_LOWER => { let mut head: Array<usize> = array![]; let mut tail: Array<usize> = array![]; let mut i = 0; while i != nd { let d = *(*X).shape.at(i); let target_size = (d + *strides.at(i) - 1) / *strides.at(i); let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d; let pad_head = (pad_needed + 1) / 2; let pad_tail = pad_needed - pad_head; head.append(pad_head); tail.append(pad_tail); i += 1; }; head.append_span(tail.span()); let pads = head.span(); pads }, AUTO_PAD::VALID => { let mut head: Array<usize> = array![]; let mut tail: Array<usize> = array![]; let mut i = 0; while i != nd { let d = *(*X).shape.at(i); let target_size = (d + *strides.at(i) - 1) / *strides.at(i); let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d; let pad_head = pad_needed / 2; let pad_tail = pad_needed - pad_head; head.append(pad_head); tail.append(pad_tail); i += 1; }; head.append_span(tail.span()); let pads = head.span(); pads }, };
    // 1D fast path: X is (N, C, H).
    if (*X).shape.len() == 3 { let sN = *(*X).shape.at(0); let sC = *(*X).shape.at(1); let sH = *(*X).shape.at(2); let sM = *(*W).shape.at(0); let kh = *kernel_shape.at(0); let sth = *strides.at(0); let h_out = ((sH - kh + *pads.at(0) + *pads.at(1)) / sth) + 1; let h0 = *pads.at(0); let oh: i32 = -1 * (kh % 2).into(); let bh: i32 = -h0.into(); let eh = h_out * sth; let mut res = NullableVecImpl::new(); let res_shape = array![sN, sM, h_out].span(); let res_strides = stride(res_shape); res.set(sN * *res_strides.at(0) - 1, NumberTrait::zero()); match B { Option::Some(B) => { let mut i = 0; while i != sN { let mut j = 0; while j != sM { let b_j = *B.at(j); let mut k = 0; while k != h_out { res.set(i * *res_strides.at(0) + j * *res_strides.at(1) + k, b_j); k += 1; }; j += 1; }; i += 1; }; }, Option::None => {}, } let mut n = 0; while n != sN { let mut nw = 0; while nw != sM { let mut c = 0; while c != sC { let w = SpanTrait::slice((*W).data, nw * sC * kh + c * kh, kh); let mut io = bh; while io < eh.into() { let hr = (io - bh) / sth.into(); if hr < h_out.into() { let i = io + (kh % 2).into(); let ih1 = I32Number::max(0, i + oh).into(); let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into(); let img = SpanTrait::slice((*X).data, n * sN + c * sC + ih1, ih2 - ih1); let s = if w.len() != img.len() { let jh1 = I32Number::max(0, -i - oh).into(); let jh2 = I32Number::min(sH.into() - (i + oh), kh.into()).into(); let w_ = SpanTrait::slice(w, jh1, jh2 - jh1); assert(w_.len() == img.len(), 'unexpected w and img len'); dot(img, w_) } else { dot(img, w) }; let hr = if hr < 0 { *res_strides.at(1) - hr.into() } else { hr.into() }; res .set( n * *res_strides.at(0) + nw * *res_strides.at(1) + hr, res.at(n * *res_strides.at(0) + nw * *res_strides.at(1) + hr) + s ); } io += sth.into(); }; c += 1; }; nw += 1; }; n += 1; }; let mut res_data: Array<T> = array![]; let mut i = 0; while i != res.len() { res_data.append(res.at(i)); i += 1; }; return TensorTrait::new(res_shape, res_data.span()); }
    // 2D fast path: X is (N, C, H, W).
    if (*X).shape.len() == 4 { let sN = *(*X).shape.at(0); let sC = *(*X).shape.at(1); let sH = *(*X).shape.at(2); let sW = *(*X).shape.at(3); let sM = *(*W).shape.at(0); let kh = *kernel_shape.at(0); let kw = *kernel_shape.at(1); let sth = *strides.at(0); let stw = *strides.at(1); let h_out = ((sH - kh + *pads.at(0) + *pads.at(2)) / sth) + 1; let w_out = ((sW - kw + *pads.at(1) + *pads.at(3)) / stw) + 1; let h0 = *pads.at(0); let w0 = *pads.at(1); let oh: i32 = -1 * (kh % 2).into(); let ow: i32 = -1 * (kw % 2).into(); let bh: i32 = -h0.into(); let bw: i32 = -w0.into(); let eh = h_out * sth; let ew = w_out * stw; let mut res = NullableVecImpl::new(); let res_shape = array![sN, sM, h_out, w_out].span(); let res_strides = stride(res_shape); res.set(sN * *res_strides.at(0) - 1, NumberTrait::zero()); match B { Option::Some(B) => { let mut i = 0; while i != sN { let mut j = 0; while j != sM { let b_j = *B.at(j); let mut k = 0; while k != h_out { let mut l = 0; while l != w_out { res .set( i * *res_strides.at(0) + j * *res_strides.at(1) + k * *res_strides.at(2) + l, b_j ); l += 1; }; k += 1; }; j += 1; }; i += 1; }; }, Option::None => {}, } let mut n = 0; while n != sN { let mut nw = 0; while nw != sM { let mut c = 0; while c != sC { let w = SpanTrait::slice( (*W).data, nw * (sC * kh * kw) + c * (kh * kw), kh * kw ); let mut io = bh; while io < eh.into() { let hr = (io - bh) / sth.into(); if hr < h_out.into() { let i = io + (kh % 2).into(); let ih1 = I32Number::max(0, i + oh).into(); let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into(); let mut jo = bw; while jo < ew.into() { let wr = (jo - bw) / stw.into(); if wr < w_out.into() { let j = jo + (kw % 2).into(); let iw1 = I32Number::max(0, j + ow).into(); let iw2 = I32Number::min(j + ow + kw.into(), sW.into()).into(); let mut img: Array<T> = array![]; let mut ihi = ih1; while ihi != ih2 { img .append_span( SpanTrait::slice( (*X).data, n * (sC * sH * sW) + c * (sH * sW) + ihi * sW + iw1, iw2 - iw1 ) ); ihi += 1; }; let img = img.span(); let s = if w.len() != img.len() { let jh1 = I32Number::max(0, -i - oh).into(); let jh2 = I32Number::min(sH.into() - (i + oh), kh.into()) .into(); let jw1 = I32Number::max(0, -j - ow).into(); let jw2 = I32Number::min(sW.into() - (j + ow), kw.into()) .into(); let mut w_: Array<T> = array![]; let mut jhj = jh1; while jhj != jh2 { w_ .append_span( SpanTrait::slice(w, jhj * kw + jw1, jw2 - jw1) ); jhj += 1; }; let w_ = w_.span(); assert(w_.len() == img.len(), 'unexpected w and img len'); dot(img, w_) } else { dot(img, w) }; let hr = if hr < 0 { h_out - hr.into() } else { hr.into() }; let wr = if wr < 0 { w_out - wr.into() } else { wr.into() }; res .set( n * *res_strides.at(0) + nw * *res_strides.at(1) + hr * *res_strides.at(2) + wr, res .at( n * *res_strides.at(0) + nw * *res_strides.at(1) + hr * *res_strides.at(2) + wr ) + s ); } jo += stw.into(); }; } io += sth.into(); }; c += 1; }; nw += 1; }; n += 1; }; let mut res_data: Array<T> = array![]; let mut i = 0; while i != res.len() { res_data.append(res.at(i)); i += 1; }; return TensorTrait::new(res_shape, res_data.span()); }
    // 3D fast path: X is (N, C, H, W, Z).
    if (*X).shape.len() == 5 { let sN = *(*X).shape.at(0); let sC = *(*X).shape.at(1); let sH = *(*X).shape.at(2); let sW = *(*X).shape.at(3); let sZ = *(*X).shape.at(4); let sM = *(*W).shape.at(0); let kh = *kernel_shape.at(0); let kw = *kernel_shape.at(1); let kz = *kernel_shape.at(2); let sth = *strides.at(0); let stw = *strides.at(1); let stz = *strides.at(2); let h_out = ((sH - kh + *pads.at(0) + *pads.at(3)) / sth) + 1; let w_out = ((sW - kw + *pads.at(1) + *pads.at(4)) / stw) + 1; let z_out = ((sZ - kz + *pads.at(2) + *pads.at(5)) / stz) + 1; let h0 = *pads.at(0); let w0 = *pads.at(1); let z0 = *pads.at(2); let oh: i32 = -1 * (kh % 2).into(); let ow: i32 = -1 * (kw % 2).into(); let oz: i32 = -1 * (kz % 2).into(); let bh: i32 = -h0.into(); let bw: i32 = -w0.into(); let bz: i32 = -z0.into(); let eh = h_out * sth; let ew = w_out * stw; let ez = z_out * stz; let mut res = NullableVecImpl::new(); let res_shape = array![sN, sM, h_out, w_out, z_out].span(); let res_strides = stride(res_shape); res.set(sN * *res_strides.at(0) - 1, NumberTrait::zero()); match B { Option::Some(B) => { let mut i = 0; while i != sN { let mut j = 0; while j != sM { let b_j = *B.at(j); let mut k = 0; while k != h_out { let mut l = 0; while l != w_out { let mut m = 0; while m != z_out { res .set( i * *res_strides.at(0) + j * *res_strides.at(1) + k * *res_strides.at(2) + l * *res_strides.at(3) + m, b_j ); m += 1; }; l += 1; }; k += 1; }; j += 1; }; i += 1; }; }, Option::None => {}, } let mut n = 0; while n != sN { let mut nw = 0; while nw != sM { let mut c = 0; while c != sC { let w = SpanTrait::slice( (*W).data, nw * (sC * kh * kw * kz) + c * (kh * kw * kz), kh * kw * kz ); let mut io = bh; while io < eh.into() { let hr = (io - bh) / sth.into(); if hr < h_out.into() { let i = io + (kh % 2).into(); let ih1 = I32Number::max(0, i + oh).into(); let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into(); let mut jo = bw; while jo < ew.into() { let wr = (jo - bw) / stw.into(); if wr < w_out.into() { let j = jo + (kw % 2).into(); let iw1 = I32Number::max(0, j + ow).into(); let iw2 = I32Number::min(j + ow + kw.into(), sW.into()).into(); let mut zo = bz; while zo < ez.into() { let zr = (zo - bz) / stz.into(); if zr < z_out.into() { let z = zo + (kz % 2).into(); let iz1 = I32Number::max(0, z + oz).into(); let iz2 = I32Number::min(z + oz + kz.into(), sW.into()) .into(); let mut img: Array<T> = array![]; let mut ihi = ih1; while ihi != ih2 { let mut iwi = iw1; while iwi != iw2 { img .append_span( SpanTrait::slice( (*X).data, n * (sC * sH * sW * sZ) + c * (sH * sW * sZ) + ihi * (sW * sZ) + iwi * sZ + iz1, iz2 - iz1 ) ); iwi += 1; }; ihi += 1; }; let img = img.span(); let s = if w.len() != img.len() { let jh1 = I32Number::max(0, -i - oh).into(); let jh2 = I32Number::min( sH.into() - (i + oh), kh.into() ) .into(); let jw1 = I32Number::max(0, -j - ow).into(); let jw2 = I32Number::min( sW.into() - (j + ow), kw.into() ) .into(); let jz1 = I32Number::max(0, -z - oz).into(); let jz2 = I32Number::min( sZ.into() - (z + oz), kz.into() ) .into(); let mut w_: Array<T> = array![]; let mut jhj = jh1; while jhj != jh2 { let mut jwj = jw1; while jwj != jw2 { w_ .append_span( SpanTrait::slice( w, jhj * kw * kz + jwj * kz + jz1, jz2 - jz1 ) ); jwj += 1; }; jhj += 1; }; let w_ = w_.span(); assert( w_.len() == img.len(), 'unexpected w and img len' ); dot(img, w_) } else { dot(img, w) }; let hr = if hr < 0 { h_out - hr.into() } else { hr.into() }; let wr = if wr < 0 { w_out - wr.into() } else { wr.into() }; let zr = if zr < 0 { z_out - zr.into() } else { zr.into() }; res .set( n * *res_strides.at(0) + nw * *res_strides.at(1) + hr * *res_strides.at(2) + wr * *res_strides.at(3) + zr, res .at( n * *res_strides.at(0) + nw * *res_strides.at(1) + hr * *res_strides.at(2) + wr * *res_strides.at(3) + zr ) + s ); } zo += stz.into(); }; } jo += stw.into(); }; } io += sth.into(); }; c += 1; }; nw += 1; }; n += 1; }; let mut res_data: Array<T> = array![]; let mut i = 0; while i != res.len() { res_data.append(res.at(i)); i += 1; }; return TensorTrait::new(res_shape, res_data.span()); }
    // if (*X).shape.len() > 5
    // Generic N-D path: flat-index arithmetic over all window positions.
    let sN = *(*X).shape.at(0); let sC = *(*X).shape.at(1); let sM = *(*W).shape.at(0); let w_stride = stride((*W).shape); let x_stride = stride((*X).shape); let mut shape_out: Array<usize> = array![]; let mut o_index: Array<i32> = array![]; let mut b_index: Array<i32> = array![]; let mut e_index: Array<usize> = array![]; let mut range_len: Array<usize> = array![]; let mut i = 0; while i != nd { shape_out .append( ((*(*X).shape.at(2 + i) - *kernel_shape.at(i) + *pads.at(i) + *pads.at(i + nd)) / *strides.at(i)) + 1 ); let k = *kernel_shape.at(i); o_index.append(-1 * (k % 2).into()); b_index.append(-(*pads.at(i)).into()); e_index.append(*shape_out.at(i) * *strides.at(i)); range_len.append((((*e_index.at(i)).into() - *b_index.at(i)).into()) / *strides.at(i)); i += 1; }; let o_index = o_index.span(); let b_index = b_index.span(); let shape_out = shape_out.span(); let range_len = range_len.span(); let range_stride = stride(range_len); let mut res_shape = array![sN, sM]; res_shape.append_span(shape_out); let res_shape = res_shape.span(); let res_strides = stride(res_shape); let mut res = NullableVecImpl::new(); res.set(sN * *res_strides.at(0) - 1, NumberTrait::zero()); match B { Option::Some(B) => { let mut i = 0; while i != sN { let mut j = 0; while j != sM { let b_j = *B.at(j); let mut k = 0; while k != *res_strides.at(1) { res.set(i * *res_strides.at(0) + j * *res_strides.at(1) + k, b_j); k += 1; }; j += 1; }; i += 1; }; }, Option::None => {}, } let mut n = 0; while n != sN { let mut nw = 0; while nw != sM { let mut c = 0; while c != sC { let w = SpanTrait::slice( (*W).data, nw * *w_stride.at(0) + c * *w_stride.at(1), *w_stride.at(1) ); let mut i = 0; while i != *range_len.at(0) * *range_stride.at(0) { let mut io_index: Array<i32> = array![]; let mut r_index: Array<i32> = array![]; let mut flatten_index = i; let mut nx = 0; while nx != nd { let (n_index, rem) = DivRem::div_rem( flatten_index, (*range_stride.at(nx)).try_into().unwrap() ); flatten_index = rem; io_index .append(n_index.into() * (*strides.at(nx)).into() + *b_index.at(nx)); r_index.append(n_index.into()); nx += 1; }; if r_index_check(r_index.span(), shape_out) { let mut indices: Array<i32> = array![]; let mut i1_index: Array<usize> = array![]; let mut i2_index: Array<usize> = array![]; let mut idiff_index: Array<usize> = array![]; let mut nx = 0; while nx != nd { indices.append(*io_index.at(nx) + (*kernel_shape.at(nx) % 2).into()); i1_index .append( I32Number::max(0, *indices.at(nx) + *o_index.at(nx)).into() ); i2_index .append( I32Number::min( (*(*X).shape.at(nx + 2)).into(), *indices.at(nx) + *o_index.at(nx) + (*kernel_shape.at(nx)).into() ) .into() ); if nx != nd - 1 { idiff_index.append(*i2_index.at(nx) - *i1_index.at(nx)); } nx += 1; }; let i1_index = i1_index.span(); let mut img: Array<T> = array![]; let img = if nx == 1 { let img = SpanTrait::slice( (*X).data, n * sN + c * sC + *i1_index.at(nd - 1), *i2_index.at(nd - 1) - *i1_index.at(nd - 1) ); img } else { let i_stride = stride(idiff_index.span()); let mut ii = 0; while ii != *i_stride.at(0) * *idiff_index.at(0) { let mut flatten_index = ii; let mut start = n * *x_stride.at(0) + c * *x_stride.at(1); let mut nx = 0; while nx != nd - 1 { let (ii_index, rem) = DivRem::div_rem( flatten_index, (*i_stride.at(nx)).try_into().unwrap() ); flatten_index = rem; start += (*i1_index.at(nx) + ii_index) * *x_stride.at(2 + nx); nx += 1; }; img .append_span( SpanTrait::slice( (*X).data, start + *i1_index.at(nd - 1), *i2_index.at(nd - 1) - *i1_index.at(nd - 1) ) ); ii += 1; }; img.span() }; let s = if w.len() != img.len() { let mut j1_index: Array<usize> = array![]; let mut j2_index: Array<usize> = array![]; let mut jdiff_index: Array<usize> = array![]; let mut nx = 0; while nx != nd { j1_index .append( I32Number::max(0, -*indices.at(nx) - *o_index.at(nx)).into() ); j2_index .append( I32Number::min( (*(*X).shape.at(nx + 2)).into() - *indices.at(nx) - *o_index.at(nx), (*kernel_shape.at(nx)).into() ) .into() ); if nx != nd - 1 { jdiff_index.append(*j2_index.at(nx) - *j1_index.at(nx)); } nx += 1; }; let j1_index = j1_index.span(); let mut w_: Array<T> = array![]; let w_ = if nx == 1 { let w_ = SpanTrait::slice( w, *j1_index.at(nd - 1), *j2_index.at(nd - 1) - *j1_index.at(nd - 1) ); w_ } else { let j_stride = stride(jdiff_index.span()); let mut jj = 0; while jj != *j_stride.at(0) * *jdiff_index.at(0) { let mut flatten_index = jj; let mut start = 0; let mut nx = 0; while nx != nd - 1 { let (jj_index, rem) = DivRem::div_rem( flatten_index, (*j_stride.at(nx)).try_into().unwrap() ); flatten_index = rem; start += (*j1_index.at(nx) + jj_index) * *kernel_shape.at(nx); nx += 1; }; w_ .append_span( SpanTrait::slice( w, start + *j1_index.at(nd - 1), *j2_index.at(nd - 1) - *j1_index.at(nd - 1) ) ); jj += 1; }; w_.span() }; dot(img, w_) } else { dot(img, w) }; let mut res_index = n * *res_strides.at(0) + nw * *res_strides.at(1); let mut nx = 0; while nx != nd { res_index += (*r_index.at(nx)).into() * *res_strides.at(2 + nx); nx += 1; }; res.set(res_index, res.at(res_index) + s); }; i += 1 }; c += 1; }; nw += 1; }; n += 1; }; let mut res_data: Array<T> = array![]; let mut i = 0; while i != res.len() { res_data.append(res.at(i)); i += 1; }; TensorTrait::new(res_shape, res_data.span())
}

// True when every window coordinate in r_index is inside the output shape.
fn r_index_check(r_index: Span<i32>, shape_out: Span<usize>) -> bool { let mut i = 0; let flag = loop { if i == r_index.len() { break true; } if *r_index.at(i) >= (*shape_out.at(i)).into() { break false; } i += 1; }; flag }

// Product of the elements of pA from index `start` to the end (1 for an empty tail).
fn prod<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>,>(
    pA: Span<T>, start: usize
) -> T { let mut i = start; let mut prod = NumberTrait::one(); while i != pA.len() { prod = prod * (*pA.at(i)); i += 1; }; prod }

// Minimum of a non-empty span.
fn min(mut a: Span<usize>) -> usize { assert(a.len() > 0, 'span cannot be empty'); let mut min = *a.at(0); loop { match a.pop_front() { Option::Some(v) => { if *v < min { min = *v; }; }, Option::None => { break min; } }; } }

// Maximum of a non-empty span.
fn max(mut a: Span<usize>) -> usize { assert(a.len() > 0, 'span cannot be empty'); let mut max = *a.at(0); loop { match a.pop_front() { Option::Some(v) => { if *v > max { max = *v; }; }, Option::None => { break max; } }; } }

// Half-open range [start, end) with the given step; (end - start) must divide evenly.
fn arange(start: usize, end: usize, step: usize) -> Span<usize> { assert((end - start) % step == 0, 'incompatible step value'); let mut arr: Array<usize> = array![]; let mut i = start; while i < end { arr.append(i); i += step; }; arr.span() }

// Cartesian product of the given index ranges, returned as one span per combination.
fn cartesian(mut arrays: Span<Span<usize>>,) -> Span<Span<usize>> { let mut n = 1; let mut i = arrays.len() - 1; loop { n = n * (*(arrays.at(i))).len(); if i == 0 { break; } i -= 1; }; let mut i = 0; let mut size_arrays: Array<usize> = array![]; while i != arrays.len() { size_arrays.append((*(arrays.at(i))).len()); i += 1; }; let size_arrays = size_arrays.span(); let mut output_arrays = array![]; let mut m = n; let mut i = 0; while i != arrays.len() { m = m / (*(arrays.at(i))).len(); let mut out = repeat(*(arrays.at(i)), m); out = repeat_2(out, size_arrays, i); output_arrays.append(out); i += 1; }; let output_arrays = output_arrays.span(); let mut i = 0; let mut ret = ArrayTrait::new(); while i != n { let mut j = 0; let mut x: Array<usize> = array![]; while j != arrays.len() { x.append(*(output_arrays.at(j)).at(i)); j += 1; }; ret.append(x.span()); i += 1; }; ret.span() }

// Tiles `array` so its pattern repeats for every combination of the preceding ranges.
fn repeat_2(mut array: Array<usize>, size_array: Span<usize>, index: usize) -> Array<usize> { let mut size = array.len(); let mut i = 0; while i != index { let mut j = 1; while j != *size_array.at(index - 1 - i) { let mut k = 0; while k != size { array.append(*array.at(k)); k += 1; }; j += 1; }; size = size * *size_array.at(index - 1 - i); i += 1; }; array }

// Repeats each element of `array` m times in place order (element-wise repeat).
fn repeat(array: Span<usize>, m: usize,) -> Array<usize> { let mut out: Array<usize> = array![]; let mut j = 0; while j != array.len() { let mut k = 0; while k != m { out.append(*array.at(j)); k += 1; }; j += 1; }; out }

// Inner product of two equal-length spans.
fn dot<
    T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>,
>(
    a: Span<T>, b: Span<T>
) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); while i != a.len() { sum = sum + *a.at(i) * *b.at(i); i += 1; }; sum }
https://github.com/gizatechxyz/orion
src/operators/nn/functional/conv_transpose.cairo
use orion::numbers::NumberTrait; use orion::operators::tensor::core::{stride}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl};

// Padding strategies mirroring the ONNX `auto_pad` attribute.
#[derive(Copy, Drop)] enum AUTO_PAD { NOTSET, SAME_UPPER, SAME_LOWER, VALID }

/// Cf: NNTrait::conv_transpose docstring
///
/// Transposed (fractionally-strided) convolution of `X` with kernel `W` and
/// optional bias `B`. Missing attributes are defaulted first (dilations and
/// strides of 1, zero output_padding, kernel shape taken from `W`, group = 1),
/// then `pads`/`output_shape` are resolved from explicit pads or from the
/// `auto_pad` policy. With group == 1 the result is computed as a GEMM of the
/// transposed kernel with each input image followed by col2im; with group > 1
/// the channels are split and the function recurses once per group, then the
/// per-group outputs are re-interleaved per batch item.
fn conv_transpose< T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>, +Add<T>, +Mul<T>, >( X: @Tensor<T>, W: @Tensor<T>, B: Option<@Tensor<T>>, auto_pad: Option<AUTO_PAD>, dilations: Option<Span<usize>>, group: Option<usize>, kernel_shape: Option<Span<usize>>, output_padding: Option<Span<usize>>, output_shape: Option<Span<usize>>, pads: Option<Span<usize>>, strides: Option<Span<usize>>, ) -> Tensor<T> { let auto_pad = match auto_pad { Option::Some(auto_pad) => auto_pad, Option::None => { AUTO_PAD::NOTSET }, }; let dilations = match dilations { Option::Some(dilations) => dilations, Option::None => { let mut dilations: Array<usize> = array![]; let mut i = 2; while i != (*X).shape.len() { dilations.append(1); i += 1; }; dilations.span() }, }; let kernel_shape = match kernel_shape { Option::Some(kernel_shape) => kernel_shape, Option::None => { let mut kernel_shape: Array<usize> = array![]; let mut i = 2; while i != (*W).shape.len() { kernel_shape.append(*(*W).shape.at(i)); i += 1; }; kernel_shape.span() }, }; let output_padding = match output_padding { Option::Some(output_padding) => output_padding, Option::None => { let mut output_padding: Array<usize> = array![]; let mut i = 2; while i != (*X) .shape .len() { output_padding.append(0); output_padding.append(0); i += 1; }; output_padding.span() }, }; let strides = match strides { Option::Some(strides) => strides, Option::None => { let mut strides: Array<usize> = array![]; let mut i = 2; while i != (*X).shape.len() { strides.append(1); i += 1; }; strides.span() }, }; let (pads, _, output_shape) = match pads { Option::Some(pads) => { let n_dims = (*X).shape.len() - 2; let output_shape = match output_shape {
Option::Some(output_shape) => output_shape, Option::None => { let mut output_shape: Array<usize> = array![]; let mut i = 0; while i != n_dims { output_shape .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + *output_padding.at(i) + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) - (*pads.at(i) + *pads.at(i + n_dims)) ); i += 1; }; output_shape.span() }, }; (pads, n_dims, output_shape) }, Option::None => { let (pads, n_dims, output_shape) = match auto_pad { AUTO_PAD::NOTSET => { let mut pads: Array<usize> = array![]; let mut i = 0; while i != strides.len() * 2 { pads.append(0); i += 1; }; let pads = pads.span(); let n_dims = (*X).shape.len() - 2; let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { let mut output_shape: Array<usize> = array![]; let mut i = 0; while i != n_dims { output_shape .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + *output_padding.at(i) + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) - (*pads.at(i) + *pads.at(i + n_dims)) ); i += 1; }; output_shape.span() }, }; (pads, n_dims, output_shape) }, AUTO_PAD::SAME_UPPER => { let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { let mut output_shape: Array<usize> = array![]; let mut i = 0; while i != strides .len() { output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); i += 1; }; output_shape.span() }, }; let mut total_padding: Array<usize> = array![]; let mut i = 0; while i != output_shape .len() { total_padding .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + *output_padding.at(i) + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) - *output_shape.at(i) ); i += 1; }; let total_padding = total_padding.span(); let mut pads: Array<usize> = array![]; let mut i = 0; while i != output_shape.len() { pads.append(*total_padding.at(i) / 2); i += 1; }; let mut i = 0; while i != output_shape .len() { pads.append(*total_padding.at(i) - (*total_padding.at(i) / 2)); i += 1; };
(pads.span(), pads.len() / 2, output_shape) }, AUTO_PAD::SAME_LOWER => { let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { let mut output_shape: Array<usize> = array![]; let mut i = 0; while i != strides .len() { output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); i += 1; }; output_shape.span() }, }; let mut total_padding: Array<usize> = array![]; let mut i = 0; while i != output_shape .len() { total_padding .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + *output_padding.at(i) + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) - *output_shape.at(i) ); i += 1; }; let total_padding = total_padding.span(); let mut pads: Array<usize> = array![]; let mut i = 0; while i != output_shape .len() { pads.append(*total_padding.at(i) - *total_padding.at(i) / 2); i += 1; }; let mut i = 0; while i != output_shape.len() { pads.append(*total_padding.at(i) / 2); i += 1; }; (pads.span(), pads.len() / 2, output_shape) }, AUTO_PAD::VALID => { let mut pads: Array<usize> = array![]; let mut i = 0; while i != strides.len() * 2 { pads.append(0); i += 1; }; let pads = pads.span(); let n_dims = (*X).shape.len() - 2; let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { let mut output_shape: Array<usize> = array![]; let mut i = 0; while i != n_dims { output_shape .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + *output_padding.at(i) + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) - (*pads.at(i) + *pads.at(i + n_dims)) ); i += 1; }; output_shape.span() }, }; (pads, n_dims, output_shape) }, }; (pads, n_dims, output_shape) }, }; let group = match group { Option::Some(group) => group, Option::None => { 1 }, }; let mut kernel_shape: Array<usize> = array![]; let mut i = 2; while i != (*W).shape.len() { kernel_shape.append(*(*W).shape.at(i)); i += 1; }; let kernel_shape = kernel_shape.span(); let kernel_size = prod(kernel_shape, 0); let mut num_output_channels =
*(*W).shape.at(1) * group; let mut kernel_dim = (num_output_channels / group) * kernel_size; let C = *(*X).shape.at(1); let m = kernel_dim; let n = prod((*X).shape, 2); let k = C / group; let mut final: Array<T> = array![]; if group == 1 { let mut image_id = 0; while image_id != *(*X) .shape .at(0) { let w_t = TensorTrait::new(array![k, m].span(), (*W).data) .transpose(array![1, 0].span()); let image = SpanTrait::slice((*X).data, image_id * k * n, k * n); let gemm = w_t.matmul(@TensorTrait::new(array![k, n].span(), image)); let gemmc = gemm .reshape( array![ num_output_channels.try_into().unwrap(), (m / num_output_channels).try_into().unwrap(), n.try_into().unwrap() ] .span(), false ); let mut c = 0; while c != num_output_channels { let gemmc_c = TensorTrait::new( array![m / num_output_channels, n].span(), SpanTrait::slice( gemmc.data, (m / num_output_channels) * n * c, (m / num_output_channels) * n ) ); let mut res = col2im_naive_implementation( @gemmc_c, output_shape, kernel_shape, dilations, pads, strides ); match B { Option::Some(B) => { let mut i = 0; while i != res .len() { res.set(i, res.at(i) + *(*B).data.at(c)); i += 1; }; }, Option::None => {}, } c += 1; let mut i = 0; while i != res.len() { final.append(res.at(i)); i += 1; }; }; image_id += 1; }; } else { let mut output_array: Array<Span<T>> = array![]; let mut i = 0; let mut output_size = 1; while i != output_shape.len() { output_size *= *output_shape.at(i); i += 1; }; // Computation of conv transposition per group let mut group_id = 0; while group_id != group { let mut group_X: Array<T> = array![]; let mut group_W: Array<T> = array![]; let mut image_id = 0; while image_id != *(*X) .shape .at(0) { let start = image_id * n * C + (group_id * C / group) * n; let end = image_id * n * C + ((group_id + 1) * C / group) * n; let mut i = start; while i != end { group_X.append(*(*X).data.at(i)); i += 1; }; image_id += 1; }; let start = (group_id * C / group) * *(*W).shape.at(1) * kernel_size; let end = (group_id
+ 1) * C / group * *(*W).shape.at(1) * kernel_size; let mut i = start; while i != end { group_W.append(*(*W).data.at(i)); i += 1; }; let mut shape_X: Array<usize> = array![]; shape_X.append(*(*X).shape.at(0)); shape_X.append(C / group); let mut i = 2; while i != (*X).shape.len() { shape_X.append(*(*X).shape.at(i)); i += 1; }; let mut shape_W: Array<usize> = array![]; shape_W.append(C / group); let mut i = 1; while i != (*W).shape.len() { shape_W.append(*(*W).shape.at(i)); i += 1; }; // group_X : N x (C / group) x X.shape[2:] let group_X = TensorTrait::new(shape_X.span(), group_X.span()); // group_W : (C / group) x *(*W).shape.at(1) x W.shape[2:] let group_W = TensorTrait::new(shape_W.span(), group_W.span()); // group output : N x (num_output_channels / group) x output_shape let group_output = conv_transpose( @group_X, @group_W, B, Option::Some(auto_pad), Option::Some(dilations), Option::Some(1), Option::Some(kernel_shape), Option::Some(output_padding), Option::Some(output_shape), Option::Some(pads), Option::Some(strides) ); output_array.append(group_output.data); group_id += 1; }; let output_array = output_array.span(); // Sorting result per item of the batch // output size : N (batch size) x num_output_channels x output_shape let mut image_id = 0; while image_id != *(*X) .shape .at(0) { let mut group_id = 0; while group_id != group { let group_output = *output_array.at(group_id); let mut i = image_id * output_size * (num_output_channels / group); while i != (image_id + 1) * output_size * (num_output_channels / group) { final.append(*group_output.at(i)); i += 1; }; group_id += 1; }; image_id += 1; }; } let mut shape = array![*(*X).shape.at(0), num_output_channels]; let mut i = 0; while i != output_shape.len() { shape.append(*output_shape.at(i)); i += 1; }; TensorTrait::new(shape.span(), final.span()) }

/// Returns row `row` of a 2-D tensor as a span over its data.
fn get_image<T, +Drop<T>, +Copy<T>>(self: @Tensor<T>, row: usize) -> Span<T> { assert((*self).shape.len() == 2, 'Expected a 2D tensor'); let row_length =
*self.shape[1]; let start = row * row_length; (*self).data.slice(start, row_length) }

/// Naive col2im: scatters every (kernel-offset, column) entry of `data` back
/// into an image of `image_shape`, accumulating overlapping contributions,
/// honouring the kernel/dilation/pad/stride geometry. Returns the image as a
/// NullableVec pre-sized to the full image length.
fn col2im_naive_implementation< T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>, +Add<T>, >( data: @Tensor<T>, image_shape: Span<usize>, kernel_shape: Span<usize>, dilations: Span<usize>, pads: Span<usize>, strides: Span<usize>, ) -> NullableVec<T> { let n_dims = pads.len() / 2; col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides); let mut dim_col: Array<usize> = array![]; let mut i = 0; while i != n_dims { dim_col .append( (*image_shape.at(i) + (*pads.at(i) + *pads.at(i + n_dims)) - (*dilations.at(i) * (*kernel_shape.at(i) - 1) + 1)) / *strides.at(i) + 1 ); i += 1; }; let dim_col = dim_col.span(); let stride_img = stride(image_shape); let mut data_im = NullableVecImpl::new(); data_im.set(*image_shape.at(0) * *stride_img.at(0) - 1, NumberTrait::zero()); let kernel_size = prod(kernel_shape, 0); let col_size = prod(dim_col, 0); let mut c_col = 0; while c_col != kernel_size { let offset = get_indices(c_col, kernel_shape).span(); let mut col = 0; while col != col_size { let ind_col = get_indices(col, dim_col).span(); let mut ind_im: Array<usize> = array![]; let mut i = 0; while i != n_dims { if (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)) < *pads .at(i) { let neg_index = *pads.at(i) - (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)); ind_im.append(*image_shape.at(i) + neg_index); } else { ind_im .append( *ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i) - *pads.at(i) ); } i += 1; }; let ind_im = ind_im.span(); if !is_out(ind_im, image_shape) { let mut index = 0; let mut i = 0; while i != image_shape.len() { index += *stride_img.at(i) * *ind_im.at(i); i += 1; }; data_im.set(index, data_im.at(index) + *(*data).data.at(c_col * col_size + col)); } col += 1; }; c_col += 1; }; data_im }

/// Validates that `X` (kernel_size multiple of channels x block count) is
/// consistent with the requested output/kernel/dilation/pad/stride geometry;
/// panics otherwise.
fn col2im_shape_check<T, +TensorTrait<T>, +Copy<T>, +Drop<T>,>( X: @Tensor<T>, output_shape:
Span<usize>, kernel_shape: Span<usize>, dilations: Span<usize>, pads: Span<usize>, strides: Span<usize>, ) { let n_input_plane = *(*X).shape.at(0); let kernel_size = prod(kernel_shape, 0); assert(n_input_plane % kernel_size == 0, 'wrong input dimension'); let input_length = *(*X).shape.at(1); let n_dims = output_shape.len(); let mut n_blocks: Array<usize> = array![]; let mut i = 0; while i != n_dims { n_blocks .append( (*output_shape.at(i) + (*pads.at(i) + *pads.at(i + n_dims)) - *dilations.at(i) * (*kernel_shape.at(i) - 1) - 1) / *strides.at(i) + 1 ); i += 1; }; let block_size = prod(n_blocks.span(), 0); assert(input_length == block_size, 'input_length != block_size'); }

/// Converts a flat (row-major) index into per-axis indices for `shape`.
fn get_indices(index: usize, shape: Span<usize>,) -> Array<usize> { let mut i = index; let mut res: Array<usize> = array![]; let mut k = shape.len() - 1; while k != 0 { let m = i % *shape.at(k); res.append(m); i -= m; i /= *shape.at(k); k -= 1; }; let mut new_res: Array<usize> = array![]; new_res.append(i); let mut i = shape.len() - 1; while i != 0 { new_res.append(*res.at(i - 1)); i -= 1; }; new_res }

/// True when any coordinate of `ind` falls outside `shape`.
/// NOTE(review): the `i < 0` test is vacuous since `i` is usize; it never
/// fires and is kept only for parity with the reference implementation.
fn is_out(ind: Span<usize>, shape: Span<usize>,) -> bool { let mut n = 0; let is_out = loop { if n == ind.len() { break false; } let s = *shape.at(n); let i = *ind.at(n); if i < 0 { break true; } if i >= s { break true; } n += 1; }; is_out }

/// Product of the elements of `pA` from index `start` to the end of the span.
fn prod<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>,>( pA: Span<T>, start: usize ) -> T { let mut i = start; let mut prod = NumberTrait::one(); while i != pA.len() { prod = prod * (*pA.at(i)); i += 1; }; prod }
https://github.com/gizatechxyz/orion
src/operators/nn/functional/depth_to_space.cairo
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast};

/// Cf: NNTrait::depth_to_space docstring
///
/// Rearranges depth (channel) data into spatial blocks:
/// [N, C, H, W] -> [N, C / (b*b), H*b, W*b], where b = `blocksize`,
/// in 'DCR' (depth-column-row) order, or 'CRD' (column-row-depth) otherwise.
fn depth_to_space<
    T,
    impl TTensor: TensorTrait<T>,
    impl TAdd: Add<T>,
    impl TMul: Mul<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    tensor: Tensor<T>, blocksize: usize, mode: felt252
) -> Tensor<T> {
    assert((tensor.shape).len() == 4, 'Unexpected shape 4.');

    let blocksize_i32: i32 = blocksize.try_into().unwrap();
    let b: i32 = (*(tensor.shape).at(0)).try_into().unwrap();
    let C: u32 = (*(tensor.shape).at(1)).try_into().unwrap();
    let H: i32 = (*(tensor.shape).at(2)).try_into().unwrap();
    let W: i32 = (*(tensor.shape).at(3)).try_into().unwrap();
    // Channel count once the blocksize x blocksize block is moved into space.
    let reduced_C: i32 = (C / (blocksize * blocksize)).try_into().unwrap();

    let finalshape: Array<i32> = array![b, reduced_C, H * blocksize_i32, W * blocksize_i32];

    // DCR and CRD differ only in where the two block axes sit in the
    // intermediate 6-D view and in the permutation applied to it.
    if mode == 'DCR' {
        let tmpshape: Array<i32> = array![b, blocksize_i32, blocksize_i32, reduced_C, H, W];
        let viewed = tensor.reshape(target_shape: tmpshape.span(), allowzero: false);
        let permuted = viewed.transpose(axes: array![0, 3, 4, 1, 5, 2].span());
        permuted.reshape(target_shape: finalshape.span(), allowzero: false)
    } else {
        // mode == 'CRD'
        let tmpshape: Array<i32> = array![b, reduced_C, blocksize_i32, blocksize_i32, H, W];
        let viewed = tensor.reshape(target_shape: tmpshape.span(), allowzero: false);
        let permuted = viewed.transpose(axes: array![0, 1, 4, 2, 5, 3].span());
        permuted.reshape(target_shape: finalshape.span(), allowzero: false)
    }
}
https://github.com/gizatechxyz/orion
src/operators/nn/functional/gemm.cairo
use core::array::SpanTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{core::{Tensor, TensorTrait}, math::arithmetic::mul_by_scalar};

/// Cf: NNTrait::gemm docstring
///
/// General matrix multiplication: `alpha * A' * B' (+ beta * C)`, where
/// `A'`/`B'` are `A`/`B` optionally transposed and `alpha`/`beta` default
/// to one when omitted.
fn gemm<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TAddTensor: Add<Tensor<T>>,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TPartialEq: PartialEq<T>,
    impl TMul: Mul<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    A: Tensor<T>,
    B: Tensor<T>,
    C: Option<Tensor<T>>,
    alpha: Option<T>,
    beta: Option<T>,
    transA: bool,
    transB: bool
) -> Tensor<T> {
    // Missing scale factors default to one.
    let alpha: T = match alpha {
        Option::Some(a) => a,
        Option::None => NumberTrait::one(),
    };
    let beta: T = match beta {
        Option::Some(b) => b,
        Option::None => NumberTrait::one(),
    };

    // Apply the requested transpositions before multiplying.
    let A = if transA {
        A.transpose(array![1, 0].span())
    } else {
        A
    };
    let B = if transB {
        B.transpose(array![1, 0].span())
    } else {
        B
    };

    let scaled = mul_by_scalar(@A.matmul(@B), alpha);
    match C {
        Option::Some(c) => scaled + mul_by_scalar(@c, beta),
        Option::None => scaled,
    }
}
https://github.com/gizatechxyz/orion
src/operators/nn/functional/grid_sample.cairo
use core::debug::PrintTrait; use orion::numbers::FP16x16; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{stride}; use orion::operators::tensor::{FP16x16Tensor, TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl};

// Interpolation modes, mirroring the ONNX GridSample `mode` attribute.
#[derive(Copy, Drop)] enum MODE { NEAREST, LINEAR, CUBIC, }

// Out-of-bounds handling, mirroring the ONNX GridSample `padding_mode`.
#[derive(Copy, Drop)] enum PADDING_MODE { ZEROS, BORDER, REFLECTION, }

/// Cf: NNTrait::grid_sample docstring
///
/// Samples input `X` at the normalized coordinates given by `grid`, per batch
/// item and channel, using nearest / linear / cubic interpolation (default
/// LINEAR) and zeros / border / reflection out-of-bounds handling (default
/// ZEROS). `align_corner` (default 0) selects the coordinate denormalization
/// convention. Returns an empty tensor when the output would have zero size.
fn grid_sample< T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>, +Add<T>, +Mul<T>, +Sub<T>, +Div<T>, +AddEq<T>, +PrintTrait<T>, +PartialOrd<T>, +PartialEq<T>, +TryInto<T, usize>, +Into<usize, MAG>, +Rem<T>, +Neg<T>, +SubEq<T>, >( X: @Tensor<T>, grid: @Tensor<T>, align_corner: Option<usize>, mode: Option<MODE>, padding_mode: Option<PADDING_MODE>, ) -> Tensor<T> { let align_corner = match align_corner { Option::Some(align_corner) => align_corner, Option::None => 0, }; let mode = match mode { Option::Some(mode) => mode, Option::None => MODE::LINEAR, }; let padding_mode = match padding_mode { Option::Some(padding_mode) => padding_mode, Option::None => PADDING_MODE::ZEROS, }; let x_dims = (*X).shape; let x_stride = stride((*X).shape); let grid_dims = (*grid).shape; let grid_stride = stride((*grid).shape); let N = *x_dims.at(0); let C = *x_dims.at(1); let num_dims = x_dims.len() - 2; let dims = SpanTrait::slice(x_dims, 2, num_dims); let border = prepare_border(X, dims, align_corner); let mut y_dims: Array<usize> = array![N, C]; y_dims.append_span(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2)); let y_dims = y_dims.span(); if prod(y_dims, 0) == 0 { return TensorTrait::new(array![].span(), array![].span()); } let mut Y: Array<T> = array![]; let mut n = 0; while n != N { let grid_data = SpanTrait::slice((*grid).data, n * *grid_stride.at(0), *grid_stride.at(0)); let grid_data_stride = SpanTrait::slice(grid_stride, 1, grid_stride.len() - 1); let mut c = 0; while c != C { let X_data = SpanTrait::slice( (*X).data, n * *x_stride.at(0) + c * *x_stride.at(1),
*x_stride.at(1) ); let X_data_stride = SpanTrait::slice(x_stride, 2, grid_stride.len() - 2); let all_coords = get_all_coords(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2)); let mut ix = 0; while ix != all_coords.len() { let ox = *all_coords.at(ix); let nx = get_sub(grid_data, grid_data_stride, ox); let nx = reverse(nx); let x = gs_denormalize_coordinates(nx, dims, align_corner); let x = match mode { MODE::NEAREST => { rint(x) }, MODE::LINEAR => { x }, MODE::CUBIC => { x }, }; let mut new_x: Array<T> = array![]; let mut i = 0; while i != x.len() { let v = *x.at(i); let mut x_min = *border.at(i); let mut x_max = *border.at(i + num_dims); let new_v = if v < x_min || v > x_max { let v = match padding_mode { PADDING_MODE::ZEROS => { v }, PADDING_MODE::BORDER => { clamp( v, NumberTrait::zero(), NumberTrait::new_unscaled((*dims.at(i)).into(), false) - NumberTrait::one() ) }, PADDING_MODE::REFLECTION => { gs_reflect(v, x_min, x_max) }, }; v } else { v }; new_x.append(new_v); i += 1; }; let x = new_x.span(); let y = match mode { MODE::NEAREST => { pixel_at_ndarray(X_data, dims, X_data_stride, x, border, padding_mode) }, MODE::LINEAR => { gs_linear_interpolation_nd_with_x( X_data, dims, X_data_stride, x, border, padding_mode ) }, MODE::CUBIC => { gs_cubic_interpolation_nd_with_x( X_data, dims, X_data_stride, x, border, padding_mode ) }, }; Y.append(y); ix += 1; }; c += 1; }; n += 1; }; TensorTrait::new(y_dims, Y.span()) }

/// 1-D cubic interpolation of `data` at fractional position `x`, using the
/// four neighbours x-1 .. x+2 fetched through `pixel_at_array` (so the given
/// padding mode applies to out-of-range neighbours).
fn gs_cubic_interpolation_1d_with_x< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>, +Add<T>, +Div<T>, +Sub<T>, +AddEq<T>, +TryInto<T, usize>, +Into<usize, MAG>, +PartialOrd<T>, +PartialEq<T>, +Rem<T>, +PrintTrait<T>, >( data: Span<T>, x: T, border: Span<T>, padding_mode: PADDING_MODE ) -> T { let x_0 = NumberTrait::floor(x); let x_1 = x_0 + NumberTrait::one(); let x_2 = x_1 + NumberTrait::one(); let x_minus_1 = x_0 - NumberTrait::one(); let coeffs = gs_get_cubic_coeffs(x - x_0); let v_0 = pixel_at_array(data,
x_minus_1.try_into().unwrap(), border, padding_mode); let v_1 = pixel_at_array(data, x_0.try_into().unwrap(), border, padding_mode); let v_2 = pixel_at_array(data, x_1.try_into().unwrap(), border, padding_mode); let v_3 = pixel_at_array(data, x_2.try_into().unwrap(), border, padding_mode); let v: Span<T> = array![v_0, v_1, v_2, v_3].span(); dot(coeffs, v) }

/// The four cubic-convolution coefficients for fractional offset `x`
/// (uses A = -3/4; the four weights sum over neighbours x-1 .. x+2).
fn gs_get_cubic_coeffs< T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +PartialOrd<T>, +PartialEq<T>, +Copy<T>, +Drop<T>, +AddEq<T>, +Add<T>, +Div<T>, +Mul<T>, +Sub<T>, >( x: T ) -> Span<T> { let one = NumberTrait::one(); let two = one + NumberTrait::one(); let three = two + NumberTrait::one(); let four = three + NumberTrait::one(); let five = four + NumberTrait::one(); let eigth = four + four; let A = NumberTrait::neg(three / four); let x = NumberTrait::abs(x); let mut coeffs: Array<T> = array![]; coeffs.append(((A * (x + one) - five * A) * (x + one) + eigth * A) * (x + one) - four * A); coeffs.append(((A + two) * x - (A + three)) * x * x + one); coeffs.append(((A + two) * (one - x) - (A + three)) * (one - x) * (one - x) + one); coeffs .append( ((A * ((one - x) + one) - five * A) * ((one - x) + one) + eigth * A) * ((one - x) + one) - four * A ); coeffs.span() }

/// N-D cubic interpolation: recursively interpolates along axis 0's
/// sub-arrays, then cubically combines the per-slice results at x[0].
fn gs_cubic_interpolation_nd_with_x< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>, +Add<T>, +Div<T>, +Sub<T>, +AddEq<T>, +TryInto<T, usize>, +Into<usize, MAG>, +PartialOrd<T>, +PartialEq<T>, +Rem<T>, +PrintTrait<T>, >( data: Span<T>, data_dims: Span<usize>, data_stride: Span<usize>, x: Span<T>, border: Span<T>, padding_mode: PADDING_MODE ) -> T { let num_dims = data_dims.len(); assert(num_dims == x.len(), 'pixel at nd array: wrong dim'); assert(num_dims == (border.len() / 2), 'pixel at nd array: wrong dim'); if num_dims == 1 { let a = gs_cubic_interpolation_1d_with_x(data, *x.at(0), border, padding_mode); return a; } let mut res1d: Array<T> = array![]; let mut i = 0; while i != *data_dims.at(0) { let sub_data =
SpanTrait::slice(data, i * *data_stride.at(0), *data_stride.at(0)); let sub_x = SpanTrait::slice(x, 1, x.len() - 1); let data_dims_sub = SpanTrait::slice(data_dims, 1, data_dims.len() - 1); let data_stride_sub = SpanTrait::slice(data_stride, 1, data_stride.len() - 1); let border1 = SpanTrait::slice(border, 1, num_dims - 1); let border2 = SpanTrait::slice(border, num_dims + 1, num_dims - 1); let mut border = ArrayTrait::new(); border.append_span(border1); border.append_span(border2); let r = gs_cubic_interpolation_nd_with_x( sub_data, data_dims_sub, data_stride_sub, sub_x, border.span(), padding_mode ); res1d.append(r); i += 1; }; gs_cubic_interpolation_1d_with_x( res1d.span(), *x.at(0), array![*border.at(0), *border.at(num_dims)].span(), padding_mode ) }

/// Linear interpolation weights [1 - |x|, |x|] for fractional offset `x`.
fn gs_get_linear_coeffs<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Sub<T>,>( x: T ) -> Span<T> { let x = NumberTrait::abs(x); array![NumberTrait::one() - x, x].span() }

/// 1-D linear interpolation of `data` between the two neighbours of `x`,
/// fetched through `pixel_at_array` (padding mode applies out of range).
fn gs_linear_interpolation_1d_with_x< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>, +Add<T>, +Div<T>, +Sub<T>, +TryInto<T, usize>, +Into<usize, MAG>, +PartialOrd<T>, +PartialEq<T>, +Rem<T>, +PrintTrait<T> >( data: Span<T>, x: T, border: Span<T>, padding_mode: PADDING_MODE ) -> T { let x_0 = NumberTrait::floor(x); let x_1 = x_0 + NumberTrait::one(); let coeffs = gs_get_linear_coeffs(x - x_0); let v_0 = pixel_at_array(data, x_0.try_into().unwrap(), border, padding_mode); let v_1 = pixel_at_array(data, x_1.try_into().unwrap(), border, padding_mode); let v: Span<T> = array![v_0, v_1].span(); dot(coeffs, v) }

/// Dot product of two equal-length spans (length equality is asserted).
fn dot<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +Mul<T>,>( a: Span<T>, b: Span<T> ) -> T { assert(a.len() == b.len(), 'dot: wrong len'); let mut i = 0; let mut sum = NumberTrait::zero(); while i != a.len() { sum = sum + *a.at(i) * *b.at(i); i += 1; }; sum }

/// N-D linear interpolation: recursively interpolates along axis 0's
/// sub-arrays, then linearly combines the per-slice results at x[0].
fn gs_linear_interpolation_nd_with_x< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>, +Add<T>,
+Div<T>, +Sub<T>, +TryInto<T, usize>, +Into<usize, MAG>, +PartialOrd<T>, +PartialEq<T>, +Rem<T>, +PrintTrait<T> >( data: Span<T>, data_dims: Span<usize>, data_stride: Span<usize>, x: Span<T>, border: Span<T>, padding_mode: PADDING_MODE ) -> T { let num_dims = data_dims.len(); assert(num_dims == x.len(), 'pixel at nd array: wrong dim'); assert(num_dims == (border.len() / 2), 'pixel at nd array: wrong dim'); if num_dims == 1 { let a = gs_linear_interpolation_1d_with_x(data, *x.at(0), border, padding_mode); return a; } let mut res1d: Array<T> = array![]; let mut i = 0; while i != *data_dims.at(0) { let sub_data = SpanTrait::slice(data, i * *data_stride.at(0), *data_stride.at(0)); let sub_x = SpanTrait::slice(x, 1, x.len() - 1); let data_dims_sub = SpanTrait::slice(data_dims, 1, data_dims.len() - 1); let data_stride_sub = SpanTrait::slice(data_stride, 1, data_stride.len() - 1); let border1 = SpanTrait::slice(border, 1, num_dims - 1); let border2 = SpanTrait::slice(border, num_dims + 1, num_dims - 1); let mut border = ArrayTrait::new(); border.append_span(border1); border.append_span(border2); let r = gs_linear_interpolation_nd_with_x( sub_data, data_dims_sub, data_stride_sub, sub_x, border.span(), padding_mode ); res1d.append(r); i += 1; }; gs_linear_interpolation_1d_with_x( res1d.span(), *x.at(0), array![*border.at(0), *border.at(num_dims)].span(), padding_mode ) }

/// Fetches the element of an N-D array at (possibly out-of-range) index `x`,
/// applying the padding mode one axis at a time and recursing on the
/// remaining axes.
fn pixel_at_ndarray< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>, +Add<T>, +Div<T>, +Sub<T>, +TryInto<T, usize>, +Into<usize, MAG>, +PartialOrd<T>, +PartialEq<T>, +Rem<T>, +PrintTrait<T>, >( ndarray: Span<T>, ndarray_dims: Span<usize>, ndarray_stride: Span<usize>, x: Span<T>, border: Span<T>, padding_mode: PADDING_MODE ) -> T { let num_dims = ndarray_dims.len(); assert(num_dims == x.len(), 'pixel at nd array: wrong dim'); assert(num_dims == (border.len() / 2), 'pixel at nd array: wrong dim'); let i = *x.at(0); if num_dims == 1 { return pixel_at_array(ndarray, *x.at(0), border,
padding_mode); } let d = NumberTrait::new_unscaled((*ndarray_dims.at(0)).into(), false); let ndarray = match padding_mode { PADDING_MODE::ZEROS => { let ndarray = if i >= NumberTrait::zero() && i < d { SpanTrait::slice( ndarray, i.try_into().unwrap() * *ndarray_stride.at(0), *ndarray_stride.at(0) ) } else { let ndarray: Span<T> = zeros(*ndarray_stride.at(0)); ndarray }; ndarray }, PADDING_MODE::BORDER => { let i = clamp(i, NumberTrait::zero(), d - NumberTrait::one()); SpanTrait::slice( ndarray, i.try_into().unwrap() * *ndarray_stride.at(0), *ndarray_stride.at(0) ) }, PADDING_MODE::REFLECTION => { let i: usize = (gs_reflect(i, *border.at(0), *border.at(num_dims))).try_into().unwrap(); SpanTrait::slice(ndarray, i * *ndarray_stride.at(0), *ndarray_stride.at(0)) }, }; let x = SpanTrait::slice(x, 1, x.len() - 1); let ndarray_dims = SpanTrait::slice(ndarray_dims, 1, ndarray_dims.len() - 1); let ndarray_stride = SpanTrait::slice(ndarray_stride, 1, ndarray_stride.len() - 1); let border1 = SpanTrait::slice(border, 1, num_dims - 1); let border2 = SpanTrait::slice(border, num_dims + 1, num_dims - 1); let mut border = ArrayTrait::new(); border.append_span(border1); border.append_span(border2); pixel_at_ndarray(ndarray, ndarray_dims, ndarray_stride, x, border.span(), padding_mode) }

/// Fetches the element of a 1-D array at (possibly out-of-range) index `i`,
/// applying the padding mode: zeros / clamped border / reflection.
fn pixel_at_array< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>, +Add<T>, +Div<T>, +Sub<T>, +TryInto<T, usize>, +Into<usize, MAG>, +PartialOrd<T>, +PartialEq<T>, +Rem<T>, +PrintTrait<T>, >( array: Span<T>, i: T, border: Span<T>, padding_mode: PADDING_MODE ) -> T { let d = NumberTrait::new_unscaled(array.len().into(), false); let pixel = match padding_mode { PADDING_MODE::ZEROS => { let pixel = if i >= NumberTrait::zero() && i < d { *array.at(i.try_into().unwrap()) } else { NumberTrait::zero() }; pixel }, PADDING_MODE::BORDER => { let i = clamp(i, NumberTrait::zero(), d - NumberTrait::one()); let pixel = *array.at(i.try_into().unwrap()); pixel }, PADDING_MODE::REFLECTION
=> { let i: usize = (gs_reflect(i, *border.at(0), *border.at(1))).try_into().unwrap(); let pixel = *array.at(i); pixel }, }; pixel }

/// Span of `n` zeros.
fn zeros<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>>(n: usize) -> Span<T> { let mut zeros: Array<T> = array![]; let mut i = 0; while i != n { zeros.append(NumberTrait::zero()); i += 1; }; zeros.span() }

/// Element-wise round-to-nearest with ties adjusted toward the even value.
fn rint< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +SubEq<T>, +Rem<T>, +PartialEq<T>, +PartialOrd<T>, +Add<T>, +Sub<T> >( data: Span<T> ) -> Span<T> { // round to nearest if ties rounds to the nearest even value. let mut rint: Array<T> = array![]; let two: T = NumberTrait::one() + NumberTrait::one(); let mut i = 0; while i != data.len() { let x = *data.at(i); let mut round = NumberTrait::round(x); let diff = round - x; if diff == NumberTrait::half() { if round % two != NumberTrait::zero() { round -= NumberTrait::one() } } rint.append(round); i += 1; }; rint.span() }

/// Clamps `val` into the closed interval [low, high].
fn clamp<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +PartialOrd<T>>( val: T, low: T, high: T ) -> T { if val < low { return low; } if val > high { return high; } val } 

/// Reflects `x` back into [x_min, x_max], alternating direction on each
/// traversal of the range (mirror padding).
fn gs_reflect< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +PartialOrd<T>, +PartialEq<T>, +Add<T>, +Sub<T>, +Div<T>, +Mul<T>, +Rem<T>, +PrintTrait<T>, >( x: T, x_min: T, x_max: T ) -> T { let two: T = NumberTrait::one() + NumberTrait::one(); let mut fx = x; let rng = x_max - x_min; let fx = if fx < x_min { let dx = x_min - fx; let n = NumberTrait::floor(dx / rng); let r = dx - n * rng; let fx = if NumberTrait::round(n % two) == NumberTrait::zero() { x_min + r } else { x_max - r }; fx } else if fx > x_max { let dx = fx - x_max; let n = NumberTrait::floor(dx / rng); let r = dx - n * rng; let fx = if NumberTrait::round(n % two) == NumberTrait::zero() { x_max - r } else { x_min + r }; fx } else { fx }; fx }

/// Reversed copy of a span.
fn reverse<T, +Copy<T>, +Drop<T>,>(data: Span<T>) -> Span<T> { let mut rev: Array<T> = array![]; let mut i = data.len(); while i != 0 { rev.append(*data.at(i - 1)); i -= 1; };
rev.span() }

/// Slices `data` at the flat offset addressed by `index` under the given
/// strides; slice length is the last axis' stride.
fn get_sub<T, +Copy<T>, +Drop<T>,>( data: Span<T>, stride_data: Span<usize>, index: Span<usize>, ) -> Span<T> { let mut acc_indices = 0; let mut i = 0; while i != index.len() { acc_indices += *index.at(i) * *stride_data.at(i); i += 1; }; SpanTrait::slice(data, acc_indices, *stride_data.at(index.len() - 1)) }

/// Product of the elements of `pA` from index `start` to the end of the span.
fn prod<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>,>( pA: Span<T>, start: usize ) -> T { let mut i = start; let mut prod = NumberTrait::one(); while i != pA.len() { prod = prod * (*pA.at(i)); i += 1; }; prod }

/// Per-axis sampling borders: first the lower bound of each axis, then the
/// upper bound (-0.5 / dim-0.5 when align_corner == 0, else 0 / dim-1).
fn prepare_border< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>, +Add<T>, +Div<T>, +Sub<T>, +Into<usize, MAG>, +Neg<T> >( self: @Tensor<T>, dims: Span<usize>, align_corner: usize ) -> Span<T> { let num_dims = dims.len(); let mut borders1: Array<T> = array![]; let mut borders2: Array<T> = array![]; let mut i = 0; while i != num_dims { if align_corner == 0 { borders1.append(-NumberTrait::half()); borders2 .append( NumberTrait::new_unscaled((*dims.at(i)).into(), false) - NumberTrait::half() ); } else { borders1.append(NumberTrait::zero()); borders2 .append( NumberTrait::new_unscaled((*dims.at(i)).into(), false) - NumberTrait::one() ); } i += 1; }; borders1.append_span(borders2.span()); borders1.span() }

/// Evenly spaced values in [start, end) with the given step.
/// `end - start` must be an exact multiple of `step`.
fn arange(start: usize, end: usize, step: usize) -> Span<usize> { assert((end - start) % step == 0, 'incompatible step value'); let mut arr: Array<usize> = array![]; let mut i = start; while i != end { arr.append(i); i += step; }; arr.span() }

/// Maps normalized grid coordinates to pixel positions, one axis at a time.
fn gs_denormalize_coordinates< T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>, +Add<T>, +Div<T>, +Sub<T>, +Into<usize, MAG> >( n: Span<T>, dims: Span<usize>, align_corner: usize ) -> Span<T> { let mut x: Array<T> = array![]; let mut i = 0; while i != n.len() { let v = *n.at(i); let dim = *dims.at(i); x.append(gs_denormalize(v, dim, align_corner)); i += 1; }; x.span() }

/// Maps one normalized coordinate `n` (nominally in [-1, 1]) to a pixel
/// position along an axis of the given `length`, honouring `align_corner`.
fn gs_denormalize< T, MAG, +Drop<T>, +Copy<T>,
+NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>, +Add<T>, +Div<T>, +Sub<T>, +Into<usize, MAG> >( n: T, length: usize, align_corner: usize ) -> T { let length = NumberTrait::new_unscaled(length.into(), false); let two: T = NumberTrait::one() + NumberTrait::one(); let x = if align_corner == 0 { ((n + NumberTrait::one()) * length - NumberTrait::one()) / two } else { (n + NumberTrait::one()) / two * (length - NumberTrait::one()) }; x }

/// All index tuples of `shape`, via the cartesian product of per-axis ranges.
fn get_all_coords(shape: Span<usize>) -> Span<Span<usize>> { let mut all_indices = array![]; let mut i = 0; while i != shape.len() { all_indices.append(arange(0, *shape.at(i), 1)); i += 1; }; cartesian(all_indices.span()) }

/// Cartesian product of the given index ranges: returns one coordinate tuple
/// (as a span) per combination, in row-major order, using `repeat`/`repeat_2`
/// to build each axis' tiled column.
fn cartesian(mut arrays: Span<Span<usize>>,) -> Span<Span<usize>> { let mut n = 1; let mut i = arrays.len() - 1; loop { n = n * (*(arrays.at(i))).len(); if i == 0 { break; } i -= 1; }; let mut i = 0; let mut size_arrays: Array<usize> = array![]; while i != arrays.len() { size_arrays.append((*(arrays.at(i))).len()); i += 1; }; let size_arrays = size_arrays.span(); let mut output_arrays = ArrayTrait::<Array<usize>>::new(); let mut m = n; let mut i = 0; while i != arrays.len() { m = m / (*(arrays.at(i))).len(); let mut out = repeat(*(arrays.at(i)), m); out = repeat_2(out, size_arrays, i); output_arrays.append(out); i += 1; }; let output_arrays = output_arrays.span(); let mut i = 0; let mut ret = array![]; while i != n { let mut j = 0; let mut x = ArrayTrait::new(); while j != arrays.len() { x.append(*(output_arrays.at(j)).at(i)); j += 1; }; ret.append(x.span()); i += 1; }; ret.span() }

/// Tiles `array` by repeating its current contents once per element of the
/// preceding axes' sizes (helper for `cartesian`).
fn repeat_2(mut array: Array<usize>, size_array: Span<usize>, index: usize) -> Array<usize> { let mut size = array.len(); let mut i = 0; while i != index { let mut j = 1; while j != *size_array.at(index - 1 - i) { let mut k = 0; while k != size { array.append(*array.at(k)); k += 1; }; j += 1; }; size = size * *size_array.at(index - 1 - i); i += 1; }; array }

/// Repeats each element of `array` `m` times, preserving element order
/// (helper for `cartesian`).
fn repeat(array: Span<usize>, m: usize,) -> Array<usize> { let mut out:
Array<usize> = array![]; let mut j = 0; while j != array.len() { let mut k = 0; while k != m { out.append(*array.at(j)); k += 1; }; j += 1; }; out }
https://github.com/gizatechxyz/orion
src/operators/nn/functional/hard_sigmoid.cairo
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};

/// Cf: NNTrait::hard_sigmoid docstring
///
/// Element-wise HardSigmoid: clamp(alpha * x + beta, 0, 1),
/// returned as a new tensor with the input's shape.
fn hard_sigmoid<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TPartialOrd: PartialOrd<T>,
    impl TAdd: Add<T>,
    impl TMul: Mul<T>,
    impl TDiv: Div<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut x: Tensor<T>, alpha: @T, beta: @T
) -> Tensor<T> {
    let mut output: Array<T> = array![];

    // Drain the input span, transforming one element at a time.
    loop {
        match x.data.pop_front() {
            Option::Some(value) => {
                let scaled = *value * *alpha + *beta;
                // Saturate into [0, 1].
                let clamped = scaled.min(NumberTrait::one()).max(NumberTrait::zero());
                output.append(clamped);
            },
            Option::None => { break; },
        };
    };

    TensorTrait::new(x.shape, output.span())
}
https://github.com/gizatechxyz/orion
src/operators/nn/functional/leaky_relu.cairo
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};

/// Cf: NNTrait::leaky_relu docstring
///
/// Element-wise LeakyReLU: the identity for non-negative inputs,
/// `alpha * x` for negative ones. Requires `alpha < 1`.
fn leaky_relu<
    T,
    MAG,
    impl FNumber: NumberTrait<T, MAG>,
    impl FTensor: TensorTrait<T>,
    impl FPartialOrd: PartialOrd<T>,
    impl FMul: Mul<T>,
    impl FCopy: Copy<T>,
    impl FDrop: Drop<T>,
>(
    mut z: Tensor<T>, alpha: @T
) -> Tensor<T> {
    assert(*alpha < NumberTrait::one(), 'alpha must be less than 1');

    let mut output: Array<T> = array![];

    // Drain the input span, scaling only the negative entries.
    loop {
        match z.data.pop_front() {
            Option::Some(value) => {
                if *value < NumberTrait::zero() {
                    output.append(*value * *alpha);
                } else {
                    output.append(*value);
                };
            },
            Option::None => { break; },
        };
    };

    TensorTrait::new(z.shape, output.span())
}
https://github.com/gizatechxyz/orion
src/operators/nn/functional/linear.cairo
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};

/// Cf: NNTrait::linear docstring
///
/// Affine layer: `weights.matmul(z) + bias` for a 1-D input `z`,
/// a 2-D `weights` matrix, and a 1-D `bias` vector.
fn linear<
    T,
    impl TTensor: TensorTrait<T>,
    impl TAddTensor: Add<Tensor<T>>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    z: Tensor<T>, weights: Tensor<T>, bias: Tensor<T>
) -> Tensor<T> {
    // Guard the expected ranks before doing any tensor math.
    assert(z.shape.len() == 1, 'input tensor must be 1D');
    assert(weights.shape.len() == 2, 'weights tensor must be 2D');
    assert(bias.shape.len() == 1, 'bias tensor must be 1D');

    // Matrix-vector product, shifted by the bias vector.
    weights.matmul(@z) + bias
}
https://github.com/gizatechxyz/orion