# global
import math
import numpy as np
from typing import Optional, Union, Tuple, List, Literal, Sequence, Callable
# local
import ivy
from ivy.functional.ivy.layers import (
_handle_padding,
_get_num_padded_values,
_validate_max_pool_params,
_depth_max_pooling_helper,
)
from ivy.functional.backends.numpy.layers import _add_dilations
from ivy.functional.ivy.experimental.layers import (
_padding_ceil_mode,
)
from ivy.func_wrapper import with_supported_dtypes, with_unsupported_dtypes
from . import backend_version
def _determine_depth_max_pooling(x, kernel, strides, dims, data_format="channel_last"):
kernel, strides, depth_pooling = _depth_max_pooling_helper(
x.shape, kernel, strides, dims=dims, data_format=data_format
)
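    # when the validated kernel only spans the channel axis, pooling happens
    # over depth: move the channels next to the batch axis so the callers'
    # spatial pooling code below reduces over them as the pooled dimension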
if depth_pooling:
x = np.transpose(x, (0, dims + 1, *range(1, dims + 1)))
return x, kernel, strides, depth_pooling
def max_pool1d(
x: np.ndarray,
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
dilation: Union[int, Tuple[int]] = 1,
ceil_mode: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
dims = 1
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
if data_format == "NCW":
x = np.swapaxes(x, 1, 2)
kernel = [kernel[i] for i in [0, 2, 1]] if len(kernel) == (dims + 2) else kernel
strides = (
[strides[i] for i in [0, 2, 1]] if len(strides) == (dims + 2) else strides
)
padding = (
[padding[i] for i in [0, 2, 1]]
if isinstance(padding, list) and len(padding) == (dims + 2)
else padding
)
x, kernel, strides, depth_pooling = _determine_depth_max_pooling(
x, kernel, strides, dims, data_format="channel_last"
)
x_shape = x.shape[1:2]
filters = np.ones((list(kernel)), dtype=x.dtype)
if not depth_pooling:
if dilation[0] > 1:
filters = _add_dilations(filters, dilation[0], axis=0, values=0)
kernel = list(filters.shape)
pad_list = padding
if isinstance(padding, str):
pad_w = _handle_padding(x_shape[0], strides[0], kernel[0], padding)
pad_list = [
(pad_w // 2, pad_w - pad_w // 2),
]
if ceil_mode:
pad_list[0] = _padding_ceil_mode(
x_shape[0], kernel[0], pad_list[0], strides[0]
)
x = np.pad(
x,
[
(0, 0),
*pad_list,
(0, 0),
],
"constant",
constant_values=-math.inf,
)
else:
if isinstance(padding, list) and any(
item != 0 for sublist in padding for item in sublist
):
raise NotImplementedError(
"Nonzero explicit padding is not supported for depthwise max pooling"
)
x_shape = x.shape
new_w = (x_shape[1] - kernel[0]) // strides[0] + 1
new_shape = [x_shape[0], new_w] + list(kernel) + [x_shape[-1]]
new_strides = (
x.strides[0],
x.strides[1] * strides[0],
x.strides[1],
x.strides[2],
)
# B x OW x KW x I
sub_matrices = np.lib.stride_tricks.as_strided(
x, new_shape, new_strides, writeable=False
)
# B x OW x KW x I
sub_matrices = np.where(
filters.reshape([1] * 2 + list(kernel) + [1]), sub_matrices, -math.inf
)
res = sub_matrices.max(axis=(2))
if depth_pooling:
res = np.swapaxes(res, 1, 2)
if data_format == "NCW":
res = np.swapaxes(res, 1, 2)
return res
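# Illustrative sketch (assumes this backend function is called directly with a
# NumPy array in NWC layout; not an exhaustive spec of the supported options):
#
#     >>> x = np.arange(5, dtype=np.float32).reshape(1, 5, 1)
#     >>> max_pool1d(x, 2, 2, "VALID")[..., 0]
#     →  [[1., 3.]]
#
# The two stride-2 windows [0, 1] and [2, 3] reduce to their maxima; the
# trailing sample is dropped because "VALID" adds no padding.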
def max_pool2d(
x: np.ndarray,
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
dilation: Union[int, Tuple[int, ...]] = 1,
ceil_mode: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
dims = 2
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
if data_format == "NCHW":
x = np.transpose(x, (0, 2, 3, 1))
kernel = (
[kernel[i] for i in [0, 2, 3, 1]] if len(kernel) == (dims + 2) else kernel
)
strides = (
[strides[i] for i in [0, 2, 3, 1]]
if len(strides) == (dims + 2)
else strides
)
padding = (
[padding[i] for i in [0, 2, 3, 1]]
if isinstance(padding, list) and len(padding) == (dims + 2)
else padding
)
x, kernel, strides, depth_pooling = _determine_depth_max_pooling(
x, kernel, strides, dims, data_format="channel_last"
)
x_shape = list(x.shape[1:3])
filters = np.ones((list(kernel)), dtype=x.dtype)
if not depth_pooling:
for j in range(dims):
if dilation[j] > 1:
filters = _add_dilations(filters, dilation[j], axis=j, values=0)
kernel = list(filters.shape)
pad_list = padding
if isinstance(padding, str):
pad_h = _handle_padding(x_shape[0], strides[0], kernel[0], padding)
pad_w = _handle_padding(x_shape[1], strides[1], kernel[1], padding)
pad_list = [
(pad_h // 2, pad_h - pad_h // 2),
(pad_w // 2, pad_w - pad_w // 2),
]
pad_list = list(pad_list)
if ceil_mode:
for i in range(dims):
pad_list[i] = _padding_ceil_mode(
x_shape[i], kernel[i], pad_list[i], strides[i]
)
x = np.pad(
x,
[
(0, 0),
*pad_list,
(0, 0),
],
"constant",
constant_values=-math.inf,
)
else:
if isinstance(padding, list) and any(
item != 0 for sublist in padding for item in sublist
):
raise NotImplementedError(
"Nonzero explicit padding is not supported for depthwise max pooling"
)
x_shape = x.shape
new_h = (x_shape[1] - kernel[0]) // strides[0] + 1
new_w = (x_shape[2] - kernel[1]) // strides[1] + 1
new_shape = [x_shape[0], new_h, new_w] + list(kernel) + [x_shape[-1]]
new_strides = (
x.strides[0],
x.strides[1] * strides[0],
x.strides[2] * strides[1],
x.strides[1],
x.strides[2],
x.strides[3],
)
# B x OH x OW x KH x KW x I
sub_matrices = np.lib.stride_tricks.as_strided(
x, new_shape, new_strides, writeable=False
)
# B x OH x OW x KH x KW x I
sub_matrices = np.where(
filters.reshape([1] * 3 + list(kernel) + [1]), sub_matrices, -math.inf
)
# B x OH x OW x O
res = sub_matrices.max(axis=(3, 4))
if depth_pooling:
res = np.transpose(res, (0, 2, 3, 1))
if data_format == "NCHW":
return np.transpose(res, (0, 3, 1, 2))
return res
def max_pool3d(
x: np.ndarray,
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
dilation: Union[int, Tuple[int, ...]] = 1,
ceil_mode: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
dims = 3
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
if data_format == "NCDHW":
x = np.transpose(x, (0, 2, 3, 4, 1))
kernel = (
[kernel[i] for i in [0, 2, 3, 4, 1]]
if len(kernel) == (dims + 2)
else kernel
)
strides = (
[strides[i] for i in [0, 2, 3, 4, 1]]
if len(strides) == (dims + 2)
else strides
)
padding = (
[padding[i] for i in [0, 2, 3, 4, 1]]
if isinstance(padding, list) and len(padding) == (dims + 2)
else padding
)
x, kernel, strides, depth_pooling = _determine_depth_max_pooling(
x, kernel, strides, dims, data_format="channel_last"
)
x_shape = x.shape[1:4]
filters = np.ones((list(kernel)), dtype=x.dtype)
if not depth_pooling:
for j in range(dims):
if dilation[j] > 1:
filters = _add_dilations(filters, dilation[j], axis=j, values=0)
kernel = list(filters.shape)
pad_list = padding
if isinstance(padding, str):
pad_d = _handle_padding(x_shape[0], strides[0], kernel[0], padding)
pad_h = _handle_padding(x_shape[1], strides[1], kernel[1], padding)
pad_w = _handle_padding(x_shape[2], strides[2], kernel[2], padding)
pad_list = [
(pad_d // 2, pad_d - pad_d // 2),
(pad_h // 2, pad_h - pad_h // 2),
(pad_w // 2, pad_w - pad_w // 2),
]
pad_list = list(pad_list)
if ceil_mode:
for i in range(dims):
pad_list[i] = _padding_ceil_mode(
x_shape[i], kernel[i], pad_list[i], strides[i]
)
x = np.pad(
x,
[
(0, 0),
*pad_list,
(0, 0),
],
"constant",
constant_values=-math.inf,
)
else:
if isinstance(padding, list) and any(
item != 0 for sublist in padding for item in sublist
):
raise NotImplementedError(
"Nonzero explicit padding is not supported for depthwise max pooling"
)
x_shape = x.shape
new_d = (x_shape[1] - kernel[0]) // strides[0] + 1
new_h = (x_shape[2] - kernel[1]) // strides[1] + 1
new_w = (x_shape[3] - kernel[2]) // strides[2] + 1
new_shape = [x_shape[0], new_d, new_h, new_w] + list(kernel) + [x_shape[-1]]
new_strides = (
x.strides[0],
x.strides[1] * strides[0],
x.strides[2] * strides[1],
x.strides[3] * strides[2],
x.strides[1],
x.strides[2],
x.strides[3],
x.strides[4],
)
# B x OD x OH x OW x KD x KH x KW x I
sub_matrices = np.lib.stride_tricks.as_strided(
x, new_shape, new_strides, writeable=False
)
# B x OD x OH x OW x KD x KH x KW x I
sub_matrices = np.where(
filters.reshape([1] * 4 + list(kernel) + [1]), sub_matrices, -math.inf
)
# B x OD x OH x OW x O
res = sub_matrices.max(axis=(4, 5, 6))
if depth_pooling:
res = np.transpose(res, (0, 2, 3, 4, 1))
if data_format == "NCDHW":
return np.transpose(res, (0, 4, 1, 2, 3))
return res
def _get_padded_values(x_shape, kernel, strides, padding, ceil_mode, dim):
if isinstance(padding, str):
pad_specific = [
_handle_padding(x_shape[i], strides[i], kernel[i], padding)
for i in range(dim)
]
padding = [
(pad_specific[i] // 2, pad_specific[i] - pad_specific[i] // 2)
for i in range(dim)
]
else:
if isinstance(padding, int):
padding = [(padding,) * 2] * dim
pad_specific = [sum(padding[i]) for i in range(dim)]
c = []
if ceil_mode:
for i in range(dim):
padding[i], c_i = _padding_ceil_mode(
x_shape[i], kernel[i], padding[i], strides[i], True
)
c.append(c_i)
pad_specific[i] = sum(padding[i])
return padding, pad_specific, c
def avg_pool1d(
x: np.ndarray,
kernel: Union[int, Tuple[int]],
strides: Union[int, Tuple[int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if isinstance(kernel, int):
kernel = [kernel]
elif len(kernel) == 1:
kernel = [kernel[0]]
if isinstance(strides, int):
strides = [strides]
elif len(strides) == 1:
strides = [strides[0]]
if data_format in ("NCW", "NCL"):
x = np.swapaxes(x, 1, 2)
x_shape = x.shape[1:-1]
padding, pad_specific, c = _get_padded_values(
x_shape, kernel, strides, padding, ceil_mode, 1
)
x = np.pad(
x,
[
(0, 0),
*padding,
(0, 0),
],
constant_values=0.0,
)
x_shape = x.shape
new_w = (x_shape[1] - kernel[0]) // strides[0] + 1
new_shape = [x_shape[0], new_w, kernel[0]] + [x_shape[-1]]
new_strides = (
x.strides[0],
x.strides[1] * strides[0],
x.strides[1],
x.strides[2],
)
sub_matrices = np.lib.stride_tricks.as_strided(
x, new_shape, new_strides, writeable=False
)
res = np.mean(sub_matrices, axis=2)
if (not count_include_pad or ceil_mode) and any(pad_specific):
if not count_include_pad:
num_padded_values = np.array(
ivy.map(
_get_num_padded_values,
constant={
"p": pad_specific[0],
"n": x.shape[1] - pad_specific[0],
"k": kernel[0],
"s": strides[0],
},
unique={
"i": np.arange(res.shape[1]),
},
),
dtype=res.dtype,
)
else:
num_padded_values = np.zeros(res.shape[1], dtype=res.dtype)
num_padded_values[-1] = c[0]
res = (kernel[0] * res) / (kernel[0] - num_padded_values[:, None])
if data_format in ("NCW", "NCL"):
return res.swapaxes(1, 2)
return res
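# Illustrative sketch of count_include_pad (assumes explicit per-dimension
# padding given as (before, after) pairs, which _get_padded_values accepts):
#
#     >>> x = np.arange(1.0, 5.0).reshape(1, 4, 1)    # values 1..4, NWC layout
#     >>> avg_pool1d(x, 2, 2, [(1, 1)])[..., 0]
#     →  [[1., 2.5, 4.]]
#
# With count_include_pad=False (the default) the zero padding is excluded from
# each window's divisor, so the edge windows average only the real samples.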
def avg_pool2d(
x: np.ndarray,
kernel: Union[int, Tuple[int], Tuple[int, int]],
strides: Union[int, Tuple[int], Tuple[int, int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if isinstance(kernel, int):
kernel = [kernel] * 2
elif len(kernel) == 1:
kernel = [kernel[0]] * 2
if isinstance(strides, int):
strides = [strides] * 2
elif len(strides) == 1:
strides = [strides[0]] * 2
if data_format == "NCHW":
x = np.transpose(x, (0, 2, 3, 1))
x_shape = list(x.shape[1:3])
padding, pad_specific, c = _get_padded_values(
x_shape, kernel, strides, padding, ceil_mode, 2
)
x = np.pad(
x,
[
(0, 0),
*padding,
(0, 0),
],
constant_values=0.0,
)
x_shape = x.shape
new_h = (x_shape[1] - kernel[0]) // strides[0] + 1
new_w = (x_shape[2] - kernel[1]) // strides[1] + 1
new_shape = [x_shape[0], new_h, new_w] + list(kernel) + [x_shape[-1]]
new_strides = (
x.strides[0],
x.strides[1] * strides[0],
x.strides[2] * strides[1],
x.strides[1],
x.strides[2],
x.strides[3],
)
# B x OH x OW x KH x KW x I
sub_matrices = np.lib.stride_tricks.as_strided(
x, new_shape, new_strides, writeable=False
)
# B x OH x OW x O
if divisor_override is not None:
res = np.sum(sub_matrices, axis=(3, 4)) / divisor_override
else:
res = np.mean(sub_matrices, axis=(3, 4))
if (
(not count_include_pad or ceil_mode)
and any(pad_specific)
and not divisor_override
):
if not count_include_pad:
num_padded_values = [
np.array(
ivy.map(
_get_num_padded_values,
constant={
"p": pad_specific[i],
"n": x.shape[i + 1] - pad_specific[i],
"k": kernel[i],
"s": strides[i],
},
unique={
"i": np.arange(res.shape[i + 1]),
},
),
dtype=res.dtype,
)
for i in range(2)
]
else:
num_padded_values = []
for i in range(2):
num_pad = np.zeros(res.shape[i + 1], dtype=res.dtype)
num_pad[-1] = c[i]
num_padded_values.append(num_pad)
num_padded_values1 = num_padded_values[0][:, None]
num_padded_values2 = num_padded_values[1][None, :]
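        # inclusion-exclusion count of padded cells per 2-D window: padded rows
        # span kernel[1] columns, padded columns span kernel[0] rows, and the
        # doubly counted corner block is subtracted once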
num_padded_values = (
num_padded_values1 * kernel[1]
+ num_padded_values2 * kernel[0]
- num_padded_values1 * num_padded_values2
)
kernel_mul = np.prod(kernel)
res = (kernel_mul * res) / (kernel_mul - np.expand_dims(num_padded_values, -1))
if data_format == "NCHW":
return np.transpose(res, (0, 3, 1, 2))
return res
def avg_pool3d(
x: np.ndarray,
kernel: Union[int, Tuple[int], Tuple[int, int, int]],
strides: Union[int, Tuple[int], Tuple[int, int, int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if isinstance(kernel, int):
kernel = [kernel] * 3
elif len(kernel) == 1:
kernel = [kernel[0]] * 3
if isinstance(strides, int):
strides = [strides] * 3
elif len(strides) == 1:
strides = [strides[0]] * 3
if data_format == "NCDHW":
x = np.transpose(x, (0, 2, 3, 4, 1))
x_shape = list(x.shape[1:4])
padding, pad_specific, c = _get_padded_values(
x_shape, kernel, strides, padding, ceil_mode, 3
)
x = np.pad(
x,
[
(0, 0),
*padding,
(0, 0),
],
constant_values=0.0,
)
x_shape = x.shape
new_d = (x_shape[1] - kernel[0]) // strides[0] + 1
new_h = (x_shape[2] - kernel[1]) // strides[1] + 1
new_w = (x_shape[3] - kernel[2]) // strides[2] + 1
new_shape = [x_shape[0], new_d, new_h, new_w] + list(kernel) + [x_shape[-1]]
new_strides = (
x.strides[0],
x.strides[1] * strides[0],
x.strides[2] * strides[1],
x.strides[3] * strides[2],
x.strides[1],
x.strides[2],
x.strides[3],
x.strides[4],
)
    # B x OD x OH x OW x KD x KH x KW x I
sub_matrices = np.lib.stride_tricks.as_strided(
x, new_shape, new_strides, writeable=False
)
    # B x OD x OH x OW x O
if divisor_override is not None:
res = np.sum(sub_matrices, axis=(4, 5, 6)) / divisor_override
else:
res = np.mean(sub_matrices, axis=(4, 5, 6))
if (
(not count_include_pad or ceil_mode)
and any(pad_specific)
and not divisor_override
):
if not count_include_pad:
num_padded_values = [
np.array(
ivy.map(
_get_num_padded_values,
constant={
"p": pad_specific[i],
"n": x.shape[i + 1] - pad_specific[i],
"k": kernel[i],
"s": strides[i],
},
unique={
"i": np.arange(res.shape[i + 1]),
},
),
dtype=res.dtype,
)
for i in range(3)
]
else:
num_padded_values = []
for i in range(3):
num_pad = np.zeros(res.shape[i + 1], dtype=res.dtype)
num_pad[-1] = c[i]
num_padded_values.append(num_pad)
num_padded_values1 = num_padded_values[0].reshape((-1, 1, 1))
num_padded_values2 = num_padded_values[1].reshape((1, -1, 1))
num_padded_values3 = num_padded_values[2].reshape((1, 1, -1))
num_padded_values = (
num_padded_values1 * kernel[1] * kernel[2]
+ num_padded_values2 * kernel[0] * kernel[2]
+ num_padded_values3 * kernel[0] * kernel[1]
+ num_padded_values1 * num_padded_values2 * num_padded_values3
- num_padded_values1 * num_padded_values2 * kernel[2]
- num_padded_values1 * num_padded_values3 * kernel[1]
- num_padded_values2 * num_padded_values3 * kernel[0]
)
kernel_mul = np.prod(kernel)
res = (kernel_mul * res) / (kernel_mul - np.expand_dims(num_padded_values, -1))
if data_format == "NCDHW":
return np.transpose(res, (0, 4, 1, 2, 3))
return res
def fft(
x: np.ndarray,
dim: int,
/,
*,
norm: str = "backward",
n: Optional[Union[int, Tuple[int]]] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if not isinstance(dim, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(dim)}"
)
if n is None:
n = x.shape[dim]
if n < -len(x.shape):
raise ivy.utils.exceptions.IvyError(
f"Invalid dim {dim}, expecting ranging"
" from {-len(x.shape)} to {len(x.shape)-1} "
)
if not isinstance(n, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(n)}"
)
if n <= 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {n}, expecting more than 1"
)
if norm not in {"backward", "ortho", "forward"}:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
if x.dtype in [np.uint64, np.int64, np.float64, np.complex128]:
out_dtype = np.complex128
else:
out_dtype = np.complex64
return np.fft.fft(x, n, dim, norm).astype(out_dtype)
@with_supported_dtypes({"1.26.3 and below": ("float32", "float64")}, backend_version)
def dct(
x: np.ndarray,
/,
*,
type: Literal[1, 2, 3, 4] = 2,
n: Optional[int] = None,
axis: int = -1,
norm: Optional[Literal["ortho"]] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if norm not in (None, "ortho"):
raise ValueError("Norm must be either None or 'ortho'")
if axis < 0:
axis = axis + len(x.shape)
if n is not None:
signal_len = x.shape[axis]
if n <= signal_len:
local_idx = [slice(None)] * len(x.shape)
local_idx[axis] = slice(None, n)
x = x[tuple(local_idx)]
else:
pad_idx = [[0, 0] for _ in range(len(x.shape))]
pad_idx[axis][1] = n - signal_len
x = np.pad(x, pad_idx)
real_zero = np.array(0.0, dtype=x.dtype)
axis_dim = x.shape[axis]
axis_dim_float = np.array(axis_dim, dtype=x.dtype)
cast_final = True if x.dtype != np.float64 else False
if type == 1:
if norm:
raise ValueError("Normalization not supported for type-I DCT")
axis_idx = [slice(None)] * len(x.shape)
axis_idx[axis] = slice(-2, 0, -1)
x = np.concatenate([x, x[tuple(axis_idx)]], axis=axis)
dct_out = np.real(np.fft.rfft(x, axis=axis))
elif type == 2:
cmplx = np.empty(axis_dim, dtype=np.complex64)
cmplx.real = real_zero
cmplx.imag = -np.arange(axis_dim_float) * math.pi * 0.5 / axis_dim_float
scale_dims = [1] * len(x.shape)
scale_dims[axis] = axis_dim
scale = 2.0 * np.exp(cmplx).reshape(scale_dims)
axis_idx = [slice(None)] * len(x.shape)
axis_idx[axis] = slice(None, axis_dim)
dct_out = np.real(
np.fft.rfft(x, n=2 * axis_dim, axis=axis)[tuple(axis_idx)] * scale
)
if norm == "ortho":
n1 = 0.5 * np.reciprocal(np.sqrt(axis_dim_float))
n2 = n1 * math.sqrt(2.0)
sf = np.pad(np.expand_dims(n1, 0), (0, axis_dim - 1), constant_values=n2)
dct_out = sf.reshape(scale_dims) * dct_out
elif type == 3:
cmplx = np.empty(axis_dim, dtype=np.complex64)
cmplx.real = real_zero
cmplx.imag = np.arange(axis_dim_float) * math.pi * 0.5 / axis_dim_float
scale_dims = [1] * len(x.shape)
scale_dims[axis] = axis_dim
scale = 2.0 * np.exp(cmplx).reshape(scale_dims)
if norm == "ortho":
n1 = np.sqrt(axis_dim_float)
n2 = n1 * np.sqrt(0.5)
sf = np.pad(np.expand_dims(n1, 0), (0, axis_dim - 1), constant_values=n2)
x = x * sf.reshape(scale_dims)
else:
x = x * axis_dim_float
axis_idx = [slice(None)] * len(x.shape)
axis_idx[axis] = slice(None, axis_dim)
x = x.astype(np.complex64)
x.imag = real_zero
dct_out = np.real(np.fft.irfft(scale * x, n=2 * axis_dim, axis=axis))[
tuple(axis_idx)
]
elif type == 4:
dct_2 = dct(x, type=2, n=2 * axis_dim, axis=axis, norm=None)
axis_idx = [slice(None)] * len(x.shape)
axis_idx[axis] = slice(1, None, 2)
dct_out = dct_2[tuple(axis_idx)]
if norm == "ortho":
dct_out *= math.sqrt(0.5) * np.reciprocal(np.sqrt(axis_dim_float))
return dct_out.astype(np.float32) if cast_final else dct_out
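# Illustrative sketch: an (unnormalized) type-II DCT of a constant signal puts
# all of its energy in the zeroth coefficient.
#
#     >>> dct(np.ones(4))
#     →  approximately [8., 0., 0., 0.]
#
# With norm="ortho" the same input gives approximately [2., 0., 0., 0.].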
def idct(
x: np.ndarray,
/,
*,
type: Literal[1, 2, 3, 4] = 2,
n: Optional[int] = None,
axis: int = -1,
norm: Optional[Literal["ortho"]] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]
return dct(x, type=inverse_type, n=n, axis=axis, norm=norm, out=out)
def dropout1d(
x: np.ndarray,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NWC",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if training:
x_shape = x.shape
is_batched = len(x_shape) == 3
if data_format == "NCW":
perm = (0, 2, 1) if is_batched else (1, 0)
x = np.transpose(x, perm)
x_shape = x.shape
mask = np.random.binomial(1, 1 - prob, x_shape)
res = np.where(mask, x / (1 - prob), 0)
if data_format == "NCW":
res = np.transpose(res, perm)
else:
res = x
return res
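# Note (sketch): this is "inverted" dropout -- surviving elements are scaled by
# 1 / (1 - prob) during training so each activation keeps its expected value,
# and no rescaling is needed at inference time (training=False returns x as is).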
def dropout2d(
x: np.ndarray,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NHWC",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if training:
x_shape = x.shape
is_batched = len(x_shape) == 4
if data_format == "NCHW":
perm = (0, 2, 3, 1) if is_batched else (1, 2, 0)
x = np.transpose(x, perm)
x_shape = x.shape
mask = np.random.binomial(1, 1 - prob, x_shape)
res = np.where(mask, x / (1 - prob), 0)
if data_format == "NCHW":
perm = (0, 3, 1, 2) if is_batched else (2, 0, 1)
res = np.transpose(res, perm)
else:
res = x
return res
def dropout3d(
x: np.ndarray,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NDHWC",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if training:
x_shape = x.shape
is_batched = len(x_shape) == 5
if data_format == "NCDHW":
perm = (0, 2, 3, 4, 1) if is_batched else (1, 2, 3, 0)
x = np.transpose(x, perm)
x_shape = x.shape
mask = np.random.binomial(1, 1 - prob, x_shape)
res = np.where(mask, x / (1 - prob), 0)
if data_format == "NCDHW":
perm = (0, 4, 1, 2, 3) if is_batched else (3, 0, 1, 2)
res = np.transpose(res, perm)
else:
res = x
return res
def ifft(
x: np.ndarray,
dim: int,
*,
norm: str = "backward",
n: Optional[Union[int, Tuple[int]]] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if not isinstance(dim, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(dim)}"
)
if n is None:
n = x.shape[dim]
if n < -len(x.shape):
raise ivy.utils.exceptions.IvyError(
f"Invalid dim {dim}, expecting ranging"
" from {-len(x.shape)} to {len(x.shape)-1} "
)
if not isinstance(n, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(n)}"
)
if n <= 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {n}, expecting more than 1"
)
if norm not in {"backward", "ortho", "forward"}:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
return np.asarray(np.fft.ifft(x, n, dim, norm), dtype=x.dtype)
def fft2(
x: np.ndarray,
*,
s: Optional[Sequence[int]] = None,
dim: Sequence[int] = (-2, -1),
norm: str = "backward",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
ivy.utils.assertions.check_elem_in_list(
norm,
["backward", "ortho", "forward"],
message=f"Unrecognized normalization mode {norm}",
)
if not all(isinstance(j, int) for j in dim):
raise ivy.utils.exceptions.IvyError(
f"Expecting {dim} to be a sequence of integers <class integer>"
)
if s is None:
s = (x.shape[dim[0]], x.shape[dim[1]])
if all(j < -len(x.shape) for j in s):
raise ivy.utils.exceptions.IvyError(
f"Invalid dim {dim}, expecting ranging"
" from {-len(x.shape)} to {len(x.shape)-1} "
)
if not all(isinstance(j, int) for j in s):
raise ivy.utils.exceptions.IvyError(
f"Expecting {s} to be a sequence of integers <class integer>"
)
if all(j <= 1 for j in s):
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {s}, expecting s points larger than 1"
)
return np.fft.fft2(x, s, dim, norm).astype(np.complex128)
def ifftn(
x: np.ndarray,
s: Optional[Union[int, Tuple[int]]] = None,
axes: Optional[Union[int, Tuple[int]]] = None,
*,
norm: str = "backward",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.fft.ifftn(x, s, axes, norm).astype(x.dtype)
@with_unsupported_dtypes({"1.26.3 and below": ("complex",)}, backend_version)
def embedding(
weights: np.ndarray,
indices: np.ndarray,
/,
*,
max_norm: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
ivy.utils.assertions.check_equal(
len(weights.shape), 2, message="weights must be 2-d", as_array=False
)
embeddings = np.take(weights, indices, axis=0)
if max_norm is not None:
norms = np.linalg.norm(embeddings, axis=-1, keepdims=True)
embeddings = np.where(
norms > max_norm, embeddings * max_norm / norms, embeddings
)
embeddings = np.where(
norms < -max_norm, embeddings * -max_norm / norms, embeddings
)
return embeddings
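# Illustrative sketch: rows of `weights` are gathered by index and, when
# max_norm is given, rescaled so that no returned row exceeds that norm.
#
#     >>> w = np.array([[3.0, 4.0], [1.0, 0.0]])
#     >>> embedding(w, np.array([0, 1, 0]), max_norm=1.0)
#     →  [[0.6, 0.8], [1., 0.], [0.6, 0.8]]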
def rfft(
x: np.ndarray,
/,
*,
n: Optional[int] = None,
axis: int = -1,
norm: Literal["backward", "ortho", "forward"] = "backward",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
x = x.real
ret = np.fft.rfft(x, n=n, axis=axis, norm=norm)
if x.dtype != np.float64:
ret = ret.astype(np.complex64)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def rfftn(
x: np.ndarray,
s: Optional[Sequence[int]] = None,
axes: Optional[Sequence[int]] = None,
*,
norm: str = "backward",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if not all(isinstance(j, int) for j in axes):
raise ivy.utils.exceptions.IvyError(
f"Expecting {axes} to be a sequence of integers <class integer>"
)
if s is None:
s = (x.shape[axes[0]], x.shape[axes[1]])
if all(j < -len(x.shape) for j in s):
raise ivy.utils.exceptions.IvyError(
f"Invalid dim {axes}, expecting ranging"
f" from {-len(x.shape)} to {len(x.shape)-1} "
)
if not all(isinstance(j, int) for j in s):
raise ivy.utils.exceptions.IvyError(
f"Expecting {s} to be a sequence of integers <class integer>"
)
if all(j <= 1 for j in s):
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {s}, expecting s points larger than 1"
)
if norm not in {"backward", "ortho", "forward"}:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
return np.fft.rfftn(x, s, axes, norm).astype(np.complex128)
# stft
def stft(
signals: np.ndarray,
frame_length: int,
frame_step: int,
/,
*,
fft_length: Optional[int] = None,
window_fn: Optional[Callable] = None,
pad_end: Optional[bool] = False,
name: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if not isinstance(frame_length, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(frame_length)}"
)
if frame_length < 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {frame_length}, expecting frame_length larger than or"
" equal to 1"
)
if not isinstance(frame_step, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(frame_step)}"
)
if frame_step < 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {frame_length}, expecting frame_length larger than or"
" equal to 1"
)
if fft_length is not None:
if not isinstance(fft_length, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(fft_length)}"
)
if fft_length < 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {frame_length}, expecting frame_length larger"
" than or equal to 1"
)
input_dtype = signals.dtype
    if input_dtype == np.float64:
        dtype = np.complex128
    else:
        # default to single-precision complex for float32 (and any other) input
        dtype = np.complex64
def stft_1D(signals, frame_length, frame_step, fft_length, pad_end):
if fft_length is None:
fft_length = 1
while fft_length < frame_length:
fft_length *= 2
num_samples = signals.shape[-1]
if pad_end:
num_samples = signals.shape[-1]
num_frames = -(-num_samples // frame_step)
pad_length = max(
0, frame_length + frame_step * (num_frames - 1) - num_samples
)
signals = np.pad(signals, [(0, pad_length)])
else:
num_frames = 1 + (num_samples - frame_length) // frame_step
stft_result = []
if window_fn is None:
window = 1
else:
window = window_fn(frame_length)
for i in range(num_frames):
start = i * frame_step
end = start + frame_length
frame = signals[..., start:end]
windowed_frame = frame * window
pad_length = fft_length - frame_length
windowed_frame = np.pad(windowed_frame, [(0, pad_length)])
windowed_frame = np.array(windowed_frame, dtype=dtype)
fft_frame = np.fft.fft(windowed_frame, axis=-1)
slit = int(fft_length // 2 + 1)
stft_result.append(fft_frame[..., 0:slit])
stft = np.stack(stft_result, axis=0)
return stft
def stft_helper(nested_list, frame_length, frame_step, fft_length):
nested_list = nested_list
if len(np.shape(nested_list)) > 1:
return [
stft_helper(sublist, frame_length, frame_step, fft_length)
for sublist in nested_list
]
else:
return stft_1D(nested_list, frame_length, frame_step, fft_length, pad_end)
to_return = stft_helper(signals, frame_length, frame_step, fft_length)
return np.array(to_return, dtype=dtype)
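# Shape sketch (assumes a 1-D float32 signal, no window_fn and pad_end=False):
# 16 samples with frame_length=4 and frame_step=2 give 1 + (16 - 4) // 2 = 7
# frames, fft_length defaults to the next power of two (4), and each frame
# keeps 4 // 2 + 1 = 3 non-negative-frequency bins.
#
#     >>> stft(np.zeros(16, dtype=np.float32), 4, 2).shape
#     →  (7, 3)        # dtype complex64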
# --- end of file: ivy/ivy/functional/backends/numpy/experimental/layers.py ---
# global
from collections import namedtuple
from typing import Union, Optional, Tuple, Literal, List, NamedTuple, Sequence
import numpy as np
# local
import ivy
from ivy import inf
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.backends.numpy.helpers import _scalar_output_to_0d_array
from . import backend_version
# Array API Standard #
# -------------------#
@with_unsupported_dtypes({"1.26.3 and below": ("float16", "complex")}, backend_version)
def cholesky(
x: np.ndarray, /, *, upper: bool = False, out: Optional[np.ndarray] = None
) -> np.ndarray:
if not upper:
ret = np.linalg.cholesky(x)
else:
axes = list(range(len(x.shape) - 2)) + [len(x.shape) - 1, len(x.shape) - 2]
ret = np.transpose(np.linalg.cholesky(np.transpose(x, axes=axes)), axes=axes)
return ret
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def cross(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
axisa: int = -1,
axisb: int = -1,
axisc: int = -1,
axis: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.cross(a=x1, b=x2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)
@_scalar_output_to_0d_array
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def det(x: np.ndarray, /, *, out: Optional[np.ndarray] = None) -> np.ndarray:
return np.linalg.det(x)
def diagonal(
x: np.ndarray,
/,
*,
offset: int = 0,
axis1: int = -2,
axis2: int = -1,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.diagonal(x, offset=offset, axis1=axis1, axis2=axis2)
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def eigh(
x: np.ndarray, /, *, UPLO: str = "L", out: Optional[np.ndarray] = None
) -> Tuple[np.ndarray]:
result_tuple = NamedTuple(
"eigh", [("eigenvalues", np.ndarray), ("eigenvectors", np.ndarray)]
)
eigenvalues, eigenvectors = np.linalg.eigh(x, UPLO=UPLO)
return result_tuple(eigenvalues, eigenvectors)
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def eigvalsh(
x: np.ndarray, /, *, UPLO: str = "L", out: Optional[np.ndarray] = None
) -> np.ndarray:
return np.linalg.eigvalsh(x, UPLO=UPLO)
@_scalar_output_to_0d_array
def inner(
x1: np.ndarray, x2: np.ndarray, /, *, out: Optional[np.ndarray] = None
) -> np.ndarray:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return np.inner(x1, x2)
@with_unsupported_dtypes(
{"1.26.3 and below": ("bfloat16", "float16", "complex")},
backend_version,
)
def inv(
x: np.ndarray,
/,
*,
adjoint: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if adjoint:
if x.ndim < 2:
raise ValueError("Input must be at least 2D")
permutation = list(range(x.ndim))
permutation[-2], permutation[-1] = permutation[-1], permutation[-2]
x_adj = np.transpose(x, permutation).conj()
return np.linalg.inv(x_adj)
return np.linalg.inv(x)
@with_unsupported_dtypes({"1.26.3 and below": ("float16", "bfloat16")}, backend_version)
def matmul(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
transpose_a: bool = False,
transpose_b: bool = False,
adjoint_a: bool = False,
adjoint_b: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if transpose_a:
x1 = np.swapaxes(x1, -1, -2)
if transpose_b:
x2 = np.swapaxes(x2, -1, -2)
if adjoint_a:
x1 = np.swapaxes(np.conjugate(x1), -1, -2)
if adjoint_b:
x2 = np.swapaxes(np.conjugate(x2), -1, -2)
ret = np.matmul(x1, x2, out=out)
if len(x1.shape) == len(x2.shape) == 1:
ret = np.array(ret)
return ret
matmul.support_native_out = True
@_scalar_output_to_0d_array
@with_unsupported_dtypes({"1.26.3 and below": ("float16", "bfloat16")}, backend_version)
def matrix_norm(
x: np.ndarray,
/,
*,
ord: Union[int, float, Literal[inf, -inf, "fro", "nuc"]] = "fro",
axis: Tuple[int, int] = (-2, -1),
keepdims: bool = False,
dtype: Optional[np.dtype] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if dtype is not None:
x = ivy.astype(x, dtype)
if not isinstance(axis, tuple):
axis = tuple(axis)
return np.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
def matrix_power(
x: np.ndarray, n: int, /, *, out: Optional[np.ndarray] = None
) -> np.ndarray:
return np.linalg.matrix_power(x, n)
@with_unsupported_dtypes(
{"1.26.3 and below": ("float16", "bfloat16", "complex")},
backend_version,
)
@_scalar_output_to_0d_array
def matrix_rank(
x: np.ndarray,
/,
*,
atol: Optional[Union[float, Tuple[float]]] = None,
rtol: Optional[Union[float, Tuple[float]]] = None,
hermitian: Optional[bool] = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if (x.ndim < 2) or (0 in x.shape):
return np.asarray(0, np.int64)
# we don't use the native matrix_rank function because the behaviour of the
# tolerance argument is difficult to unify,
# and the native implementation is compositional
svd_values = np.linalg.svd(x, hermitian=hermitian, compute_uv=False)
sigma = np.max(svd_values, axis=-1, keepdims=False)
atol = (
atol if atol is not None else np.finfo(x.dtype).eps * max(x.shape[-2:]) * sigma
)
rtol = rtol if rtol is not None else 0.0
tol = np.maximum(atol, rtol * sigma)
# make sure it's broadcastable again with svd_values
tol = np.expand_dims(tol, axis=-1)
ret = np.count_nonzero(svd_values > tol, axis=-1)
return ret
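# Illustrative sketch of the tolerance logic above: a rank-1 2x2 matrix.
#
#     >>> matrix_rank(np.array([[1.0, 2.0], [2.0, 4.0]]))
#     →  1
#
# The singular values are roughly [5, 0]; the default atol of
# eps * max(shape) * sigma discards the vanishing one, leaving a count of 1.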
def matrix_transpose(
x: np.ndarray, /, *, conjugate: bool = False, out: Optional[np.ndarray] = None
) -> np.ndarray:
if conjugate:
x = np.conjugate(x)
return np.swapaxes(x, -1, -2)
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def outer(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return np.outer(x1, x2, out=out)
outer.support_native_out = True
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def pinv(
x: np.ndarray,
/,
*,
rtol: Optional[Union[float, Tuple[float]]] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if rtol is None:
return np.linalg.pinv(x)
else:
return np.linalg.pinv(x, rtol)
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def qr(
x: np.ndarray,
/,
*,
mode: str = "reduced",
out: Optional[Tuple[np.ndarray, np.ndarray]] = None,
) -> Tuple[np.ndarray, np.ndarray]:
res = namedtuple("qr", ["Q", "R"])
q, r = np.linalg.qr(x, mode=mode)
return res(q, r)
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def slogdet(
x: np.ndarray,
/,
) -> Tuple[np.ndarray, np.ndarray]:
results = NamedTuple("slogdet", [("sign", np.ndarray), ("logabsdet", np.ndarray)])
sign, logabsdet = np.linalg.slogdet(x)
sign = np.asarray(sign) if not isinstance(sign, np.ndarray) else sign
logabsdet = (
np.asarray(logabsdet) if not isinstance(logabsdet, np.ndarray) else logabsdet
)
return results(sign, logabsdet)
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def solve(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
adjoint: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if adjoint:
x1 = np.swapaxes(np.conjugate(x1), -1, -2)
expanded_last = False
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
if len(x2.shape) <= 1:
if x2.shape[-1] == x1.shape[-1]:
expanded_last = True
x2 = np.expand_dims(x2, axis=1)
for i in range(len(x1.shape) - len(x2.shape)):
x2 = np.expand_dims(x2, axis=0)
ret = np.linalg.solve(x1, x2)
if expanded_last:
ret = np.squeeze(ret, axis=-1)
return ret
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def svd(
x: np.ndarray, /, *, compute_uv: bool = True, full_matrices: bool = True
) -> Union[np.ndarray, Tuple[np.ndarray, ...]]:
if compute_uv:
results = namedtuple("svd", "U S Vh")
U, D, VT = np.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)
return results(U, D, VT)
else:
results = namedtuple("svd", "S")
D = np.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)
return results(D)
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def svdvals(
x: np.ndarray, /, *, driver: Optional[str] = None, out: Optional[np.ndarray] = None
) -> np.ndarray:
# TODO: handling the driver argument
return np.linalg.svd(x, compute_uv=False)
def tensorsolve(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
axes: Optional[Union[int, Tuple[List[int], List[int]]]] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.linalg.tensorsolve(x1, x2, axes=axes)
@with_unsupported_dtypes({"1.25.2 and below": ("float16", "bfloat16")}, backend_version)
def tensordot(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
axes: Union[int, Tuple[List[int], List[int]]] = 2,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return np.tensordot(x1, x2, axes=axes)
@_scalar_output_to_0d_array
@with_unsupported_dtypes({"1.26.3 and below": ("float16", "bfloat16")}, backend_version)
def trace(
x: np.ndarray,
/,
*,
offset: int = 0,
axis1: int = 0,
axis2: int = 1,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.trace(x, offset=offset, axis1=axis1, axis2=axis2, out=out)
trace.support_native_out = True
def vecdot(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
axis: int = -1,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return np.tensordot(x1, x2, axes=(axis, axis))
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def eig(x: np.ndarray, /, *, out: Optional[np.ndarray] = None) -> Tuple[np.ndarray]:
result_tuple = NamedTuple(
"eig", [("eigenvalues", np.ndarray), ("eigenvectors", np.ndarray)]
)
eigenvalues, eigenvectors = np.linalg.eig(x)
return result_tuple(eigenvalues, eigenvectors)
def vector_norm(
x: np.ndarray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
ord: Union[int, float, Literal[inf, -inf]] = 2,
dtype: Optional[np.dtype] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if dtype and x.dtype != dtype:
x = x.astype(dtype)
abs_x = np.abs(x)
if isinstance(axis, list):
axis = tuple(axis)
if ord == 0:
return np.sum(
(abs_x != 0).astype(abs_x.dtype), axis=axis, keepdims=keepdims, out=out
)
elif ord == inf:
return np.max(abs_x, axis=axis, keepdims=keepdims, out=out)
elif ord == -inf:
return np.min(abs_x, axis=axis, keepdims=keepdims, out=out)
else:
# There is a rounding error whenever the input is a 0-dim
# The solution at the moment is to convert the 0-dim array to 1-dim
# and then convert it back to 0-dim in case of keepdims=True
if x.ndim == 0:
abs_x = np.expand_dims(abs_x, axis=0)
res = (
np.sum(abs_x**ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
).astype(abs_x.dtype)
if keepdims and x.ndim == 0:
res = np.squeeze(res)
return res
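# Sketch of the 0-dim workaround above: the Euclidean norm of a scalar is its
# absolute value, and the expand/squeeze round trip keeps the result 0-dim.
#
#     >>> vector_norm(np.array(-3.0), keepdims=True)
#     →  array(3.)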
# Extra #
# ----- #
def diag(
x: np.ndarray,
/,
*,
k: int = 0,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.diag(x, k=k)
@with_unsupported_dtypes({"1.24.0 and below": ("complex",)}, backend_version)
def vander(
x: np.ndarray,
/,
*,
N: Optional[int] = None,
increasing: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.vander(x, N=N, increasing=increasing).astype(x.dtype)
@with_unsupported_dtypes(
{
"1.26.3 and below": (
"complex",
"unsigned",
)
},
backend_version,
)
def vector_to_skew_symmetric_matrix(
vector: np.ndarray, /, *, out: Optional[np.ndarray] = None
) -> np.ndarray:
batch_shape = list(vector.shape[:-1])
# BS x 3 x 1
vector_expanded = np.expand_dims(vector, -1)
# BS x 1 x 1
a1s = vector_expanded[..., 0:1, :]
a2s = vector_expanded[..., 1:2, :]
a3s = vector_expanded[..., 2:3, :]
# BS x 1 x 1
zs = np.zeros(batch_shape + [1, 1], dtype=vector.dtype)
# BS x 1 x 3
row1 = np.concatenate((zs, -a3s, a2s), -1)
row2 = np.concatenate((a3s, zs, -a1s), -1)
row3 = np.concatenate((-a2s, a1s, zs), -1)
# BS x 3 x 3
return np.concatenate((row1, row2, row3), -2, out=out)
vector_to_skew_symmetric_matrix.support_native_out = True
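# Illustrative sketch: for v = [a1, a2, a3] the result is the cross-product
# matrix [v]_x, so that vector_to_skew_symmetric_matrix(v) @ w == np.cross(v, w).
#
#     >>> vector_to_skew_symmetric_matrix(np.array([1.0, 2.0, 3.0]))
#     →  [[ 0., -3.,  2.],
#         [ 3.,  0., -1.],
#         [-2.,  1.,  0.]]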
# --- end of file: ivy/ivy/functional/backends/numpy/linear_algebra.py ---
# global
import math
from typing import Optional, Union
import paddle
import ivy
import ivy.functional.backends.paddle as paddle_backend
from ivy import promote_types_of_inputs
from ivy.func_wrapper import (
with_supported_device_and_dtypes,
with_supported_dtypes,
with_unsupported_device_and_dtypes,
with_unsupported_dtypes,
)
# local
from . import backend_version
def _elementwise_helper(x1, x2):
if (not hasattr(x1, "dtype") or not hasattr(x2, "dtype")) or (x1.dtype != x2.dtype):
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
# the following was needed in versions <=2.4.2 because most functions didn't
# accept 0D inputs along other inputs
# if x1.shape != x2.shape:
# x1, x2 = paddle_backend.broadcast_arrays(x1, x2)
return x1, x2, x1.dtype
@with_unsupported_dtypes(
{"2.6.0 and below": ("int8", "int16", "uint8", "float16", "bool", "bfloat16")},
backend_version,
)
def add(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
alpha: Optional[Union[int, float]] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if alpha not in (1, None):
x2 = paddle_backend.multiply(x2, alpha)
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return paddle.add(x1, x2).astype(ret_dtype)
def bitwise_xor(
x1: Union[int, bool, paddle.Tensor],
x2: Union[int, bool, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
return paddle.bitwise_xor(x1, x2)
@with_supported_dtypes(
{
"2.6.0 and below": (
"float16",
"float32",
"float64",
)
},
backend_version,
)
def expm1(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
return paddle.expm1(x)
def bitwise_invert(
x: Union[int, bool, paddle.Tensor], /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return paddle.bitwise_not(x)
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"uint8",
"complex64",
"complex128",
"bool",
)
}
},
backend_version,
)
def isfinite(
x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return paddle.isfinite(x)
@with_unsupported_dtypes(
{"2.6.0 and below": ("complex", "uint8")},
backend_version,
)
def isinf(
x: paddle.Tensor,
/,
*,
detect_positive: bool = True,
detect_negative: bool = True,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if not ivy.is_complex_dtype(x):
if detect_negative and detect_positive:
return paddle.isinf(x)
if detect_negative:
return paddle_backend.equal(x, float("-inf"))
if detect_positive:
return paddle_backend.equal(x, float("inf"))
return paddle.zeros(shape=x.shape, dtype=bool)
@with_unsupported_dtypes(
{"2.6.0 and below": ("bfloat16",)},
backend_version,
)
def equal(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if paddle.is_complex(x1):
real = paddle.equal(x1.real(), x2.real())
imag = paddle.equal(x1.imag(), x2.imag())
return paddle_backend.logical_and(real, imag)
return paddle.equal(x1, x2)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "float32", "float64", "int32", "int64", "complex")},
backend_version,
)
def less_equal(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if isinstance(x1, paddle.Tensor) and isinstance(x2, paddle.Tensor):
if paddle.is_complex(x1) and paddle.is_complex(x2):
real = paddle.less_equal(x1.real(), x2.real())
imag = paddle.less_equal(x1.imag(), x2.imag())
return paddle_backend.logical_and(real, imag)
return paddle.less_equal(x1, x2)
def bitwise_and(
x1: Union[int, bool, paddle.Tensor],
x2: Union[int, bool, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
return paddle.bitwise_and(x1, x2)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "complex")},
backend_version,
)
def ceil(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
return paddle.complex(paddle.ceil(x.real()), paddle.ceil(x.imag()))
return paddle.ceil(x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "complex")},
backend_version,
)
def floor(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
return paddle.complex(paddle.floor(x.real()), paddle.floor(x.imag()))
return paddle.floor(x)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"float32",
"float64",
)
}
},
backend_version,
)
def asin(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
return paddle.asin(x)
@with_supported_dtypes(
{
"2.6.0 and below": (
"float32",
"float64",
)
},
backend_version,
)
def asinh(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
return paddle.asinh(x)
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float16", "float32", "float64", "complex")}},
backend_version,
)
def sign(
x: paddle.Tensor,
/,
*,
np_variant: Optional[bool] = True,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.sgn(x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "complex")}, backend_version
)
def sqrt(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
"""Calculate the square root with type handling."""
if paddle.is_complex(x):
angle = paddle.angle(x)
return paddle.complex(
paddle.cos(angle / 2), paddle.sin(angle / 2)
) * paddle.sqrt(paddle.abs(x))
return paddle.sqrt(x)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"float32",
"float64",
)
}
},
backend_version,
)
def cosh(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
return paddle.cosh(x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "complex")}, backend_version
)
def log10(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
base = paddle.to_tensor(10.0).squeeze()
return paddle_backend.divide(
paddle_backend.log(x), paddle_backend.log(base)
).astype(x.dtype)
return paddle.log10(x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "complex")},
backend_version,
)
def log2(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
base = paddle.to_tensor(2.0).squeeze()
return paddle_backend.divide(
paddle_backend.log(x), paddle_backend.log(base)
).astype(x.dtype)
return paddle.log2(x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "complex")},
backend_version,
)
def log1p(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
return paddle.complex(paddle.log1p(paddle.abs(x)), paddle.angle(x + 1))
return paddle.log1p(x)
@with_supported_dtypes(
{
"2.6.0 and below": (
"float",
"int32",
"int64",
"complex",
)
},
backend_version,
)
def isnan(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
return paddle.logical_or(paddle.isnan(x.real()), paddle.isnan(x.imag()))
return paddle.isnan(x)
@with_unsupported_dtypes(
{
"2.6.0 and below": (
"int8",
"uint8",
)
},
backend_version,
)
def less(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if isinstance(x1, paddle.Tensor) and isinstance(x2, paddle.Tensor):
if paddle.is_complex(x1) and paddle.is_complex(x2):
real = paddle.less_than(x1.real(), x2.real())
imag = paddle.less_than(x1.imag(), x2.imag())
            return paddle.logical_and(real, imag)
return paddle.less_than(x1, x2)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "int32", "int64", "float32", "float64", "complex")},
backend_version,
)
def multiply(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if isinstance(x1, paddle.Tensor) and isinstance(x2, paddle.Tensor):
if paddle.is_complex(x1) or paddle.is_complex(x2):
a, b = x1.real(), x1.imag()
c, d = x2.real(), x2.imag()
real = a * c - b * d
imag = a * d + b * c
return paddle.complex(real, imag)
return paddle.multiply(x1, x2).astype(ret_dtype)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"float32",
"float64",
)
}
},
backend_version,
)
def cos(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
return paddle.cos(x)
@with_unsupported_dtypes({"2.6.0 and below": ("uint8", "float16")}, backend_version)
def logical_not(
x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
if paddle.is_complex(x):
return paddle.logical_and(
paddle.logical_not(x.real()), paddle.logical_not(x.imag())
)
return paddle.logical_not(x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64", "complex")},
backend_version,
)
def divide(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if isinstance(x1, paddle.Tensor) and isinstance(x2, paddle.Tensor):
if paddle.is_complex(x1) or paddle.is_complex(x2):
angle_value = paddle.angle(x1) - paddle.angle(x2)
abs_value = paddle.abs(x1) / paddle.abs(x2)
return paddle.complex(
abs_value * paddle.cos(angle_value), abs_value * paddle.sin(angle_value)
)
x1, x2, _ = _elementwise_helper(x1, x2)
return x1 / x2
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")},
backend_version,
)
def fmin(
x1: paddle.Tensor,
x2: paddle.Tensor,
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if x1.dtype != x2.dtype:
x1, x2 = promote_types_of_inputs(x1, x2)
return paddle.fmin(x1, x2)
def _apply_for_real_and_imag(fn, x1, x2):
return fn(
fn(x1.real(), x2.real()),
fn(x1.imag(), x2.imag()),
)
@with_supported_dtypes(
{
"2.6.0 and below": (
"bool",
"float32",
"float64",
"int16",
"int32",
"int64",
"complex",
)
},
backend_version,
)
def greater(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if isinstance(x1, paddle.Tensor) and isinstance(x2, paddle.Tensor):
if paddle.is_complex(x1) and paddle.is_complex(x2):
real = paddle.greater_than(x1.real(), x2.real())
imag = paddle.greater_than(x1.imag(), x2.imag())
return paddle.logical_and(real, imag)
return paddle.greater_than(x1, x2)
@with_supported_dtypes(
{
"2.6.0 and below": (
"bool",
"float32",
"float64",
"int16",
"int32",
"int64",
"complex",
)
},
backend_version,
)
def greater_equal(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if isinstance(x1, paddle.Tensor) and isinstance(x2, paddle.Tensor):
if paddle.is_complex(x1) and paddle.is_complex(x2):
real = paddle.greater_equal(x1.real(), x2.real())
imag = paddle.greater_equal(x1.imag(), x2.imag())
return paddle.logical_and(real, imag)
return paddle.greater_equal(x1, x2)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"float32",
"float64",
"complex",
)
}
},
backend_version,
)
def acos(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
# From https://github.com/python/cpython/blob/39ef93edb9802dccdb6555d4209ac2e60875a011/Modules/cmathmodule.c#L178 # noqa
s1 = paddle_backend.sqrt(1 - x)
s2 = paddle_backend.sqrt(1 + x)
return paddle.complex(
2.0 * paddle.atan2(s1.real(), s2.real()),
paddle.asinh(s2.real() * s1.imag() - s2.imag() * s1.real()),
)
return paddle.acos(x)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("bool", "float32", "int32", "float64", "int64", "complex")
}
},
backend_version,
)
def logical_xor(
x1: paddle.Tensor, x2: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if paddle.is_complex(x1):
x1 = paddle.cast(x1, paddle.bool)
x2 = paddle.cast(x2, paddle.bool)
return paddle.logical_xor(x1, x2)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("bool", "float32", "int32", "float64", "int64", "complex")
}
},
backend_version,
)
def logical_and(
x1: paddle.Tensor, x2: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if paddle.is_complex(x1):
return _apply_for_real_and_imag(paddle.logical_and, x1, x2)
return paddle.logical_and(x1, x2)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "float32", "int32", "float64", "int64", "complex")},
backend_version,
)
def logical_or(
x1: paddle.Tensor, x2: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if paddle.is_complex(x1):
return _apply_for_real_and_imag(paddle.logical_or, x1, x2)
return paddle.logical_or(x1, x2)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"float32",
"float64",
"complex",
)
}
},
backend_version,
)
def acosh(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
# From https://github.com/python/cpython/blob/39ef93edb9802dccdb6555d4209ac2e60875a011/Modules/cmathmodule.c#L221 # noqa
s1 = paddle_backend.sqrt(paddle.complex(x.real() - 1, x.imag()))
s2 = paddle_backend.sqrt(paddle.complex(x.real() + 1, x.imag()))
return paddle.complex(
paddle.asinh(s1.real() * s2.real() + s1.imag() * s2.imag()),
2.0 * paddle.atan2(s1.imag(), s2.real()),
)
return paddle.acosh(x)
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64", "complex")}},
backend_version,
)
def sin(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
re = x.real()
im = x.imag()
return paddle.complex(
paddle.sin(re) * paddle.cosh(im), paddle.cos(re) * paddle.sinh(im)
)
return paddle.sin(x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int8", "int16", "int32", "int64")},
backend_version,
)
def negative(
x: Union[float, paddle.Tensor], /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return paddle.neg(x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")},
backend_version,
)
def not_equal(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.logical_not(paddle_backend.equal(x1, x2))
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64", "complex")}},
backend_version,
)
def tanh(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
tanh_a = paddle.tanh(x.real())
tan_b = paddle.tan(x.imag())
return paddle.divide(
paddle.complex(tanh_a, tan_b),
paddle.complex(
paddle.ones_like(tanh_a),
paddle.multiply(tanh_a, tan_b),
),
)
return paddle.tanh(x)
@with_supported_dtypes(
{
"2.6.0 and below": (
"uint8",
"int8",
"int32",
"int64",
"float32",
"float64",
"float16",
"bfloat16",
)
},
backend_version,
)
def floor_divide(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
return paddle.floor_divide(x1, x2)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "uint8", "int8", "int16", "int32", "int64")},
backend_version,
)
def bitwise_or(
x1: Union[int, bool, paddle.Tensor],
x2: Union[int, bool, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
return paddle.bitwise_or(x1, x2)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "complex")}, backend_version
)
def sinh(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
re = x.real()
im = x.imag()
return paddle.complex(
paddle.sinh(re) * paddle.cos(im), paddle.cosh(re) * paddle.sin(im)
)
return paddle.sinh(x)
def positive(
x: Union[float, paddle.Tensor], /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
if not isinstance(x, paddle.Tensor):
x = paddle.to_tensor(
x, dtype=ivy.default_dtype(item=x, as_native=True)
).squeeze()
return x.clone()
@with_supported_dtypes(
{
"2.6.0 and below": (
"int32",
"int64",
"float32",
"float64",
"complex",
)
},
backend_version,
)
def square(
x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return paddle.square(x)
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64", "int32", "int64", "complex")}},
backend_version,
)
def pow(
x1: paddle.Tensor,
x2: Union[int, float, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if paddle.is_complex(x1):
# https://math.stackexchange.com/questions/476968/complex-power-of-a-complex-number
r = paddle.abs(x1)
theta = paddle.angle(x1)
res_mag = paddle.pow(r, x2.real()) / paddle.exp(x2.imag() * theta)
res_ang = paddle.log(r) * x2.imag() + theta * x2.real()
result = res_mag * paddle.complex(paddle.cos(res_ang), paddle.sin(res_ang))
return result.astype(ret_dtype)
return paddle.pow(x1, x2)
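# Illustrative check of the complex branch above (expected value, assuming standard
# type promotion): with x2 = c + d*i, the code uses |x1**x2| = r**c * e**(-d*theta)
# and arg(x1**x2) = c*theta + d*log(r), so for example
#   pow(paddle.to_tensor(1j, dtype="complex64"), 2)  # -> (-1+0j), since r = 1, theta = pi/2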
# Implementation based on TensorFlow's scalar_round_half_to_even_op logic
# Reference: https://github.com/tensorflow/tensorflow/blob/7f1050a6976d11bfb0bb37bdfc82350c0a238faa/tensorflow/core/kernels/cwise_ops.h#L510 # noqa: E501
def _round_half_to_even(x):
round_val = paddle_backend.floor(x + 0.5)
fraction = round_val - x
# Identify elements with a fractional part of 0.5
mask = paddle_backend.equal(fraction, paddle.to_tensor(0.5, dtype=fraction.dtype))
# Round to the nearest even number if the fraction is 0.5
even_round_val = 2 * paddle_backend.floor(0.5 * x + 0.5)
# Combine the results
return paddle.where(mask, even_round_val, round_val)
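# Illustrative behaviour of the helper above (expected values): exact .5 fractions are
# sent to the nearest even integer, i.e. numpy-style "banker's rounding", e.g.
#   _round_half_to_even(paddle.to_tensor([0.5, 1.5, 2.5, 3.5]))  # -> [0., 2., 2., 4.]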
# This function aims to mimic the behavior of np.round similar to how tf.experimental.numpy.round does # noqa: E501
# Reference for tf.experimental.numpy.round:https://github.com/tensorflow/tensorflow/blob/v2.13.0/tensorflow/python/ops/numpy_ops/np_array_ops.py#L724 # noqa: E501
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("bfloat16", "float16", "complex")}}, backend_version
)
def round(
x: paddle.Tensor, /, *, decimals: int = 0, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
x = paddle.to_tensor(x, dtype=x.dtype)
dtype_ = x.dtype
    factor = paddle.to_tensor(math.pow(10, decimals))
    # Handle floating-point and complex inputs
    if paddle.is_floating_point(x) or paddle.is_complex(x):
        factor = paddle.cast(factor, dtype_)
else:
float_dtype_ = paddle.float32 # paddle.get_default_dtype()
x = x.astype(float_dtype_)
factor = paddle.cast(factor, float_dtype_)
x = paddle.multiply(x, factor)
x = _round_half_to_even(x)
x = paddle.divide(x, factor)
return x.astype(dtype_)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "complex")}, backend_version
)
def trunc(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
return paddle.complex(paddle.trunc(x.real()), paddle.trunc(x.imag()))
return paddle.trunc(x)
@with_supported_dtypes({"2.6.0 and below": ("float64", "float32")}, backend_version)
def trapz(
y: paddle.Tensor,
/,
*,
x: Optional[paddle.Tensor] = None,
dx: Optional[float] = 1.0,
axis: Optional[int] = -1,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if x is None:
d = dx
else:
if x.ndim == 1:
d = paddle.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = paddle.diff(x, axis=axis)
slice1 = [slice(None)] * y.ndim
slice2 = [slice(None)] * y.ndim
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
with ivy.ArrayMode(False):
if y.shape[axis] < 2:
return ivy.zeros_like(ivy.squeeze(y, axis=axis))
ret = ivy.sum(
ivy.divide(
ivy.multiply(
d,
ivy.add(
ivy.get_item(y, tuple(slice1)), ivy.get_item(y, tuple(slice2))
),
),
2.0,
),
axis=axis,
)
return ret
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64", "int32", "int64", "complex")}},
backend_version,
)
def abs(
x: Union[float, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if not isinstance(x, paddle.Tensor):
x = paddle.to_tensor(x, dtype=ivy.default_dtype(item=x)).squeeze()
return paddle.abs(x)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float16",)}}, backend_version
)
def logaddexp(
x1: paddle.Tensor, x2: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
amax = paddle_backend.maximum(x1, x2)
return amax + paddle_backend.log(
paddle_backend.exp(x1 - amax) + paddle_backend.exp(x2 - amax)
).astype(ret_dtype)
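# Numerical-stability sketch for the implementation above (expected value): pulling the
# maximum out keeps both exponentials bounded by 1, so large inputs do not overflow, e.g.
#   logaddexp(paddle.to_tensor(1000.0), paddle.to_tensor(1000.0))  # -> ~1000.6931, i.e. 1000 + log(2)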
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float16",)}}, backend_version
)
def logaddexp2(
x1: Union[paddle.Tensor, float, list, tuple],
x2: Union[paddle.Tensor, float, list, tuple],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
with ivy.ArrayMode(False):
return ivy.log2(ivy.exp2(x1) + ivy.exp2(x2))
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"int32",
"int64",
"uint8",
"float16",
"float32",
"float64",
"bool",
)
}
},
backend_version,
)
def real(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
return paddle.real(x)
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64", "complex")}},
backend_version,
)
def tan(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
tanh_ix = paddle_backend.tanh(paddle.complex(-x.imag(), x.real()))
return paddle.complex(tanh_ix.imag(), -tanh_ix.real())
return paddle.tan(x)
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64", "complex")}},
backend_version,
)
def atan(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if x.dtype in [paddle.complex64, paddle.complex128]:
atanh_iz = paddle_backend.atanh(paddle.complex(-x.imag(), x.real()))
return paddle.complex(atanh_iz.imag(), -atanh_iz.real())
return paddle.atan(x)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int32",
"int64",
"float32",
"float64",
)
}
},
backend_version,
)
def atan2(
x1: paddle.Tensor, x2: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
return paddle.atan2(x1, x2).astype(ret_dtype)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "complex")},
backend_version,
)
def log(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
return paddle.complex(paddle.log(paddle.abs(x)), paddle.angle(x))
return paddle.log(x)
@with_supported_dtypes(
{"2.6.0 and below": ("int32", "int64", "float32", "float64", "complex")},
backend_version,
)
def exp(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
return paddle.multiply(
paddle.exp(x.real()),
paddle.complex(paddle.cos(x.imag()), paddle.sin(x.imag())),
)
return paddle.exp(x)
@with_supported_dtypes(
{"2.6.0 and below": ("int32", "int64", "float32", "float64", "complex")},
backend_version,
)
def exp2(
x: Union[paddle.Tensor, float, list, tuple],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
with ivy.ArrayMode(False):
return ivy.pow(2, x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, backend_version
)
def subtract(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
alpha: Optional[Union[int, float]] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if alpha not in (1, None):
x2 = paddle_backend.multiply(x2, alpha)
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return paddle.subtract(x1, x2).astype(ret_dtype)
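# Illustrative use of the optional ``alpha`` scaling above (expected value):
#   subtract(paddle.to_tensor(10.0), paddle.to_tensor(3.0), alpha=2)  # -> 4.0, i.e. x1 - alpha * x2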
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64", "int32", "int64")}},
backend_version,
)
def remainder(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
modulus: bool = True,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if not modulus:
res = paddle_backend.divide(x1, x2)
res_floored = paddle_backend.where(
paddle_backend.greater_equal(res, 0.0),
paddle_backend.floor(res),
paddle_backend.ceil(res),
)
diff = paddle_backend.subtract(res, res_floored).astype(res.dtype)
return paddle_backend.round(paddle_backend.multiply(diff, x2)).astype(x1.dtype)
return paddle.remainder(x1, x2).astype(ret_dtype)
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64", "complex")}},
backend_version,
)
def atanh(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
return 0.5 * (paddle_backend.log(1 + x) - paddle_backend.log(1 - x))
return paddle.atanh(x)
def bitwise_right_shift(
x1: Union[int, bool, paddle.Tensor],
x2: Union[int, bool, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
return paddle.floor(x1.astype("float64") / 2 ** x2.astype("float64")).astype(
ret_dtype
)
def bitwise_left_shift(
x1: Union[int, bool, paddle.Tensor],
x2: Union[int, bool, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
return paddle.floor(x1.astype("float64") * 2 ** x2.astype("float64")).astype(
ret_dtype
)
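# Both shifts above are emulated in floating point as x1 * 2**x2 (left) and
# floor(x1 / 2**x2) (right) before casting back. Illustrative expected values:
#   bitwise_right_shift(paddle.to_tensor(8), paddle.to_tensor(2))  # -> 2
#   bitwise_left_shift(paddle.to_tensor(3), paddle.to_tensor(4))   # -> 48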
# Extra #
# ------#
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, backend_version)
def erf(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
return paddle.erf(x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64", "complex")},
backend_version,
)
def minimum(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
use_where: bool = True,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if paddle.is_complex(x1):
real_comparison = paddle.real(x1) < paddle.real(x2)
imag_comparison = paddle_backend.logical_and(
paddle.real(x1) == paddle.real(x2), paddle.imag(x1) < paddle.imag(x2)
)
return paddle_backend.where(
paddle_backend.logical_or(real_comparison, imag_comparison), x1, x2
).astype(ret_dtype)
if use_where:
return paddle_backend.where(paddle_backend.less_equal(x1, x2), x1, x2).astype(
ret_dtype
)
return paddle.minimum(x1, x2).astype(ret_dtype)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64", "complex")},
backend_version,
)
def maximum(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
use_where: bool = True,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if paddle.is_complex(x1):
real_comparison = paddle.real(x1) > paddle.real(x2)
imag_comparison = paddle_backend.logical_and(
paddle.real(x1) == paddle.real(x2), paddle.imag(x1) > paddle.imag(x2)
)
return paddle_backend.where(
paddle_backend.logical_or(real_comparison, imag_comparison), x1, x2
).astype(ret_dtype)
if use_where:
return paddle_backend.where(
paddle_backend.greater_equal(x1, x2), x1, x2
).astype(ret_dtype)
return paddle.maximum(x1, x2).astype(ret_dtype)
@with_supported_dtypes(
{
"2.6.0 and below": (
"float32",
"float64",
)
},
backend_version,
)
def reciprocal(
x: Union[float, paddle.Tensor], /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return paddle.reciprocal(x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, backend_version
)
def deg2rad(
x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return paddle.deg2rad(x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, backend_version
)
def rad2deg(
x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return paddle.rad2deg(x)
def trunc_divide(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle_backend.trunc(paddle_backend.divide(x1, x2))
def isreal(
x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
if paddle.is_complex(x):
return paddle.logical_not(x.imag().astype(bool))
else:
return paddle.ones_like(x, dtype="bool")
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64", "complex")},
backend_version,
)
def fmod(
x1: paddle.Tensor,
x2: paddle.Tensor,
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
res = paddle_backend.remainder(paddle_backend.abs(x1), paddle_backend.abs(x2))
return paddle_backend.where(paddle_backend.less(x1, 0), -res, res)
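# Illustrative expected values for fmod above: the magnitude comes from the absolute
# remainder while the sign follows x1, matching C/NumPy fmod semantics, e.g.
#   fmod(paddle.to_tensor(-7.0), paddle.to_tensor(5.0))  # -> -2.0
#   fmod(paddle.to_tensor(7.0), paddle.to_tensor(-5.0))  # ->  2.0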
@with_supported_dtypes({"2.6.0 and below": ("int32", "int64")}, backend_version)
def lcm(
x1: paddle.Tensor,
x2: paddle.Tensor,
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.lcm(x1, x2)
@with_supported_dtypes(
{
"2.6.0 and below": (
"float32",
"float64",
"complex",
)
},
backend_version,
)
def angle(
input: paddle.Tensor,
/,
*,
deg: Optional[bool] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
result = paddle.angle(input)
if deg:
result = paddle.rad2deg(result)
return result
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("int32", "int64")}}, backend_version
)
def gcd(
x1: Union[paddle.Tensor, int, list, tuple],
x2: Union[paddle.Tensor, float, list, tuple],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2 = promote_types_of_inputs(x1, x2)
return paddle.gcd(x1, x2)
@with_supported_dtypes({"2.6.0 and below": ("complex",)}, backend_version)
def imag(
val: paddle.Tensor,
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.imag(val)
def nan_to_num(
x: paddle.Tensor,
/,
*,
copy: Optional[bool] = True,
nan: Optional[Union[float, int]] = 0.0,
posinf: Optional[Union[float, int]] = None,
neginf: Optional[Union[float, int]] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
with ivy.ArrayMode(False):
if ivy.is_int_dtype(x):
if posinf is None:
posinf = ivy.iinfo(x).max
if neginf is None:
neginf = ivy.iinfo(x).min
elif ivy.is_float_dtype(x) or ivy.is_complex_dtype(x):
if posinf is None:
posinf = ivy.finfo(x).max
if neginf is None:
neginf = ivy.finfo(x).min
ret = ivy.where(ivy.isnan(x), paddle.to_tensor(nan, dtype=x.dtype), x)
ret = ivy.where(
ivy.logical_and(ivy.isinf(ret), ret > 0),
paddle.to_tensor(posinf, dtype=x.dtype),
ret,
)
ret = ivy.where(
ivy.logical_and(ivy.isinf(ret), ret < 0),
paddle.to_tensor(neginf, dtype=x.dtype),
ret,
)
if copy:
return ret.clone()
else:
x = ret
return x
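# Illustrative expected behaviour of nan_to_num above (float32 input, default arguments):
# NaNs become 0.0 and infinities clamp to the dtype's finite extremes, e.g.
#   nan_to_num(paddle.to_tensor([float("nan"), float("inf"), -float("inf"), 1.0]))
#   # -> [0., finfo(float32).max, finfo(float32).min, 1.]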
| ivy/ivy/functional/backends/paddle/elementwise.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/elementwise.py",
"repo_id": "ivy",
"token_count": 17923
} | 24 |
# global
from typing import Optional, Union, Sequence
import paddle
from ivy import with_unsupported_device_and_dtypes
from ivy.functional.backends.paddle import backend_version
from ivy.utils.exceptions import IvyNotImplementedException
from ivy.functional.ivy.random import _check_bounds_and_get_shape
# local
import ivy
from paddle.device import core
from ivy import with_supported_device_and_dtypes
# dirichlet
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"uint8",
"float16",
"complex64",
"complex128",
"bool",
)
}
},
backend_version,
)
def dirichlet(
alpha: Union[paddle.Tensor, float, Sequence[float]],
/,
*,
size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
out: Optional[paddle.Tensor] = None,
seed: Optional[int] = None,
dtype: Optional[paddle.dtype] = None,
) -> paddle.Tensor:
size = size if size is not None else len(alpha)
dtype = dtype if dtype is not None else paddle.float64
if seed is not None:
paddle.seed(seed)
res = paddle.to_tensor(
paddle.distribution.Dirichlet(concentration=alpha).sample(shape=size),
dtype=dtype,
)
return res
# beta
def beta(
alpha: Union[float, paddle.Tensor],
beta: Union[float, paddle.Tensor],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,
device: core.Place = None,
seed: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if seed is not None:
paddle.seed(seed)
shape = _check_bounds_and_get_shape(alpha, beta, shape)
dtype = paddle.float32 if dtype is None else dtype
beta = paddle.cast(beta, alpha.dtype)
dist = paddle.distribution.Beta(alpha, beta)
sample = dist.sample(shape)
sample = paddle.cast(sample, dtype)
return sample
def gamma(
alpha: Union[float, paddle.Tensor],
beta: Union[float, paddle.Tensor],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,
device: core.Place = None,
seed: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
raise IvyNotImplementedException()
def poisson(
lam: Union[float, paddle.Tensor],
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: core.Place = None,
dtype: paddle.dtype = None,
seed: Optional[int] = None,
fill_value: Optional[Union[float, int]] = 0,
out: Optional[paddle.Tensor] = None,
):
raise IvyNotImplementedException()
# bernoulli
@with_supported_device_and_dtypes(
{
"2.5.0 and above": {
"cpu": ("float32", "float64"),
"gpu": ("bfloat16", "float16", "float32", "float64"),
},
"2.4.2 and below": {
"cpu": (
"float32",
"float64",
),
"gpu": ("float16", "float32", "float64"),
},
},
backend_version,
)
def bernoulli(
probs: Union[float, paddle.Tensor],
*,
logits: Union[float, paddle.Tensor] = None,
shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None,
device: core.Place = None,
dtype: paddle.dtype,
seed: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
dtype = dtype if dtype is not None else probs.dtype
if seed is not None:
paddle.seed(seed)
    if probs is None and logits is not None:
        probs = ivy.softmax(logits)
probs = paddle.cast(probs, dtype)
squeeze = len(probs.shape) == 0
probs = paddle.unsqueeze(probs, 0) if squeeze else probs
probs = paddle.maximum(probs, paddle.full_like(probs, 1e-6))
sample = paddle.bernoulli(probs)
sample = paddle.squeeze(sample, 0) if squeeze else sample
return sample
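# Minimal usage sketch for bernoulli above (assumes a working paddle install; samples
# are random unless a seed is fixed):
#   bernoulli(paddle.to_tensor([0.2, 0.8]), dtype=paddle.float32, seed=0)
#   # -> tensor of 0./1. values, with each 1 drawn at the given per-element probability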
| ivy/ivy/functional/backends/paddle/experimental/random.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/experimental/random.py",
"repo_id": "ivy",
"token_count": 1787
} | 25 |
# global
import paddle
from typing import Optional, Union
# local
import ivy
from ivy.func_wrapper import with_unsupported_device_and_dtypes, with_supported_dtypes
from . import backend_version
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, backend_version
)
def argsort(
x: paddle.Tensor,
/,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.argsort(x, axis=axis, descending=descending)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, backend_version
)
def sort(
x: paddle.Tensor,
/,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.sort(x, axis=axis, descending=descending)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, backend_version
)
def searchsorted(
x: paddle.Tensor,
v: paddle.Tensor,
/,
*,
side="left",
sorter=None,
ret_dtype=paddle.int64,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
    right = side == "right"
assert ivy.is_int_dtype(ret_dtype), TypeError(
"only Integer data types are supported for ret_dtype."
)
if sorter is not None:
assert ivy.is_int_dtype(sorter.dtype), TypeError(
f"Only signed integer data type for sorter is allowed, got {sorter.dtype}."
)
if ivy.as_native_dtype(sorter.dtype) not in [paddle.int32, paddle.int64]:
sorter = sorter.cast(paddle.int64)
x = paddle.take_along_axis(x, sorter, axis=-1)
if x.ndim != 1:
assert x.shape[:-1] == v.shape[:-1], RuntimeError(
"the first N-1 dimensions of x array and v array "
f"must match, got {x.shape} and {v.shape}"
)
return paddle.searchsorted(x, v, right=right).cast(ret_dtype)
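# Illustrative expected values for searchsorted above (1-D case):
#   searchsorted(paddle.to_tensor([1.0, 3.0, 5.0, 7.0]), paddle.to_tensor([5.0]))                # -> [2]
#   searchsorted(paddle.to_tensor([1.0, 3.0, 5.0, 7.0]), paddle.to_tensor([5.0]), side="right")  # -> [3]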
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("int8", "uint8", "int16", "float16", "complex")}},
backend_version,
)
def msort(
a: Union[paddle.Tensor, list, tuple], /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return paddle.sort(a, axis=0)
| ivy/ivy/functional/backends/paddle/sorting.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/sorting.py",
"repo_id": "ivy",
"token_count": 986
} | 26 |
import operator
from typing import Union, Optional, Tuple, List, Sequence
from numbers import Number
import tensorflow as tf
from tensorflow.python.ops.numpy_ops import np_math_ops
# local
import ivy
from ivy import promote_types_of_inputs
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from .. import backend_version
@with_unsupported_dtypes(
{
"2.13.0 and below": (
"complex64",
"complex128",
)
},
backend_version,
)
def amax(
x: tf.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[tf.Tensor] = None,
) -> tf.Tensor:
axis = tuple(axis) if isinstance(axis, list) else axis
return tf.experimental.numpy.amax(x, axis=axis, keepdims=keepdims)
@with_unsupported_dtypes(
{
"2.13.0 and below": (
"complex64",
"complex128",
)
},
backend_version,
)
def amin(
x: tf.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[tf.Tensor] = None,
) -> tf.Tensor:
axis = tuple(axis) if isinstance(axis, list) else axis
return tf.experimental.numpy.amin(x, axis=axis, keepdims=keepdims)
@with_supported_dtypes(
{"2.15.0 and below": ("float16", "float32", "float64")},
backend_version,
)
def lgamma(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.lgamma(x)
def sinc(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x = ivy.pi * x
return tf.cast(tf.where(x == 0, 1, tf.math.sin(x) / x), x.dtype)
@with_supported_dtypes(
{"2.15.0 and below": ("bfloat16", "float16", "float32", "float64")}, backend_version
)
def fmax(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = promote_types_of_inputs(x1, x2)
x1 = tf.where(tf.math.is_nan(x1), x2, x1)
x2 = tf.where(tf.math.is_nan(x2), x1, x2)
return tf.experimental.numpy.maximum(x1, x2)
@with_unsupported_dtypes(
{"2.15.0 and below": ("uint8", "uint16", "uint32", "uint64")}, backend_version
)
def float_power(
x1: Union[tf.Tensor, tf.Variable, float, list, tuple],
x2: Union[tf.Tensor, tf.Variable, float, list, tuple],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
if ivy.any(ivy.is_complex_dtype(x1)) or ivy.any(ivy.is_complex_dtype(x2)):
out_dtype = tf.complex128
else:
out_dtype = tf.float64
return tf.cast(tf.experimental.numpy.float_power(x1, x2), out_dtype)
def copysign(
x1: Union[tf.Tensor, tf.Variable, Number],
x2: Union[tf.Tensor, tf.Variable, Number],
/,
*,
out: Optional[tf.Tensor] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = promote_types_of_inputs(x1, x2)
# Cast our inputs to float to match numpy behaviour
if not ivy.is_float_dtype(x1):
x1 = tf.cast(x1, ivy.default_float_dtype(as_native=True))
x2 = tf.cast(x2, ivy.default_float_dtype(as_native=True))
# Replace any zero values with 1/the value, since tf.math.sign always
# returns 0 for positive or negative zero
signable_x2 = tf.where(tf.equal(x2, 0), tf.math.divide(1, x2), x2)
signs = tf.math.sign(signable_x2)
return tf.math.multiply(tf.math.abs(x1), signs)
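# Illustrative expected values for copysign above; the 1/x2 substitution is what lets
# signed zeros contribute their sign, e.g.
#   copysign(tf.constant([3.0, -4.0]), tf.constant([-1.0, 2.0]))  # -> [-3., 4.]
#   copysign(tf.constant(2.0), tf.constant(-0.0))                 # -> -2.0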
def count_nonzero(
a: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
dtype: Optional[tf.DType] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if dtype is None:
return tf.math.count_nonzero(a, axis=axis, keepdims=keepdims, name=None)
return tf.math.count_nonzero(
a, axis=axis, keepdims=keepdims, dtype=dtype, name=None
)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def nansum(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[Tuple[int, ...], int]] = None,
dtype: Optional[tf.DType] = None,
keepdims: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
np_math_ops.enable_numpy_methods_on_tensor()
return tf.experimental.numpy.nansum(x, axis=axis, dtype=dtype, keepdims=keepdims)
def isclose(
a: Union[tf.Tensor, tf.Variable],
b: Union[tf.Tensor, tf.Variable],
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.experimental.numpy.isclose(
a, b, rtol=rtol, atol=atol, equal_nan=equal_nan
)
def signbit(
x: Union[tf.Tensor, tf.Variable, float, int, list, tuple],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.experimental.numpy.signbit(x)
def hypot(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.sqrt(tf.math.square(x1) + tf.math.square(x2))
def allclose(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> bool:
return tf.experimental.numpy.allclose(
x1, x2, rtol=rtol, atol=atol, equal_nan=equal_nan
)
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16",)}, backend_version)
def fix(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.cast(tf.where(x > 0, tf.math.floor(x), tf.math.ceil(x)), x.dtype)
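# Illustrative expected values for fix above (round toward zero):
#   fix(tf.constant([2.7, -2.7]))  # -> [2., -2.]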
@with_unsupported_dtypes({"2.15.0 and below": ("bflaot16", "float16")}, backend_version)
def nextafter(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.experimental.numpy.nextafter(x1, x2)
@with_unsupported_dtypes(
{"2.15.0 and below": ("uint8", "uint16", "uint32", "uint64")}, backend_version
)
def diff(
x: Union[tf.Tensor, tf.Variable, list, tuple],
/,
*,
n: int = 1,
axis: int = -1,
prepend: Optional[Union[tf.Tensor, tf.Variable, int, float, list, tuple]] = None,
append: Optional[Union[tf.Tensor, tf.Variable, int, float, list, tuple]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if n == 0:
return x
if prepend is not None:
x = tf.experimental.numpy.append(prepend, x, axis=axis if axis != -1 else None)
if append is not None:
x = tf.experimental.numpy.append(x, append, axis=axis if axis != -1 else None)
return tf.experimental.numpy.diff(x, n=n, axis=axis)
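# Illustrative expected values for diff above:
#   diff(tf.constant([1.0, 2.0, 4.0, 7.0]))       # -> [1., 2., 3.]
#   diff(tf.constant([1.0, 2.0, 4.0, 7.0]), n=2)  # -> [1., 1.]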
@with_supported_dtypes(
{
"2.15.0 and below": (
"float32",
"float64",
)
},
backend_version,
)
def zeta(
x: Union[tf.Tensor, tf.Variable],
q: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.zeta(x, q)
def _normalize_axis_index(ax: int, ndim: int) -> int:
if ax >= ndim or ax < -ndim:
raise ValueError("axis index is out of range")
return (ax + ndim) % ndim
def _normalize_axis_tuple(axis: Union[int, list, tuple], ndim: int) -> Tuple[int, ...]:
if type(axis) not in (tuple, list):
try:
axis = [operator.index(axis)]
except TypeError:
pass
axis = tuple(_normalize_axis_index(ax, ndim) for ax in axis)
if len(set(axis)) != len(axis):
raise ValueError("repeated axis")
return axis
def gradient(
x: tf.Tensor,
/,
*,
spacing: Union[int, list, tuple] = 1,
axis: Optional[Union[int, list, tuple]] = None,
edge_order: int = 1,
) -> Union[tf.Tensor, List[tf.Tensor]]:
# https://github.com/numpy/numpy/blob/v1.24.3/numpy/lib/function_base.py#L969-L1312
x = tf.experimental.numpy.asanyarray(x)
N = x.ndim # number of dimensions
if axis is None:
axes = tuple(range(N))
else:
axes = _normalize_axis_tuple(axis, N)
len_axes = len(axes)
n = (
-1
if spacing is None
else (0 if type(spacing) in (int, float) else len(spacing))
)
if n == -1:
# no spacing argument - use 1 in all axes
dx = [1.0] * len_axes
elif n == 0:
dx = [spacing] * len_axes
elif n == 1 and tf.experimental.numpy.ndim(spacing[0]) == 0:
# single scalar for all axes
dx = spacing * len_axes
elif n == len_axes:
# scalar or 1d array for each axis
dx = list(spacing)
for i, distances in enumerate(dx):
distances = tf.experimental.numpy.asanyarray(distances)
if distances.ndim == 0:
continue
elif distances.ndim != 1:
raise ValueError("distances must be either scalars or 1d")
if len(distances) != x.shape[axes[i]]:
raise ValueError(
"when 1d, distances must match the length of the corresponding"
f" dimension {len(distances)} {x.shape[axes[i]]}"
)
if distances.dtype.is_integer:
# Convert numpy integer types to float64 to avoid modular
# arithmetic in np.diff(distances).
distances = tf.cast(distances, tf.float64)
diffx = tf.experimental.numpy.diff(distances)
# if distances are constant reduce to the scalar case
# since it brings a consistent speedup
if tf.reduce_all(tf.equal(diffx, diffx[0])):
diffx = diffx[0]
dx[i] = diffx
else:
raise TypeError("invalid number of arguments")
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)] * N
slice2 = [slice(None)] * N
slice3 = [slice(None)] * N
slice4 = [slice(None)] * N
if x.dtype.is_integer:
x = tf.cast(x, tf.float64)
for axis, ax_dx in zip(axes, dx):
if x.shape[axis] < edge_order + 1:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least (edge_order + 1) elements are required."
)
# result allocation
out = x.numpy()
# spacing for the current axis
uniform_spacing = tf.experimental.numpy.ndim(ax_dx) == 0
# Numerical differentiation: 2nd order interior
slice1[axis] = slice(1, -1)
slice2[axis] = slice(None, -2)
slice3[axis] = slice(1, -1)
slice4[axis] = slice(2, None)
if uniform_spacing:
out[tuple(slice1)] = (x[tuple(slice4)] - x[tuple(slice2)]) / (2.0 * ax_dx)
else:
dx1 = ax_dx[0:-1]
dx2 = ax_dx[1:]
a = -(dx2) / (dx1 * (dx1 + dx2))
b = (dx2 - dx1) / (dx1 * dx2)
c = dx1 / (dx2 * (dx1 + dx2))
out[tuple(slice1)] = (
a * x[tuple(slice2)] + b * x[tuple(slice3)] + c * x[tuple(slice4)]
)
# Numerical differentiation: 1st order edges
if edge_order == 1:
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
dx_0 = ax_dx if uniform_spacing else ax_dx[0]
# 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
out[tuple(slice1)] = (x[tuple(slice2)] - x[tuple(slice3)]) / dx_0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
dx_n = ax_dx if uniform_spacing else ax_dx[-1]
# 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
out[tuple(slice1)] = (x[tuple(slice2)] - x[tuple(slice3)]) / dx_n
# Numerical differentiation: 2nd order edges
else:
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
if uniform_spacing:
a = -1.5 / ax_dx
b = 2.0 / ax_dx
c = -0.5 / ax_dx
else:
dx1 = ax_dx[0]
dx2 = ax_dx[1]
a = -(2.0 * dx1 + dx2) / (dx1 * (dx1 + dx2))
b = (dx1 + dx2) / (dx1 * dx2)
c = -dx1 / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
out[tuple(slice1)] = (
a * x[tuple(slice2)] + b * x[tuple(slice3)] + c * x[tuple(slice4)]
)
slice1[axis] = -1
slice2[axis] = -3
slice3[axis] = -2
slice4[axis] = -1
if uniform_spacing:
a = 0.5 / ax_dx
b = -2.0 / ax_dx
c = 1.5 / ax_dx
else:
dx1 = ax_dx[-2]
dx2 = ax_dx[-1]
a = (dx2) / (dx1 * (dx1 + dx2))
b = -(dx2 + dx1) / (dx1 * dx2)
c = (2.0 * dx2 + dx1) / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
out[tuple(slice1)] = (
a * x[tuple(slice2)] + b * x[tuple(slice3)] + c * x[tuple(slice4)]
)
outvals.append(tf.convert_to_tensor(out))
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len_axes == 1:
return outvals[0]
else:
return outvals
@with_supported_dtypes(
{
"2.15.0 and below": (
"float16",
"float32",
"float64",
"complex64",
"complex128",
)
},
backend_version,
)
def xlogy(
x: Union[tf.Tensor, tf.Variable],
y: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x, y = promote_types_of_inputs(x, y)
return tf.math.xlogy(x, y)
def conj(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.conj(x)
@with_unsupported_dtypes({"2.15.0 and below": ("unsigned",)}, backend_version)
def ldexp(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable, int],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
out_dtype = x1.dtype
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
if tf.math.reduce_any(tf.math.less(x2, 0)):
pos_exp = tf.cast(tf.math.greater_equal(x2, 0), x2.dtype) * x2
neg_exp = tf.cast(tf.math.less(x2, 0), x2.dtype) * x2
ret = tf.cast(x1, pos_exp.dtype) * tf.math.pow(2, pos_exp)
ret = tf.cast(ret, neg_exp.dtype) / tf.math.pow(2, -neg_exp)
else:
x2 = tf.cast(x2, x1.dtype)
ret = x1 * tf.math.pow(2, x2)
return tf.cast(ret, out_dtype)
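# Illustrative expected values for ldexp above (x1 * 2**x2, with the split pos/neg
# exponent path handling negative x2):
#   ldexp(tf.constant(1.5), tf.constant(3))   # -> 12.0
#   ldexp(tf.constant(5.0), tf.constant(-1))  # -> 2.5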
@with_unsupported_dtypes({"2.15.0 and below": ("unsigned",)}, backend_version)
def frexp(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[
Union[Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Variable, tf.Variable]]
] = None,
) -> Union[Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Variable, tf.Variable]]:
e = tf.math.floor(tf.math.log(tf.math.abs(x)) / tf.cast(tf.math.log(2.0), x.dtype))
e = tf.cast(e, x.dtype)
while tf.reduce_any(tf.abs(x / tf.math.pow(2, e)) >= 1):
e += tf.cast(tf.abs(x / tf.math.pow(2, e)) >= 1, e.dtype)
m = x / tf.math.pow(2, e)
e = tf.cast(e, tf.int32)
return m, e
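# Illustrative expected value for frexp above (decomposes x as m * 2**e):
#   frexp(tf.constant(12.0))  # -> (m=0.75, e=4), since 12 = 0.75 * 2**4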
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def modf(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Tuple[Union[tf.Tensor, tf.Variable], Union[tf.Tensor, tf.Variable]]:
integer_part = tf.math.floor(x)
fractional_part = x - integer_part
return fractional_part, integer_part
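# Illustrative expected values for modf above; note the split is floor-based, so the
# fractional part is always non-negative (this differs from np.modf for negative inputs):
#   modf(tf.constant(3.75))   # -> (0.75, 3.0)
#   modf(tf.constant(-1.25))  # -> (0.75, -2.0)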
def digamma(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.digamma(x)
def erfc(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.erfc(x)
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def erfinv(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.erfinv(x)
| ivy/ivy/functional/backends/tensorflow/experimental/elementwise.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/experimental/elementwise.py",
"repo_id": "ivy",
"token_count": 8707
} | 27 |
"""Tensorflow gradient functions.
Collection of TensorFlow gradient functions, wrapped to fit Ivy syntax
and signature.
"""
# global
import tensorflow as tf
from typing import Sequence, Union, Optional, Callable
# local
import ivy
from ivy.func_wrapper import outputs_to_ivy_arrays, inputs_to_native_arrays
from ivy.functional.ivy.gradients import (
_get_required_float_variables,
_get_y_and_ret_idxs,
_get_native_y,
_set_duplicates,
_process_func_ret_and_grads,
)
def variable(x, /):
with tf.device(ivy.dev(x, as_native=True)):
return tf.Variable(x, trainable=True)
def is_variable(x, /, *, exclusive=False):
return isinstance(x, tf.Variable)
def variable_data(x: tf.Variable, /) -> tf.Variable:
return x.value()
def _grad_func(y, xs, xs_required, tape):
"""Gradient calculation function."""
# Creating a zero gradient nest for the case where no gradients are computed
grads_ = ivy.nested_map(
lambda x: ivy.to_native(ivy.zeros_like(x)),
xs_required,
include_derived=True,
shallow=False,
)
# Gradient calculation
grads = tape.gradient(y, xs_required)
# Returning zeros if no gradients are computed for consistent results
if isinstance(xs, ivy.NativeArray):
grads = grads_ if grads is None else grads
else:
grads = ivy.nested_map(
lambda x: 0 if x is None else x,
grads,
include_derived=True,
)
if isinstance(grads, ivy.Container):
grads += grads_
else:
grads = ivy.nested_multi_map(lambda x, _: (x[0] + x[1]), [grads, grads_])
return grads
def execute_with_gradients(
func,
xs: Union[tf.Tensor, tf.Variable],
/,
*,
retain_grads: bool = False,
xs_grad_idxs: Sequence[Sequence[Union[str, int]]] = ((0,),),
ret_grad_idxs: Sequence[Sequence[Union[str, int]]] = ((0,),),
):
# Conversion of required arrays to float variables and duplicate index chains
xs, xs_grad_idxs, xs_required, required_duplicate_index_chains, _ = (
_get_required_float_variables(xs, xs_grad_idxs)
)
# Creating a tape to record operations
with tf.GradientTape(persistent=True, watch_accessed_variables=False) as tape:
tape.watch(xs_required)
func_ret = func(xs)
# Getting the relevant outputs from the function return for gradient calculation
ret_grad_idxs, y, ret_idxs = _get_y_and_ret_idxs(
func_ret, ret_grad_idxs, reshape=False
)
if isinstance(y, ivy.NativeArray):
# Gradient calculation for a single output
grads = _set_duplicates(
ivy.to_ivy(_grad_func(y, xs, xs_required, tape)),
required_duplicate_index_chains,
)
else:
# Gradient calculation for multiple outputs
y = _get_native_y(y)
grads_ = ivy.nested_map(
lambda x: _grad_func(x, xs, xs_required, tape),
y,
include_derived=True,
shallow=False,
)
grads = grads_
if isinstance(ret_idxs, list) and len(ret_idxs):
grads = {
ret_idxs[i]: _set_duplicates(grad, required_duplicate_index_chains)
for i, grad in enumerate(grads_)
}
# Deleting the tape if not retaining gradients
if not retain_grads:
del tape
# Stop further gradient propagation if not retaining gradients
return _process_func_ret_and_grads(func_ret, grads, retain_grads)
def value_and_grad(func):
def grad_fn(xs):
grads = ivy.nested_map(
lambda x: ivy.zeros_like(x), xs, include_derived=True, shallow=False
)
with tf.GradientTape(watch_accessed_variables=False) as tape:
xs = ivy.nested_map(lambda x: ivy.to_native(x), xs, include_derived=True)
tape.watch(xs)
y = func(xs)
            y = ivy.to_native(y)
grads_ = tape.gradient(y, xs)
grads_ = ivy.nested_map(
lambda x: ivy.to_ivy(x),
grads_,
include_derived=True,
)
grads_ = ivy.to_ivy(grads_)
grad_idxs = ivy.nested_argwhere(grads_, lambda x: ivy.is_ivy_array(x))
grad_array_vals = list(ivy.multi_index_nest(grads_, grad_idxs))
xs = ivy.to_ivy(xs)
if isinstance(xs, ivy.Array):
grads = grads_
else:
ivy.set_nest_at_indices(grads, grad_idxs, grad_array_vals)
y = ivy.to_ivy(y)
return y, grads
return grad_fn
def stop_gradient(
x: Union[tf.Tensor, tf.Variable],
/,
*,
preserve_type: bool = True,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
is_var = is_variable(x)
x = tf.stop_gradient(x)
if is_var and preserve_type:
return variable(x)
return x
def jac(func: Callable):
def grad_fn(x_in):
return ivy.to_native(
func(ivy.to_ivy(x_in, nested=True)), nested=True, include_derived=True
)
def callback_fn(x_in):
with tf.GradientTape(persistent=True) as tape:
ivy.nested_map(ivy.copy_array, x_in)
x_in = ivy.to_native(x_in, nested=True)
tape.watch(x_in)
y = grad_fn(x_in)
# Deal with multiple outputs
if not isinstance(y, ivy.NativeArray):
jacobian = ivy.nested_map(
lambda yi: ivy.to_ivy(
tape.jacobian(yi, x_in, unconnected_gradients="zero"),
nested=True,
),
y,
include_derived=True,
)
else:
jacobian = ivy.to_ivy(tape.jacobian(y, x_in))
return jacobian
return callback_fn
def grad(f, argnums=0):
if grad.nth == 0:
grad.f_original = f
def _nth_derivative(n):
@outputs_to_ivy_arrays
@inputs_to_native_arrays
def _inner(*args, **kwargs):
max_argnum = argnums if isinstance(argnums, int) else max(argnums)
if max_argnum >= len(args):
raise TypeError(
f"differentiating with respect to {argnums=} requires at least "
f"{max_argnum + 1} positional arguments to be passed by the "
f"caller, but got only {len(args)} positional arguments."
)
if isinstance(argnums, int):
x = args[argnums]
elif isinstance(argnums, (tuple, list)):
x = []
for i in argnums:
x.append(args[i])
else:
raise TypeError(
"argnums should be passed as int or a list/tuple of ints."
f" Found {type(argnums)}"
)
if n == 0:
ret = (
grad.f_original(*args, **kwargs)
if grad.f_original is not None
else f(*args, **kwargs)
)
grad.nth = 0
return ret
else:
with tf.GradientTape() as tape:
tape.watch(x)
                    y = _nth_derivative(n - 1)(*args, **kwargs)
ret = tape.gradient(y, x)
return ret
return _inner
grad.nth += 1
return _nth_derivative(grad.nth)
grad.f_original = None
grad.nth = 0
| ivy/ivy/functional/backends/tensorflow/gradients.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/gradients.py",
"repo_id": "ivy",
"token_count": 3786
} | 28 |
# global
import sys
import torch as torch
# local
import ivy
from ivy.func_wrapper import _dtype_from_version
backend_version = {"version": torch.__version__.split("+")[0]}
# Registering ivy.Array as trackable submodule
if hasattr(torch, "_dynamo"):
torch._dynamo.config.traceable_tensor_subclasses = (ivy.Array,)
# noinspection PyUnresolvedReferences
if not ivy.is_local():
_module_in_memory = sys.modules[__name__]
else:
_module_in_memory = sys.modules[ivy.import_module_path].import_cache[__name__]
use = ivy.utils.backend.ContextManager(_module_in_memory)
# wrap dunder methods of native tensors to return NotImplemented to prioritize Ivy array methods.
def dunder_wrapper(func):
def rep_method(*args, **kwargs):
for arg in args:
if ivy.is_ivy_array(arg):
return NotImplemented
return func(*args, **kwargs)
return rep_method
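# Illustrative effect of the wrapper above: once patched, an expression such as
#   torch.tensor([1.0]) + ivy.array([2.0])
# makes torch.Tensor.__add__ return NotImplemented (the right operand is an ivy.Array),
# so Python falls back to ivy.Array.__radd__ and the Ivy implementation handles the op.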
# check for previously imported torch module
modules_to_patch = []
tensors_to_patch = []
tmp_globals = dict(globals())
for name, value in tmp_globals.items():
if value == "torch.Tensor":
tensors_to_patch.append(name)
try:
if value.__name__ == "torch":
modules_to_patch.append(name)
except AttributeError:
pass
methods_to_patch = [
"__add__",
"__and__",
"__div__",
"__eq__",
"__floordiv__",
"__ge__",
"__gt__",
"__iadd__",
"__iand__",
"__idiv__",
"__ifloordiv__",
"__ilshift__",
"__imul__",
"__ior__",
"__ipow__",
"__irshift__",
"__isub__",
"__itruediv__",
"__ixor__",
"__le__",
"__lshift__",
"__lt__",
"__matmul__",
"__mul__",
"__or__",
"__pow__",
"__truediv__",
"__xor__",
"__ne__",
"__mod__",
]
for module in modules_to_patch:
for method in methods_to_patch:
exec(
module
+ ".Tensor."
+ method
+ " = dunder_wrapper("
+ module
+ ".Tensor."
+ method
+ ")"
)
for tensor in tensors_to_patch:
for method in methods_to_patch:
exec(tensor + "." + method + " = dunder_wrapper(" + tensor + "." + method + ")")
NativeArray = torch.Tensor
NativeDevice = torch.device
NativeDtype = torch.dtype
NativeShape = torch.Size
NativeSparseArray = torch.Tensor
# devices
valid_devices = ("cpu", "gpu")
invalid_devices = ("tpu",)
# native data types
native_int8 = torch.int8
native_int16 = torch.int16
native_int32 = torch.int32
native_int64 = torch.int64
native_uint8 = torch.uint8
native_bfloat16 = torch.bfloat16
native_float16 = torch.float16
native_float32 = torch.float32
native_float64 = torch.float64
native_complex64 = torch.complex64
native_complex128 = torch.complex128
native_double = native_float64
native_bool = torch.bool
# valid data types
# ToDo: Add complex dtypes to valid_dtypes and fix all resulting failures.
# update these to add new dtypes
valid_dtypes = {
"2.2 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.bfloat16,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
ivy.bool,
)
}
valid_numeric_dtypes = {
"2.2 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.bfloat16,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
)
}
valid_int_dtypes = {
"2.2 and below": (ivy.int8, ivy.int16, ivy.int32, ivy.int64, ivy.uint8)
}
valid_float_dtypes = {
"2.2 and below": (ivy.bfloat16, ivy.float16, ivy.float32, ivy.float64)
}
valid_uint_dtypes = {"2.2 and below": (ivy.uint8,)}
valid_complex_dtypes = {"2.2 and below": (ivy.complex64, ivy.complex128)}
# leave these untouched
valid_dtypes = _dtype_from_version(valid_dtypes, backend_version)
valid_numeric_dtypes = _dtype_from_version(valid_numeric_dtypes, backend_version)
valid_int_dtypes = _dtype_from_version(valid_int_dtypes, backend_version)
valid_float_dtypes = _dtype_from_version(valid_float_dtypes, backend_version)
valid_uint_dtypes = _dtype_from_version(valid_uint_dtypes, backend_version)
valid_complex_dtypes = _dtype_from_version(valid_complex_dtypes, backend_version)
# invalid data types
# update these to add new dtypes
invalid_dtypes = {
"2.2 and below": (
ivy.uint16,
ivy.uint32,
ivy.uint64,
)
}
invalid_numeric_dtypes = {"2.2 and below": (ivy.uint16, ivy.uint32, ivy.uint64)}
invalid_int_dtypes = {"2.2 and below": (ivy.uint16, ivy.uint32, ivy.uint64)}
invalid_float_dtypes = {"2.2 and below": ()}
invalid_uint_dtypes = {"2.2 and below": (ivy.uint16, ivy.uint32, ivy.uint64)}
invalid_complex_dtypes = {"2.2 and below": ()}
invalid_dtypes = _dtype_from_version(invalid_dtypes, backend_version)
# leave these untouched
invalid_numeric_dtypes = _dtype_from_version(invalid_numeric_dtypes, backend_version)
invalid_int_dtypes = _dtype_from_version(invalid_int_dtypes, backend_version)
invalid_float_dtypes = _dtype_from_version(invalid_float_dtypes, backend_version)
invalid_uint_dtypes = _dtype_from_version(invalid_uint_dtypes, backend_version)
invalid_complex_dtypes = _dtype_from_version(invalid_complex_dtypes, backend_version)
native_inplace_support = True
supports_gradients = True
def closest_valid_dtype(type=None, /, as_native=False):
if type is None:
type = ivy.default_dtype()
elif isinstance(type, str) and type in invalid_dtypes:
type = ivy.as_ivy_dtype(
{"uint16": ivy.uint8, "uint32": ivy.uint8, "uint64": ivy.uint8}[type]
)
return ivy.as_ivy_dtype(type) if not as_native else ivy.as_native_dtype(type)
backend = "torch"
def globals_getter_func(x=None):
if not x:
return globals()
else:
globals()[x[0]] = x[1]
ivy.func_wrapper.globals_getter_func = globals_getter_func
# local sub-modules
from . import activations
from .activations import *
from . import creation
from .creation import *
from . import data_type
from .data_type import *
from . import device
from .device import *
from . import elementwise
from .elementwise import *
from . import gradients
from .gradients import *
from . import general
from .general import *
from . import layers
from .layers import *
from . import linear_algebra as linalg
from .linear_algebra import *
from . import manipulation
from .manipulation import *
from . import random
from .random import *
from . import searching
from .searching import *
from . import set
from .set import *
from . import sorting
from .sorting import *
from . import statistical
from .statistical import *
from . import utility
from .utility import *
from . import experimental
from .experimental import *
from . import control_flow_ops
from .control_flow_ops import *
from . import norms
from .norms import *
from . import module
from .module import *
# sub-backends
from . import sub_backends
from .sub_backends import *
NativeModule = torch.nn.Module
| ivy/ivy/functional/backends/torch/__init__.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/__init__.py",
"repo_id": "ivy",
"token_count": 2966
} | 29 |
# global
from typing import Optional, Union, Tuple, List, Literal, Sequence, Callable
import torch
import math
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from . import backend_version
from ivy.functional.ivy.layers import (
_handle_padding,
_get_num_padded_values,
_validate_max_pool_params,
_depth_max_pooling_helper,
)
from ivy.functional.ivy.experimental.layers import (
_padding_ceil_mode,
_broadcast_pooling_helper,
)
def _determine_depth_max_pooling(x, kernel, strides, dims, data_format="channel_first"):
# Determine depth pooling
kernel, strides, depth_pooling = _depth_max_pooling_helper(
x.shape, kernel, strides, dims=dims, data_format=data_format
)
if depth_pooling:
x = torch.permute(x, (0, 2, 1, *range(3, dims + 2)))
return x, kernel, strides, depth_pooling
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def max_pool1d(
x: torch.Tensor,
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
dilation: Union[int, Tuple[int]] = 1,
ceil_mode: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
dims = 1
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
if data_format == "NWC":
x = x.permute(0, 2, 1)
kernel = [kernel[i] for i in [0, 2, 1]] if len(kernel) == (dims + 2) else kernel
strides = (
[strides[i] for i in [0, 2, 1]] if len(strides) == (dims + 2) else strides
)
# determine depth pooling
x, kernel, strides, depth_pooling = _determine_depth_max_pooling(
x, kernel, strides, dims, data_format="channel_first"
)
if isinstance(padding, str):
x_shape = list(x.shape[2:])
new_kernel = [dilation[0] * (kernel[0] - 1) + 1]
pad_w = _handle_padding(x_shape[0], strides[0], new_kernel[0], padding)
pad_list = [pad_w // 2, pad_w - pad_w // 2]
else:
if any(item != 0 for sublist in padding for item in sublist) and depth_pooling:
raise NotImplementedError(
"Nonzero explicit padding is not supported for depthwise max pooling"
)
pad_list = [item for sublist in padding[::-1] for item in sublist]
if all(pad_list[i] == pad_list[i + 1] for i in range(0, 2 * dims, 2)) and all(
pad <= kernel_size / 2 for pad, kernel_size in zip(pad_list[::-2], kernel)
):
res = torch.nn.functional.max_pool1d(
x, kernel, strides, pad_list[::-2], dilation, ceil_mode
)
else:
x = torch.nn.functional.pad(
x,
pad_list,
value=float("-inf"),
)
res = torch.nn.functional.max_pool1d(x, kernel, strides, 0, dilation, ceil_mode)
if depth_pooling:
res = torch.permute(res, (0, 2, 1))
if data_format == "NWC":
return res.permute(0, 2, 1)
return res
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
backend_version,
)
def max_pool2d(
x: torch.Tensor,
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
dilation: Union[int, Tuple[int, ...]] = 1,
ceil_mode: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
dims = 2
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
if data_format == "NHWC":
x = x.permute(0, 3, 1, 2)
kernel = (
[kernel[i] for i in [0, 3, 1, 2]] if len(kernel) == (dims + 2) else kernel
)
strides = (
[strides[i] for i in [0, 3, 1, 2]]
if len(strides) == (dims + 2)
else strides
)
# determine depth pooling
x, kernel, strides, depth_pooling = _determine_depth_max_pooling(
x, kernel, strides, dims, data_format="channel_first"
)
if isinstance(padding, str):
x_shape = list(x.shape[2:])
new_kernel = [kernel[i] + (kernel[i] - 1) * (dilation[i] - 1) for i in range(2)]
pad_h = _handle_padding(x_shape[0], strides[0], new_kernel[0], padding)
pad_w = _handle_padding(x_shape[1], strides[1], new_kernel[1], padding)
pad_list = [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
else:
if any(item != 0 for sublist in padding for item in sublist) and depth_pooling:
raise NotImplementedError(
"Nonzero explicit padding is not supported for depthwise max pooling"
)
pad_list = [item for sublist in padding[::-1] for item in sublist]
if all(pad_list[i] == pad_list[i + 1] for i in range(0, 2 * dims, 2)) and all(
pad <= kernel_size / 2 for pad, kernel_size in zip(pad_list[::-2], kernel)
):
res = torch.nn.functional.max_pool2d(
x, kernel, strides, pad_list[::-2], dilation, ceil_mode
)
else:
x = torch.nn.functional.pad(
x,
pad_list,
value=float("-inf"),
)
res = torch.nn.functional.max_pool2d(x, kernel, strides, 0, dilation, ceil_mode)
if depth_pooling:
res = torch.permute(res, (0, 2, 1, 3))
if data_format == "NHWC":
return res.permute(0, 2, 3, 1)
return res
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
backend_version,
)
def max_pool3d(
x: torch.Tensor,
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
dilation: Union[int, Tuple[int, ...]] = 1,
ceil_mode: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
dims = 3
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
if data_format == "NDHWC":
x = x.permute(0, 4, 1, 2, 3)
kernel = (
[kernel[i] for i in [0, 4, 1, 2, 3]]
if len(kernel) == (dims + 2)
else kernel
)
strides = (
[strides[i] for i in [0, 4, 1, 2, 3]]
if len(strides) == (dims + 2)
else strides
)
# determine depth pooling
x, kernel, strides, depth_pooling = _determine_depth_max_pooling(
x, kernel, strides, dims, data_format="channel_first"
)
if isinstance(padding, str):
x_shape = list(x.shape[2:])
new_kernel = [kernel[i] + (kernel[i] - 1) * (dilation[i] - 1) for i in range(3)]
pad_d = _handle_padding(x_shape[0], strides[0], new_kernel[0], padding)
pad_h = _handle_padding(x_shape[1], strides[1], new_kernel[1], padding)
pad_w = _handle_padding(x_shape[2], strides[2], new_kernel[2], padding)
pad_list = [
pad_w // 2,
pad_w - pad_w // 2,
pad_h // 2,
pad_h - pad_h // 2,
pad_d // 2,
pad_d - pad_d // 2,
]
else:
if any(item != 0 for sublist in padding for item in sublist) and depth_pooling:
raise NotImplementedError(
"Nonzero explicit padding is not supported for depthwise max pooling"
)
pad_list = [item for sublist in padding[::-1] for item in sublist]
if all(pad_list[i] == pad_list[i + 1] for i in range(0, 2 * dims, 2)) and all(
pad <= kernel_size / 2 for pad, kernel_size in zip(pad_list[::-2], kernel)
):
res = torch.nn.functional.max_pool3d(
x, kernel, strides, pad_list[::-2], dilation, ceil_mode
)
else:
x = torch.nn.functional.pad(
x,
pad_list,
value=float("-inf"),
)
res = torch.nn.functional.max_pool3d(x, kernel, strides, 0, dilation, ceil_mode)
if depth_pooling:
res = torch.permute(res, (0, 2, 1, 3, 4))
if data_format == "NDHWC":
return res.permute(0, 2, 3, 4, 1)
return res
def _add_ceil_pad_to_pad_list(num_pad, k, c):
return num_pad + (num_pad - ((k * num_pad) / (k - c)))
def _get_specific_pad(x_shape, kernel, strides, padding, dims):
if isinstance(padding, str):
if padding == "SAME":
pad_specific = [
_handle_padding(x_shape[i], strides[i], kernel[i], padding)
for i in range(dims - 1, -1, -1)
]
pad_list_top = [pad_specific[i] // 2 for i in range(dims)]
pad_list_bot = [pad_specific[i] - pad_specific[i] // 2 for i in range(dims)]
padding = [None] * len(pad_list_top) * 2
padding[::2] = pad_list_top
padding[1::2] = pad_list_bot
pad_specific = pad_specific[::-1]
else:
pad_specific = [0] * dims
padding = [0] * dims * 2
else:
if isinstance(padding, int):
padding = [(padding, padding)] * dims
pad_specific = [sum(padding[i]) for i in range(dims)]
padding = [item for sublist in padding for item in sublist[::-1]][::-1]
return padding, pad_specific
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def avg_pool1d(
x: torch.Tensor,
kernel: Union[int, Tuple[int]],
strides: Union[int, Tuple[int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if isinstance(strides, int):
strides = (strides,)
elif len(strides) == 1:
strides = (strides[0],)
if isinstance(kernel, int):
kernel = (kernel,)
elif len(kernel) == 1:
kernel = (kernel[0],)
if data_format in ("NWC", "NCL"):
x = x.permute(0, 2, 1)
if (
isinstance(padding, int)
or not isinstance(padding, str)
and padding[0][0] == padding[0][1]
) and not divisor_override:
if not isinstance(padding, int):
padding = padding[0][0]
res = torch.nn.functional.avg_pool1d(
x,
kernel,
strides,
padding,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
)
else:
x_shape = x.shape[2]
padding, pad_specific = _get_specific_pad(
[x_shape], kernel, strides, padding, 1
)
x = torch.nn.functional.pad(x, padding, value=0.0)
res = torch.nn.functional.avg_pool1d(x, kernel, strides, 0, ceil_mode)
if not count_include_pad and any(pad_specific):
num_padded_values = ivy.map(
_get_num_padded_values,
constant={
"p": pad_specific[0],
"n": x_shape,
"k": kernel[0],
"s": strides[0],
},
unique={
"i": torch.arange(res.shape[2]),
},
)
num_padded_values = torch.tensor(num_padded_values, dtype=res.dtype)
if ceil_mode:
_, c = _padding_ceil_mode(x_shape, kernel[0], padding, strides[0], True)
num_padded_values[-1] = _add_ceil_pad_to_pad_list(
num_padded_values[-1], kernel[0], c
)
res = (kernel[0] * res) / (kernel[0] - num_padded_values)
if data_format in ("NWC", "NCL"):
res = res.permute(0, 2, 1)
return res
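# Minimal usage sketch (illustrative only):
# >>> import torch
# >>> x = torch.arange(8, dtype=torch.float32).reshape((1, 4, 2))   # NWC input
# >>> avg_pool1d(x, 2, 2, "VALID").shape
# torch.Size([1, 2, 2])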
def _adjust_num_padded_values_to_ceil(
pad_specific, num_padded_values, x_shape, kernel, strides, dims
):
for i in range(dims):
pad = [pad_specific[i] // 2, pad_specific[i] - pad_specific[i] // 2]
_, c = _padding_ceil_mode(x_shape[i], kernel[i], pad, strides[i], True)
num_padded_values[i][-1] = _add_ceil_pad_to_pad_list(
num_padded_values[i][-1], kernel[i], c
)
return num_padded_values
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
backend_version,
)
def avg_pool2d(
x: torch.Tensor,
kernel: Union[int, Tuple[int], Tuple[int, int]],
strides: Union[int, Tuple[int], Tuple[int, int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if isinstance(strides, int):
strides = (strides, strides)
elif len(strides) == 1:
strides = (strides[0], strides[0])
if isinstance(kernel, int):
kernel = (kernel, kernel)
    elif len(kernel) == 1:
kernel = (kernel[0], kernel[0])
if data_format == "NHWC":
x = x.permute(0, 3, 1, 2)
if (
isinstance(padding, int)
or not isinstance(padding, str)
and all(pad[0] == pad[1] for pad in padding)
):
if not isinstance(padding, int):
padding = [padding[0][0], padding[1][0]]
res = torch.nn.functional.avg_pool2d(
x,
kernel,
strides,
padding,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
divisor_override=divisor_override,
)
else:
x_shape = list(x.shape[2:])
padding, pad_specific = _get_specific_pad(x_shape, kernel, strides, padding, 2)
x = torch.nn.functional.pad(
x,
padding,
value=0.0,
)
res = torch.nn.functional.avg_pool2d(
x, kernel, strides, 0, ceil_mode, divisor_override=divisor_override
)
if not count_include_pad and any(pad_specific) and not divisor_override:
num_padded_values = [
ivy.map(
_get_num_padded_values,
constant={
"p": pad_specific[i],
"n": x_shape[i],
"k": kernel[i],
"s": strides[i],
},
unique={
"i": torch.arange(res.shape[i + 2]),
},
)
for i in range(2)
]
            if ceil_mode:
                num_padded_values = _adjust_num_padded_values_to_ceil(
                    pad_specific, num_padded_values, x_shape, kernel, strides, 2
                )
num_padded_values1 = torch.tensor(num_padded_values[0], dtype=res.dtype)[
:, None
]
num_padded_values2 = torch.tensor(num_padded_values[1], dtype=res.dtype)[
None, :
]
num_padded_values = (
num_padded_values1 * kernel[1]
+ num_padded_values2 * kernel[0]
- num_padded_values1 * num_padded_values2
)
res = (kernel[0] * kernel[1] * res) / (
kernel[0] * kernel[1] - num_padded_values
)
if data_format == "NHWC":
return res.permute(0, 2, 3, 1)
return res
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
backend_version,
)
def avg_pool3d(
x: torch.Tensor,
kernel: Union[int, Tuple[int], Tuple[int, int, int]],
strides: Union[int, Tuple[int], Tuple[int, int, int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if isinstance(strides, int):
strides = (strides, strides, strides)
elif len(strides) == 1:
strides = (strides[0], strides[0], strides[0])
if isinstance(kernel, int):
kernel = (kernel, kernel, kernel)
elif len(kernel) == 1:
kernel = (kernel[0], kernel[0], kernel[0])
if data_format == "NDHWC":
x = x.permute(0, 4, 1, 2, 3)
if (
isinstance(padding, int)
or not isinstance(padding, str)
and all(pad[0] == pad[1] for pad in padding)
):
if not isinstance(padding, int):
padding = [padding[0][0], padding[1][0], padding[2][0]]
res = torch.nn.functional.avg_pool3d(
x,
kernel,
strides,
padding,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
divisor_override=divisor_override,
)
else:
x_shape = list(x.shape[2:])
padding, pad_specific = _get_specific_pad(x_shape, kernel, strides, padding, 3)
x = torch.nn.functional.pad(
x,
padding,
value=0.0,
)
res = torch.nn.functional.avg_pool3d(
x, kernel, strides, 0, ceil_mode, divisor_override=divisor_override
)
if not count_include_pad and any(pad_specific) and not divisor_override:
num_padded_values = [
torch.tensor(
ivy.map(
_get_num_padded_values,
constant={
"p": pad_specific[i],
"n": x_shape[i],
"k": kernel[i],
"s": strides[i],
},
unique={
"i": torch.arange(res.shape[i + 2]),
},
),
dtype=res.dtype,
)
for i in range(3)
]
            if ceil_mode:
                num_padded_values = _adjust_num_padded_values_to_ceil(
                    pad_specific, num_padded_values, x_shape, kernel, strides, 3
                )
num_padded_values1 = num_padded_values[0].reshape((-1, 1, 1))
num_padded_values2 = num_padded_values[1].reshape((1, -1, 1))
num_padded_values3 = num_padded_values[2].reshape((1, 1, -1))
num_padded_values = (
num_padded_values1 * kernel[1] * kernel[2]
+ num_padded_values2 * kernel[0] * kernel[2]
+ num_padded_values3 * kernel[0] * kernel[1]
+ num_padded_values1 * num_padded_values2 * num_padded_values3
- num_padded_values1 * num_padded_values2 * kernel[2]
- num_padded_values1 * num_padded_values3 * kernel[1]
- num_padded_values2 * num_padded_values3 * kernel[0]
)
kernel_mul = kernel[0] * kernel[1] * kernel[2]
res = (kernel_mul * res) / (kernel_mul - num_padded_values)
if data_format == "NDHWC":
res = res.permute(0, 2, 3, 4, 1)
return res
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, backend_version)
def dct(
x: torch.Tensor,
/,
*,
type: Literal[1, 2, 3, 4] = 2,
n: Optional[int] = None,
axis: int = -1,
norm: Optional[Literal["ortho"]] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if norm not in (None, "ortho"):
raise ValueError("Norm must be either None or 'ortho'")
if axis < 0:
axis = axis + len(x.shape)
if n is not None:
signal_len = x.shape[axis]
if n <= signal_len:
local_idx = [slice(None)] * len(x.shape)
local_idx[axis] = slice(None, n)
x = x[local_idx]
else:
pad_idx = [0] * 2 * len(x.shape)
pad_idx[(len(pad_idx) - 1) - (2 * axis)] = n - signal_len
x = torch.nn.functional.pad(x, pad_idx)
real_zero = torch.tensor(0.0, dtype=x.dtype)
axis_dim = x.shape[axis]
axis_dim_float = torch.tensor(axis_dim, dtype=x.dtype)
if type == 1:
if norm:
raise ValueError("Normalization not supported for type-I DCT")
axis_idx = [slice(None)] * len(x.shape)
axis_idx[axis] = slice(1, -1)
x = torch.concat([x, x.flip(axis)[axis_idx]], dim=axis)
dct_out = torch.real(torch.fft.rfft(x, dim=axis))
return dct_out
elif type == 2:
scale_dims = [1] * len(x.shape)
scale_dims[axis] = axis_dim
complex_part = torch.arange(axis_dim_float) * math.pi * 0.5 / axis_dim_float
scale = 2.0 * torch.exp(
torch.complex(
real_zero,
-complex_part.type(real_zero.type()),
)
).view(scale_dims)
axis_idx = [slice(None)] * len(x.shape)
axis_idx[axis] = slice(None, axis_dim)
dct_out = torch.real(
torch.fft.rfft(x, n=2 * axis_dim, axis=axis)[axis_idx] * scale
)
if norm == "ortho":
n1 = 0.5 * torch.rsqrt(axis_dim_float)
n2 = n1 * math.sqrt(2.0)
sf = torch.nn.functional.pad(n1.unsqueeze(0), (0, axis_dim - 1), value=n2)
dct_out = sf.view(scale_dims) * dct_out
return dct_out
elif type == 3:
scale_dims = [1] * len(x.shape)
scale_dims[axis] = axis_dim
complex_part = torch.arange(axis_dim_float) * math.pi * 0.5 / axis_dim_float
scale = 2.0 * torch.exp(
torch.complex(real_zero, complex_part.type(real_zero.type()))
).view(scale_dims)
if norm == "ortho":
n1 = torch.sqrt(axis_dim_float)
n2 = n1 * math.sqrt(0.5)
scale_dims = [1] * len(x.shape)
scale_dims[axis] = axis_dim
sf = torch.nn.functional.pad(n1.unsqueeze(0), (0, axis_dim - 1), value=n2)
x = x * sf.view(scale_dims)
else:
x = x * axis_dim_float
axis_idx = [slice(None)] * len(x.shape)
axis_idx[axis] = slice(None, axis_dim)
dct_out = torch.real(
torch.fft.irfft(
scale * torch.complex(x, real_zero), n=2 * axis_dim, axis=axis
)
)[axis_idx]
return dct_out
elif type == 4:
dct_2 = dct(x, type=2, n=2 * axis_dim, axis=axis, norm=None)
axis_idx = [slice(None)] * len(x.shape)
axis_idx[axis] = slice(1, None, 2)
dct_out = dct_2[axis_idx]
if norm == "ortho":
dct_out *= math.sqrt(0.5) * torch.rsqrt(axis_dim_float)
return dct_out
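# Minimal usage sketch (illustrative only; the input below is hypothetical):
# >>> import torch
# >>> dct(torch.arange(8, dtype=torch.float32), type=2, norm="ortho").shape
# torch.Size([8])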
def idct(
x: torch.Tensor,
/,
*,
type: Literal[1, 2, 3, 4] = 2,
n: Optional[int] = None,
axis: int = -1,
norm: Optional[Literal["ortho"]] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]
return dct(x, type=inverse_type, n=n, axis=axis, norm=norm, out=out)
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
backend_version,
)
def fft(
x: torch.Tensor,
dim: int,
/,
*,
norm: str = "backward",
n: Optional[Union[int, Tuple[int]]] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if not isinstance(dim, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(dim)}"
)
if n is None:
n = x.shape[dim]
if n < -len(x.shape):
raise ivy.utils.exceptions.IvyError(
f"Invalid dim {dim}, expecting ranging"
" from {-len(x.shape)} to {len(x.shape)-1} "
)
if not isinstance(n, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(n)}"
)
if n <= 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {n}, expecting more than 1"
)
if norm not in {"backward", "ortho", "forward"}:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
if x.dtype in [torch.int64, torch.float64, torch.complex128]:
out_dtype = torch.complex128
else:
out_dtype = torch.complex64
return torch.fft.fft(x, n, dim, norm, out=out).to(dtype=out_dtype)
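# Minimal usage sketch (illustrative only):
# >>> import torch
# >>> fft(torch.ones(8), 0).dtype
# torch.complex64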
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
"complex",
)
},
backend_version,
)
def dropout(
x: torch.Tensor,
prob: float,
/,
*,
scale: bool = True,
dtype: torch.dtype = None,
training: bool = True,
seed: Optional[int] = None,
noise_shape: Optional[Sequence[int]] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x = ivy.astype(x, dtype) if dtype and x.dtype != dtype else x
if prob == 0 or not training:
return x
res = torch.nn.functional.dropout(x, prob, training=True)
res = torch.multiply(res, (1.0 - prob)) if not scale else res
return res
dropout.partial_mixed_handler = lambda x, prob, **kwargs: (
kwargs.get("noise_shape") is None and kwargs.get("seed") is None
)
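# Minimal usage sketch (illustrative only; with scale=True kept values are rescaled by
# 1 / (1 - prob), so a tensor of ones maps to zeros and twos for prob=0.5):
# >>> import torch
# >>> y = dropout(torch.ones(1000), 0.5)
# >>> bool(((y == 0.0) | (y == 2.0)).all())
# True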
@with_unsupported_dtypes(
{"2.2 and below": ("float16",)},
backend_version,
)
def dropout1d(
x: torch.Tensor,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NWC",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
is_batched = len(x.shape) == 3
if data_format == "NWC":
perm = (0, 2, 1) if is_batched else (1, 0)
x = torch.permute(x, perm)
res = torch.nn.functional.dropout1d(x, prob, training=training)
if data_format == "NWC":
res = torch.permute(res, perm)
return res
@with_unsupported_dtypes(
{"2.2 and below": ("float16",)},
backend_version,
)
def dropout2d(
x: torch.Tensor,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NHWC",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
is_batched = len(x.shape) == 4
if data_format == "NHWC":
perm = (0, 3, 1, 2) if is_batched else (2, 0, 1)
x = torch.permute(x, perm)
res = torch.nn.functional.dropout2d(x, prob, training=training)
if data_format == "NHWC":
perm = (0, 2, 3, 1) if is_batched else (1, 2, 0)
res = torch.permute(res, perm)
return res
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
backend_version,
)
def dropout3d(
x: torch.Tensor,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NDHWC",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
is_batched = len(x.shape) == 5
if data_format == "NDHWC":
perm = (0, 4, 1, 2, 3) if is_batched else (3, 0, 1, 2)
x = torch.permute(x, perm)
res = torch.nn.functional.dropout3d(x, prob, training=training)
if data_format == "NDHWC":
perm = (0, 2, 3, 4, 1) if is_batched else (1, 2, 3, 0)
res = torch.permute(res, perm)
return res
def ifft(
x: torch.Tensor,
dim: int,
*,
norm: str = "backward",
n: Optional[Union[int, Tuple[int]]] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if not isinstance(dim, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(dim)}"
)
if n is None:
n = x.shape[dim]
if n < -len(x.shape):
raise ivy.utils.exceptions.IvyError(
f"Invalid dim {dim}, expecting ranging"
" from {-len(x.shape)} to {len(x.shape)-1} "
)
if not isinstance(n, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(n)}"
)
if n <= 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {n}, expecting more than 1"
)
if norm not in {"backward", "ortho", "forward"}:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
return torch.fft.ifft(x, n, dim, norm, out=out).resolve_conj()
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, backend_version)
def embedding(
weights: torch.Tensor,
indices: torch.Tensor,
/,
*,
max_norm: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
ivy.utils.assertions.check_equal(
len(weights.shape), 2, message="weights must be 2-d", as_array=False
)
return torch.nn.functional.embedding(indices, weights, max_norm=max_norm)
embedding.support_native_out = False
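# Minimal usage sketch (illustrative only):
# >>> import torch
# >>> embedding(torch.eye(4), torch.tensor([0, 2])).shape
# torch.Size([2, 4])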
def interpolate(
x: torch.Tensor,
size: Union[Sequence[int], int],
/,
*,
mode: Literal[
"linear",
"bilinear",
"trilinear",
"nd",
"nearest",
"area",
"nearest_exact",
"tf_area",
"tf_bicubic",
"bicubic",
"mitchellcubic",
"lanczos3",
"lanczos5",
"gaussian",
] = "linear",
scale_factor: Optional[Union[Sequence[int], int]] = None,
recompute_scale_factor: Optional[bool] = None,
align_corners: bool = False,
antialias: bool = False,
out: Optional[torch.Tensor] = None,
):
if mode not in ["linear", "bilinear", "bicubic", "trilinear"]:
align_corners = None
return torch.nn.functional.interpolate(
x,
size=size,
mode=mode,
align_corners=align_corners,
antialias=antialias,
scale_factor=scale_factor,
recompute_scale_factor=recompute_scale_factor,
)
interpolate.partial_mixed_handler = (
lambda *args, mode="linear", align_corners=False, **kwargs: mode
not in [
"tf_area",
"nd",
"tf_bicubic",
"mitchellcubic",
"lanczos3",
"lanczos5",
"gaussian",
]
and (mode in ["linear", "bilinear", "bicubic", "trilinear"] or not align_corners)
)
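# Minimal usage sketch (illustrative only; this backend passes straight through to
# torch.nn.functional.interpolate, so the input here is channel-first):
# >>> import torch
# >>> interpolate(torch.ones((1, 3, 8, 8)), (4, 4), mode="bilinear").shape
# torch.Size([1, 3, 4, 4])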
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def adaptive_max_pool2d(
input: torch.Tensor, output_size: Union[Sequence[int], int]
) -> torch.Tensor:
return torch.nn.functional.adaptive_max_pool2d(input, output_size)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def adaptive_max_pool3d(
input: torch.Tensor, output_size: Union[Sequence[int], int]
) -> torch.Tensor:
return torch.nn.functional.adaptive_max_pool3d(input, output_size)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def adaptive_avg_pool1d(input, output_size):
return torch.nn.functional.adaptive_avg_pool1d(input, output_size)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def adaptive_avg_pool2d(input, output_size, /, *, data_format: str = "NHWC"):
squeeze = False
if input.ndim == 3:
input = torch.unsqueeze(input, 0)
squeeze = True
permuted_input = False
if data_format == "NHWC":
input = torch.permute(input, (0, input.ndim - 1, *range(1, input.ndim - 1)))
permuted_input = True
ret = torch.nn.functional.adaptive_avg_pool2d(input, output_size)
ret = torch.permute(ret, (0, *range(2, input.ndim), 1)) if permuted_input else ret
ret = torch.squeeze(ret, 0) if squeeze else ret
return ret
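# Minimal usage sketch (illustrative only):
# >>> import torch
# >>> adaptive_avg_pool2d(torch.ones((1, 8, 8, 3)), (2, 2)).shape   # NHWC in, NHWC out
# torch.Size([1, 2, 2, 3])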
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def fft2(
x: torch.Tensor,
*,
s: Optional[Sequence[int]] = None,
dim: Sequence[int] = (-2, -1),
norm: str = "backward",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if not all(isinstance(j, int) for j in dim):
raise ivy.utils.exceptions.IvyError(
f"Expecting {dim} to be a sequence of integers <class integer>"
)
if s is None:
s = (x.shape[dim[0]], x.shape[dim[1]])
if all(j < -len(x.shape) for j in s):
raise ivy.utils.exceptions.IvyError(
f"Invalid dim {dim}, expecting ranging"
" from {-len(x.shape)} to {len(x.shape)-1} "
)
if not all(isinstance(j, int) for j in s):
raise ivy.utils.exceptions.IvyError(
f"Expecting {s} to be a sequence of integers <class integer>"
)
if all(j <= 1 for j in s):
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {s}, expecting s points larger than 1"
)
if norm not in {"backward", "ortho", "forward"}:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
return torch.tensor(
torch.fft.fft2(x, s, dim, norm, out=out), dtype=torch.complex128
)
def ifftn(
x: torch.Tensor,
s: Optional[Union[int, Tuple[int]]] = None,
axes: Optional[Union[int, Tuple[int]]] = None,
*,
norm: Optional[str] = "backward",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.fft.ifftn(x, s=s, dim=axes, norm=norm, out=out)
def rfft(
x: torch.Tensor,
/,
*,
n: Optional[int] = None,
axis: int = -1,
norm: Literal["backward", "ortho", "forward"] = "backward",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x = x.real
if x.dtype == torch.float16:
x = x.to(torch.float32)
ret = torch.fft.rfft(x, n=n, dim=axis, norm=norm)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def rfftn(
x: torch.Tensor,
s: Optional[Sequence[int]] = None,
axes: Optional[Sequence[int]] = None,
*,
norm: str = "backward",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if not all(isinstance(j, int) for j in axes):
raise ivy.utils.exceptions.IvyError(
f"Expecting {axes} to be a sequence of integers <class integer>"
)
if s is None:
s = (x.shape[axes[0]], x.shape[axes[1]])
if all(j < -len(x.shape) for j in s):
raise ivy.utils.exceptions.IvyError(
f"Invalid axes {axes}, expecting ranging"
f" from {-len(x.shape)} to {len(x.shape)-1}"
)
if not all(isinstance(j, int) for j in s):
raise ivy.utils.exceptions.IvyError(
f"Expecting {s} to be a sequence of integers <class integer>"
)
if all(j <= 1 for j in s):
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {s}, expecting s points larger than 1"
)
if norm not in {"backward", "ortho", "forward"}:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
return torch.tensor(
torch.fft.rfftn(x, s, axes, norm=norm, out=out), dtype=torch.complex128
)
# stft
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
backend_version,
)
def stft(
signals: torch.Tensor,
frame_length: int,
frame_step: int,
/,
*,
fft_length: Optional[int] = None,
window_fn: Optional[Callable] = None,
pad_end: Optional[bool] = False,
name: Optional[str] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if not isinstance(frame_length, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(frame_length)}"
)
if frame_length < 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {frame_length}, expecting frame_length larger than or"
" equal to 1"
)
if not isinstance(frame_step, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(frame_step)}"
)
if frame_step < 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {frame_length}, expecting frame_length larger than or"
" equal to 1"
)
if fft_length is not None:
if not isinstance(fft_length, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(fft_length)}"
)
if fft_length < 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {frame_length}, expecting frame_length larger"
" than or equal to 1"
)
input_dtype = signals.dtype
if input_dtype == torch.float32:
dtype = torch.complex64
elif input_dtype == torch.float64:
dtype = torch.complex128
def stft_1D(signals, frame_length, frame_step, fft_length, pad_end):
if fft_length is None:
fft_length = 1
while fft_length < frame_length:
fft_length *= 2
num_samples = signals.shape[-1]
if pad_end:
num_samples = signals.shape[-1]
num_frames = -(-num_samples // frame_step)
pad_length = max(
0, frame_length + frame_step * (num_frames - 1) - num_samples
)
signals = torch.nn.functional.pad(signals, (0, pad_length))
else:
num_frames = 1 + (num_samples - frame_length) // frame_step
stft_result = []
if window_fn is None:
window = 1
else:
window = window_fn(frame_length)
for i in range(num_frames):
start = i * frame_step
end = start + frame_length
frame = signals[..., start:end]
windowed_frame = frame * window
pad_length = fft_length - frame_length
windowed_frame = torch.nn.functional.pad(windowed_frame, (0, pad_length))
            windowed_frame = windowed_frame.to(dtype)
fft_frame = torch.fft.fft(windowed_frame, axis=-1)
slit = int(fft_length // 2 + 1)
stft_result.append(fft_frame[..., 0:slit])
stft = torch.stack(stft_result, axis=0)
return stft
def stft_helper(nested_list, frame_length, frame_step, fft_length):
if len(nested_list.shape) > 1:
return [
stft_helper(sublist, frame_length, frame_step, fft_length)
for sublist in nested_list
]
else:
return stft_1D(nested_list, frame_length, frame_step, fft_length, pad_end)
to_return = stft_helper(signals, frame_length, frame_step, fft_length)
flat_list = [
item if isinstance(item, torch.Tensor) else torch.tensor(item)
for sublist in to_return
for item in sublist
]
result = torch.stack(flat_list)
original_shape = (len(to_return), len(to_return[0]))
result = result.view(original_shape + result.shape[1:])
return result
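# Minimal usage sketch (illustrative only; fft_length defaults to the next power of two
# >= frame_length, giving fft_length // 2 + 1 bins per frame):
# >>> import torch
# >>> stft(torch.ones((2, 16)), 8, 4).shape
# torch.Size([2, 3, 5])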
def sliding_window(
input: torch.Tensor,
kernel_size: Union[int, Tuple[int, int]],
/,
*,
stride: Union[int, Tuple[int, int]] = 1,
dilation: Union[int, Tuple[int, int]] = 1,
padding: Union[str, int, Tuple[int, int]] = 0,
) -> torch.Tensor:
if input.ndim != 4:
# convert input to 4D tensor as unfold only accepts 4D data
input_shape = input.shape
extend_dims = max(0, 4 - len(input_shape))
new_shape = (1,) * extend_dims + input_shape
input = input.reshape(new_shape).float()
    # expand ints and single-element tuples to the 2-tuples torch.nn.functional.unfold expects
    stride = (stride,) * 2 if isinstance(stride, int) else tuple(stride)
    stride = stride * 2 if len(stride) == 1 else stride
    dilation = (dilation,) * 2 if isinstance(dilation, int) else tuple(dilation)
    dilation = dilation * 2 if len(dilation) == 1 else dilation
kernel_size = (kernel_size,) * 2 if isinstance(kernel_size, int) else kernel_size
if len(kernel_size) < 2:
kernel_size = (kernel_size) * 2
# check padding and convert to right format
if isinstance(padding, str):
# convert padding from str to seq
if padding.upper() == "SAME":
pad_vals = []
for dim in input.shape:
pad_val = _handle_padding(
dim,
stride[0] if isinstance(stride, tuple) else stride,
kernel_size[0],
padding,
)
pad_vals.append(pad_val)
padding = pad_vals[:2]
else:
padding = 0
else:
padding = (padding,) * 2 if isinstance(padding, int) else padding
return torch.nn.functional.unfold(
input, kernel_size, dilation=dilation, padding=padding, stride=stride
)
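# Minimal usage sketch (illustrative only; output layout is (N, C * kh * kw, num_windows)):
# >>> import torch
# >>> sliding_window(torch.ones((1, 1, 4, 4)), 2).shape
# torch.Size([1, 4, 9])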
def max_unpool1d(
input: torch.Tensor,
indices: torch.Tensor,
kernel_size: Union[Tuple[int], int],
/,
*,
strides: Optional[Union[int, Tuple[int]]] = None,
padding: Union[int, Tuple[int]] = 0,
data_format: Optional[str] = "NCW",
) -> torch.Tensor:
if strides is None:
strides = kernel_size
revert = False
if data_format in ["NCW", "NWC"]:
if data_format == "NWC":
input = input.permute(0, 2, 1)
indices = indices.permute(0, 2, 1)
revert = True
else:
raise ValueError(
f"data_format attr should be NCW or NWC but found {data_format}"
)
kernel_size = _broadcast_pooling_helper(kernel_size, "1d", name="kernel_size")
padding = _broadcast_pooling_helper(padding, "1d", name="padding")
strides = _broadcast_pooling_helper(strides, "1d", name="strides")
ret = torch.nn.functional.max_unpool1d(
input,
indices,
kernel_size,
strides,
padding,
)
if revert:
ret = ret.permute(0, 2, 1)
return ret
def _max_unpool1d_mixed_handler(input, indices, kernel_size, **kwargs):
dt = kwargs.get("data_format", "NCW")
inds = indices.permute(0, 2, 1) if dt == "NWC" else indices
flat_inds = inds.reshape((-1,))
stride = indices.shape[-1]
not_dup = True
for i in range(0, flat_inds.numel(), stride):
inds = flat_inds[i : (i + stride)]
inds = inds.unique()
if inds.numel() != stride:
not_dup = False
return not_dup
max_unpool1d.partial_mixed_handler = _max_unpool1d_mixed_handler
| ivy/ivy/functional/backends/torch/experimental/layers.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/experimental/layers.py",
"repo_id": "ivy",
"token_count": 21097
} | 30 |
# global
import math
from numbers import Number
from typing import Iterable, List, Optional, Sequence, Tuple, Union
import torch
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
# noinspection PyProtectedMember
from ivy.functional.ivy.manipulation import _calculate_out_shape
from . import backend_version
def _reshape_fortran_torch(x, shape):
if len(x.shape) > 0:
x = x.permute(*reversed(range(len(x.shape))))
return x.reshape(shape[::-1]).permute(list(range(len(shape)))[::-1])
# Array API Standard #
# -------------------#
def concat(
xs: Union[Tuple[torch.Tensor, ...], List[torch.Tensor]],
/,
*,
axis: int = 0,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if axis is None:
return torch.cat([torch.flatten(x) for x in xs], dim=0, out=out)
return torch.cat(xs, dim=axis, out=out)
concat.support_native_out = True
def expand_dims(
x: torch.Tensor,
/,
*,
copy: Optional[bool] = None,
axis: Union[int, Sequence[int]] = 0,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
out_shape = _calculate_out_shape(axis, x.shape)
# torch.reshape since it can operate on contiguous and non_contiguous tensors
return x.reshape(out_shape)
def flip(
x: torch.Tensor,
/,
*,
copy: Optional[bool] = None,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if copy:
x = x.clone()
num_dims = len(x.shape)
if not num_dims:
return x
if axis is None:
new_axis = list(range(num_dims))
else:
new_axis = axis
    if isinstance(new_axis, int):
        new_axis = [new_axis]
new_axis = [item + num_dims if item < 0 else item for item in new_axis]
return torch.flip(x, new_axis)
def permute_dims(
x: torch.Tensor,
/,
axes: Tuple[int, ...],
*,
copy: Optional[bool] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.permute(x, axes)
@with_unsupported_dtypes(
{"2.2 and below": ("bfloat16",)},
backend_version,
)
def reshape(
x: torch.Tensor,
/,
shape: Union[ivy.NativeShape, Sequence[int]],
*,
copy: Optional[bool] = None,
order: str = "C",
allowzero: bool = True,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if copy:
x = x.clone()
ivy.utils.assertions.check_elem_in_list(order, ["C", "F"])
if not allowzero:
shape = [
new_s if con else old_s
for new_s, con, old_s in zip(shape, torch.tensor(shape) != 0, x.shape)
]
if order == "F":
return _reshape_fortran_torch(x, shape)
return torch.reshape(x, shape)
def roll(
x: torch.Tensor,
/,
shift: Union[int, Sequence[int]],
*,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if torch.is_tensor(axis):
axis = axis.tolist()
# manually cover the case when shift is int, and axis is a tuple/list
if isinstance(shift, int) and (type(axis) in [list, tuple]):
shift = [shift for _ in range(len(axis))]
if isinstance(shift, torch.Tensor):
shift = shift.tolist()
return torch.roll(x, shift, axis)
def squeeze(
x: torch.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
copy: Optional[bool] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if isinstance(axis, int):
if x.size(dim=axis) > 1:
raise ValueError(
f"Expected dimension of size [{-x.dim()}, {x.dim()}], but found"
f" dimension size {axis}"
)
if x.shape[axis] != 1:
raise ivy.utils.exceptions.IvyException(
f"Expected size of axis to be 1 but was {x.shape[axis]}"
)
return torch.squeeze(x, axis)
if axis is None:
if copy:
newarr = torch.clone(x)
return torch.squeeze(newarr)
return torch.squeeze(x)
newarr = torch.clone(x)
if isinstance(axis, tuple):
axis = list(axis)
normalise_axis = [
(len(x.shape) - abs(element)) if element < 0 else element for element in axis
]
normalise_axis.sort()
axis_updated_after_squeeze = [dim - key for (key, dim) in enumerate(normalise_axis)]
dim = x.dim()
for i in axis_updated_after_squeeze:
shape = x.shape[i]
if shape > 1 and (shape < -dim or dim <= shape):
raise ValueError(
f"Expected dimension of size [{-dim}, {dim}], but found dimension size"
f" {shape}"
)
else:
if copy:
newarr = torch.squeeze(newarr, i)
else:
x = torch.squeeze(x, i)
if copy:
return newarr
return x
def stack(
arrays: Union[Tuple[torch.Tensor], List[torch.Tensor]],
/,
*,
axis: int = 0,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.stack(arrays, axis, out=out)
stack.support_native_out = True
# Extra #
# ------#
def split(
x: torch.Tensor,
/,
*,
copy: Optional[bool] = None,
num_or_size_splits: Optional[Union[int, List[int], torch.Tensor]] = None,
axis: int = 0,
with_remainder: bool = False,
) -> List[torch.Tensor]:
if x.shape == ():
if num_or_size_splits is not None and num_or_size_splits != 1:
raise ivy.utils.exceptions.IvyException(
"input array had no shape, but num_sections specified was"
f" {num_or_size_splits}"
)
return [x]
dim_size: int = x.shape[axis]
if num_or_size_splits is None:
num_or_size_splits = 1
elif isinstance(num_or_size_splits, torch.Tensor):
num_or_size_splits = num_or_size_splits.to(torch.int64).tolist()
elif isinstance(num_or_size_splits, int):
if with_remainder:
num_chunks = x.shape[axis] / num_or_size_splits
num_chunks_int = math.floor(num_chunks)
remainder = num_chunks - num_chunks_int
if remainder == 0:
num_or_size_splits = torch.round(
torch.tensor(dim_size) / torch.tensor(num_or_size_splits)
)
else:
num_or_size_splits = tuple(
[num_or_size_splits] * num_chunks_int
+ [int(remainder * num_or_size_splits)]
)
else:
num_or_size_splits = torch.round(
torch.tensor(dim_size) / torch.tensor(num_or_size_splits)
)
elif isinstance(num_or_size_splits, list):
if num_or_size_splits[-1] == -1:
# infer the final size split
remaining_size = dim_size - sum(num_or_size_splits[:-1])
num_or_size_splits[-1] = remaining_size
num_or_size_splits = tuple(num_or_size_splits)
return list(torch.split(x, num_or_size_splits, axis))
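# Minimal usage sketch (illustrative only):
# >>> import torch
# >>> [t.shape for t in split(torch.arange(7), num_or_size_splits=3, with_remainder=True)]
# [torch.Size([3]), torch.Size([3]), torch.Size([1])]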
@with_unsupported_dtypes({"2.2 and below": ("int8", "int16", "uint8")}, backend_version)
def repeat(
x: torch.Tensor,
/,
repeats: Union[int, Iterable[int]],
*,
axis: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if len(x.shape) == 0 and axis in [0, -1]:
axis = None
repeats = torch.tensor(repeats)
return torch.repeat_interleave(x, repeats, axis)
def tile(
x: torch.Tensor, /, repeats: Sequence[int], *, out: Optional[torch.Tensor] = None
) -> torch.Tensor:
if isinstance(repeats, torch.Tensor):
repeats = repeats.detach().cpu().numpy().tolist()
return x.repeat(repeats)
def constant_pad(
x: torch.Tensor,
/,
pad_width: List[List[int]],
*,
value: Number = 0.0,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if 0 in x.shape:
new_shape = [s + sum(pad_width[i]) for i, s in enumerate(x.shape)]
return torch.ones(new_shape, dtype=x.dtype) * value
if x.shape == ():
x = x.unsqueeze(0)
if isinstance(pad_width, torch.Tensor):
pad_width = pad_width.detach().cpu().numpy().tolist()
pad_width_flat: List[int] = []
for pad_width_sec in reversed(pad_width):
for item in pad_width_sec:
pad_width_flat.append(item)
return torch.nn.functional.pad(x, pad_width_flat, mode="constant", value=value)
def zero_pad(
x: torch.Tensor,
/,
pad_width: List[List[int]],
*,
out: Optional[torch.Tensor] = None,
):
return constant_pad(x, pad_width, value=0.0)
def swapaxes(
x: torch.Tensor,
axis0: int,
axis1: int,
/,
*,
copy: Optional[bool] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.transpose(x, axis0, axis1)
@with_unsupported_dtypes(
{"2.2 and below": ("bool", "float16", "complex")}, backend_version
)
def clip(
x: torch.Tensor,
/,
x_min: Optional[Union[Number, torch.Tensor]] = None,
x_max: Optional[Union[Number, torch.Tensor]] = None,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
promoted_type = x.dtype
if x_min is not None:
if not hasattr(x_min, "dtype"):
x_min = ivy.array(x_min).data
promoted_type = ivy.as_native_dtype(ivy.promote_types(x.dtype, x_min.dtype))
if x_max is not None:
if not hasattr(x_max, "dtype"):
x_max = ivy.array(x_max).data
promoted_type = ivy.as_native_dtype(
ivy.promote_types(promoted_type, x_max.dtype)
)
x_max = x_max.to(promoted_type)
x = x.to(promoted_type)
if x_min is not None:
x_min = x_min.to(promoted_type)
return torch.clamp(x, x_min, x_max, out=out)
clip.support_native_out = True
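# Minimal usage sketch (illustrative only; assumes ivy's backend has been set to torch so
# that the scalar bounds are promoted via ivy.array):
# >>> import torch
# >>> clip(torch.tensor([-1.0, 0.5, 2.0]), 0.0, 1.0).tolist()
# [0.0, 0.5, 1.0]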
def unstack(
x: torch.Tensor,
/,
*,
copy: Optional[bool] = None,
axis: int = 0,
keepdims: bool = False,
) -> List[torch.Tensor]:
if x.shape == ():
if copy:
newarr = torch.clone(x)
return [newarr]
return [x]
ret = list(torch.unbind(x, axis))
if keepdims:
return [r.unsqueeze(axis) for r in ret]
return ret
| ivy/ivy/functional/backends/torch/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/manipulation.py",
"repo_id": "ivy",
"token_count": 4849
} | 31 |
from . import numpy
from . import array
from . import tree_util
| ivy/ivy/functional/frontends/jax/_src/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/_src/__init__.py",
"repo_id": "ivy",
"token_count": 18
} | 32 |
from . import non_linear_activations
from .non_linear_activations import *
| ivy/ivy/functional/frontends/jax/nn/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/nn/__init__.py",
"repo_id": "ivy",
"token_count": 21
} | 33 |
from . import probability
from . import transformer
| ivy/ivy/functional/frontends/mindspore/nn/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/mindspore/nn/__init__.py",
"repo_id": "ivy",
"token_count": 10
} | 34 |
import ivy
from ivy.functional.frontends.mxnet.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def diagonal(a, offset=0, axis1=0, axis2=1):
return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)
| ivy/ivy/functional/frontends/mxnet/numpy/symbol.py/0 | {
"file_path": "ivy/ivy/functional/frontends/mxnet/numpy/symbol.py",
"repo_id": "ivy",
"token_count": 95
} | 35 |
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
import ivy.functional.frontends.numpy as np_frontend
all_complex_dtypes = ["complex64", "complex128"]
all_float_dtypes = [
"float16",
"float32",
"float64",
]
# dtypes as string
all_int_dtypes = ["int8", "int16", "int32", "int64"]
all_uint_dtypes = ["uint8", "uint16", "uint32", "uint64"]
@to_ivy_arrays_and_back
def can_cast(from_, to, casting="safe"):
ivy.utils.assertions.check_elem_in_list(
casting,
["no", "equiv", "safe", "same_kind", "unsafe"],
message="casting must be one of [no, equiv, safe, same_kind, unsafe]",
)
if ivy.is_array(from_):
from_ = ivy.as_ivy_dtype(ivy.dtype(from_))
elif isinstance(from_, (str, type)):
from_ = ivy.as_ivy_dtype(from_)
elif isinstance(from_, np_frontend.dtype):
from_ = from_._ivy_dtype
else:
raise ivy.utils.exceptions.IvyException(
"from_ must be one of dtype, dtype specifier, scalar, or array"
)
if isinstance(to, (str, type)):
to = ivy.as_ivy_dtype(to)
elif isinstance(to, np_frontend.dtype):
to = to._ivy_dtype
else:
raise ivy.utils.exceptions.IvyException("to must be dtype or dtype specifier")
if casting in ["no", "equiv"]:
return from_ == to
if casting == "safe" and to in np_frontend.numpy_casting_rules[from_]:
return True
if casting == "same_kind":
if from_ == to or "bool" in from_:
return True
if "int" in from_ and ("float" in to or "complex" in to):
return True
elif "float" in from_ and ("float" in to or "complex" in to):
return True
elif "uint" in from_ and ("int" in to or "float" in to or "complex" in to):
return True
elif "int" in from_ and "int" in to and "uint" not in to:
return True
else:
return to in np_frontend.numpy_casting_rules[from_]
if casting == "unsafe":
return True
return False
def min_scalar_type(a, /):
if ivy.is_array(a) and a.shape == ():
a = a.item()
if np_frontend.isscalar(a):
validation_dtype = type(a)
if "int" in validation_dtype.__name__:
for dtype in all_uint_dtypes:
if np_frontend.iinfo(dtype).min <= a <= np_frontend.iinfo(dtype).max:
return np_frontend.dtype(dtype)
for dtype in all_int_dtypes:
if np_frontend.iinfo(dtype).min <= a <= np_frontend.iinfo(dtype).max:
return np_frontend.dtype(dtype)
elif "float" in validation_dtype.__name__:
for dtype in all_float_dtypes:
if np_frontend.finfo(dtype).min <= a <= np_frontend.finfo(dtype).max:
return np_frontend.dtype(dtype)
elif "complex" in validation_dtype.__name__:
for dtype in all_complex_dtypes:
if np_frontend.finfo(dtype).min <= a <= np_frontend.finfo(dtype).max:
return np_frontend.dtype(dtype)
else:
return np_frontend.dtype(validation_dtype)
else:
return np_frontend.dtype(a.dtype)
def promote_types(type1, type2, /):
if isinstance(type1, np_frontend.dtype):
type1 = type1._ivy_dtype
if isinstance(type2, np_frontend.dtype):
type2 = type2._ivy_dtype
return np_frontend.dtype(np_frontend.promote_numpy_dtypes(type1, type2))
@to_ivy_arrays_and_back
def result_type(*arrays_and_dtypes):
if len(arrays_and_dtypes) == 0:
raise ivy.utils.exceptions.IvyException(
"At least one array or dtype must be provided"
)
if len(arrays_and_dtypes) == 1:
if isinstance(arrays_and_dtypes[0], np_frontend.dtype):
return arrays_and_dtypes[0]
else:
return np_frontend.dtype(arrays_and_dtypes[0].dtype)
else:
res = (
arrays_and_dtypes[0]
if not ivy.is_array(arrays_and_dtypes[0])
else np_frontend.dtype(arrays_and_dtypes[0].dtype)
)
for elem in arrays_and_dtypes:
if ivy.is_array(elem):
elem = np_frontend.dtype(elem.dtype)
res = promote_types(res, elem)
return res
| ivy/ivy/functional/frontends/numpy/data_type_routines/general.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/data_type_routines/general.py",
"repo_id": "ivy",
"token_count": 2120
} | 36 |
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
from_zero_dim_arrays_to_scalar,
)
from ivy.func_wrapper import with_unsupported_dtypes
# det
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def det(a):
return ivy.det(a)
# matrix_rank
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def matrix_rank(A, tol=None, hermitian=False):
return ivy.matrix_rank(A, atol=tol, hermitian=hermitian)
# solve
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def norm(x, ord=None, axis=None, keepdims=False):
if axis is None and (ord is not None):
if x.ndim not in (1, 2):
raise ValueError("Improper number of dimensions to norm.")
else:
if x.ndim == 1:
ret = ivy.vector_norm(x, axis=axis, keepdims=keepdims, ord=ord)
else:
ret = ivy.matrix_norm(x, axis=axis, keepdims=keepdims, ord=ord)
elif axis is None and ord is None:
x = ivy.flatten(x)
ret = ivy.vector_norm(x, axis=0, keepdims=keepdims, ord=2)
if isinstance(axis, int):
ret = ivy.vector_norm(x, axis=axis, keepdims=keepdims, ord=ord)
elif isinstance(axis, tuple):
ret = ivy.matrix_norm(x, axis=axis, keepdims=keepdims, ord=ord)
return ret
# slogdet
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def slogdet(a):
sign, logabsdet = ivy.slogdet(a)
return sign, logabsdet
# trace
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def trace(a, offset=0, axis1=0, axis2=1, out=None):
ret = ivy.trace(a, offset=offset, axis1=axis1, axis2=axis2, out=out)
return ret
| ivy/ivy/functional/frontends/numpy/linalg/norms_and_other_numbers.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/linalg/norms_and_other_numbers.py",
"repo_id": "ivy",
"token_count": 849
} | 37 |
# local
from collections import namedtuple
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def append(arr, values, axis=None):
if axis is None:
return ivy.concat((ivy.flatten(arr), ivy.flatten(values)), axis=0)
else:
return ivy.concat((arr, values), axis=axis)
@to_ivy_arrays_and_back
def trim_zeros(filt, trim="fb"):
first = 0
trim = trim.upper()
if "F" in trim:
for i in filt:
if i != 0.0:
break
else:
first = first + 1
last = ivy.shape(filt)[0]
if "B" in trim:
for i in filt[::-1]:
if i != 0.0:
break
else:
last = last - 1
return filt[first:last]
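# Example (illustrative): with the default trim="fb", leading and trailing zeros are both
# removed, so trim_zeros(np.array([0, 0, 1, 2, 0])) keeps only the [1, 2] slice.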
@to_ivy_arrays_and_back
def unique(
array, /, return_index=False, return_inverse=False, return_counts=False, axis=None
):
results = ivy.unique_all(array, axis=axis)
fields = ["values"]
if return_index:
fields.append("indices")
if return_inverse:
fields.append("inverse_indices")
if return_counts:
fields.append("counts")
Results = namedtuple("Results", fields)
values = [results.values]
if return_index:
values.append(results.indices)
if return_inverse:
values.append(results.inverse_indices)
if return_counts:
values.append(results.counts)
return Results(*values)
| ivy/ivy/functional/frontends/numpy/manipulation_routines/adding_and_removing_elements.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/manipulation_routines/adding_and_removing_elements.py",
"repo_id": "ivy",
"token_count": 684
} | 38 |
# global
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_out,
handle_numpy_dtype,
handle_numpy_casting,
from_zero_dim_arrays_to_scalar,
)
# --- Helpers --- #
# --------------- #
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _conj(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.conj(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
def angle(z, deg=False):
angle = ivy.angle(z, deg=deg)
if deg and len(z.shape) == 0:
angle = ivy.astype(angle, ivy.float64)
return angle
@to_ivy_arrays_and_back
def imag(val):
return ivy.imag(val)
@to_ivy_arrays_and_back
def real(val):
return ivy.real(val)
| ivy/ivy/functional/frontends/numpy/mathematical_functions/handling_complex_numbers.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/mathematical_functions/handling_complex_numbers.py",
"repo_id": "ivy",
"token_count": 483
} | 39 |
from . import Generator
from .Generator import *
from . import RandomState
from .RandomState import *
from . import functions
from .functions import *
| ivy/ivy/functional/frontends/numpy/random/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/random/__init__.py",
"repo_id": "ivy",
"token_count": 37
} | 40 |
import ivy
from ivy.functional.frontends.onnx.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def Abs(input):
return ivy.abs(input)
@to_ivy_arrays_and_back
def Acos(input):
return ivy.acos(input)
@to_ivy_arrays_and_back
def Acosh(input):
return ivy.acosh(input)
@to_ivy_arrays_and_back
def Add(x1, x2):
return ivy.add(x1, x2)
@to_ivy_arrays_and_back
def Asin(input):
return ivy.asin(input)
| ivy/ivy/functional/frontends/onnx/elementwise.py/0 | {
"file_path": "ivy/ivy/functional/frontends/onnx/elementwise.py",
"repo_id": "ivy",
"token_count": 207
} | 41 |
from . import activation
from .activation import *
from . import common
from .common import *
from . import conv
from .conv import *
from . import distance
from .distance import *
from . import extension
from .extension import *
from . import input
from .input import *
from . import loss
from .loss import *
from . import norm
from .norm import *
from . import pooling
from .pooling import *
from . import vision
from .vision import *
| ivy/ivy/functional/frontends/paddle/nn/functional/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/nn/functional/__init__.py",
"repo_id": "ivy",
"token_count": 113
} | 42 |
# local
from ..creation import * # noqa: F401
| ivy/ivy/functional/frontends/paddle/tensor/creation.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/tensor/creation.py",
"repo_id": "ivy",
"token_count": 16
} | 43 |
import ivy
from .generic import NDFrame
class Series(NDFrame):
def __init__(
self,
data,
index=None,
dtype=None,
name=None,
copy=False,
fastpath=False,
columns=None,
*args,
**kwargs,
):
super().__init__(
data,
index,
columns=None,
dtype=dtype,
name=name,
copy=copy,
*args,
**kwargs,
)
assert self.array.ndim == 1, "Series Data must be 1-dimensional"
def __repr__(self):
series_name = f"{self.name} " if self.name is not None else ""
return (
f"frontends.pandas.Series {series_name}({self.array.to_list()},"
f" index={self.index.array.to_list()})"
)
def __getitem__(self, index_val):
if isinstance(index_val, slice):
return Series(
self.array[index_val],
index=self.index[index_val],
name=self.name,
dtype=self.dtype,
copy=self.copy,
)
return self.array[self.index.index(index_val)].item()
def __getattr__(self, item):
if item in self.index:
return self[item]
else:
return super().__getattr__(item)
def __len__(self):
return len(self.array)
def sum(self, axis=None, skipna=True, numeric_only=False, min_count=0, **kwargs):
_array = self.array
if min_count > 0:
if ivy.has_nans(_array):
number_values = _array.size - ivy.sum(ivy.isnan(_array))
else:
number_values = _array.size
if min_count > number_values:
return ivy.nan
if skipna:
return ivy.nansum(_array)
return _array.sum()
def mean(self, axis=None, skipna=True, numeric_only=False, **kwargs):
if skipna:
return ivy.nanmean(self.array)
return self.array.mean()
def add(self, other, level=None, fill_value=None, axis=0):
# todo add level (with multiindex) and fill_value (with wrapper)
# todo handle data alignment
new_array = ivy.add(self.array, other.array)
return Series(new_array)
def get(self, key, default=None):
if key in self.index:
return self[key]
return default
def keys(self):
return self.index
| ivy/ivy/functional/frontends/pandas/series.py/0 | {
"file_path": "ivy/ivy/functional/frontends/pandas/series.py",
"repo_id": "ivy",
"token_count": 1272
} | 44 |
from .linalg import *
from . import interpolative
| ivy/ivy/functional/frontends/scipy/linalg/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/scipy/linalg/__init__.py",
"repo_id": "ivy",
"token_count": 14
} | 45 |
from .spatial import *
from . import distance
from . import transform
| ivy/ivy/functional/frontends/scipy/spatial/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/scipy/spatial/__init__.py",
"repo_id": "ivy",
"token_count": 17
} | 46 |
import ivy
import numbers
from ivy.functional.frontends.numpy.func_wrapper import outputs_to_frontend_arrays
@outputs_to_frontend_arrays
def make_circles(
n_samples=100, *, shuffle=True, noise=None, random_state=None, factor=0.8
):
# numbers.Integral also includes bool
if isinstance(n_samples, numbers.Integral):
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
elif isinstance(n_samples, tuple):
n_samples_out, n_samples_in = n_samples
outer_circ_x = ivy.cos(
ivy.linspace(0, 2 * ivy.pi, num=n_samples_out, endpoint=False)
)
outer_circ_y = ivy.sin(
ivy.linspace(0, 2 * ivy.pi, num=n_samples_out, endpoint=False)
)
inner_circ_x = (
ivy.cos(ivy.linspace(0, 2 * ivy.pi, num=n_samples_in, endpoint=False)) * factor
)
inner_circ_y = (
ivy.sin(ivy.linspace(0, 2 * ivy.pi, num=n_samples_in, endpoint=False)) * factor
)
X = ivy.concat(
[
ivy.stack([outer_circ_x, outer_circ_y], axis=1),
ivy.stack([inner_circ_x, inner_circ_y], axis=1),
],
axis=0,
)
y = ivy.concat(
[
ivy.zeros(n_samples_out, dtype=ivy.int32),
ivy.ones(n_samples_in, dtype=ivy.int32),
],
axis=0,
)
return X, y
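# Example (illustrative): make_circles(n_samples=10, factor=0.5) returns a (10, 2) feature
# array X (outer circle first, then the inner circle scaled by factor) and a length-10
# label vector y of zeros and ones.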
@outputs_to_frontend_arrays
def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None):
if isinstance(n_samples, numbers.Integral):
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
elif isinstance(n_samples, tuple):
n_samples_out, n_samples_in = n_samples
outer_circ_x = ivy.cos(ivy.linspace(0, ivy.pi, n_samples_out))
outer_circ_y = ivy.sin(ivy.linspace(0, ivy.pi, n_samples_out))
inner_circ_x = 1 - ivy.cos(ivy.linspace(0, ivy.pi, n_samples_in))
inner_circ_y = 1 - ivy.sin(ivy.linspace(0, ivy.pi, n_samples_in)) - 0.5
X = ivy.concat(
[
ivy.stack([outer_circ_x, outer_circ_y], axis=1),
ivy.stack([inner_circ_x, inner_circ_y], axis=1),
],
axis=0,
)
y = ivy.concat(
[
ivy.zeros(n_samples_out, dtype=ivy.int32),
ivy.ones(n_samples_in, dtype=ivy.int32),
],
axis=0,
)
return X, y
| ivy/ivy/functional/frontends/sklearn/datasets/_samples_generator.py/0 | {
"file_path": "ivy/ivy/functional/frontends/sklearn/datasets/_samples_generator.py",
"repo_id": "ivy",
"token_count": 1205
} | 47 |
import ivy.functional.frontends.tensorflow as tf_frontend
def add(x, y, name=None):
return tf_frontend.math.add(x, y, name=name)
| ivy/ivy/functional/frontends/tensorflow/__operators__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/__operators__.py",
"repo_id": "ivy",
"token_count": 53
} | 48 |
# local
import ivy
from ivy.functional.frontends.tensorflow import check_tensorflow_casting
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.functional.frontends.tensorflow.func_wrapper import (
to_ivy_arrays_and_back,
handle_tf_dtype,
)
import ivy.functional.frontends.tensorflow as tf_frontend
@to_ivy_arrays_and_back
def adjoint(matrix, name=None):
return ivy.adjoint(matrix)
@to_ivy_arrays_and_back
def band_part(input, num_lower, num_upper, name=None):
m, n = ivy.meshgrid(
ivy.arange(input.shape[-2]), ivy.arange(input.shape[-1]), indexing="ij"
)
mask = ((num_lower < 0) | ((m - n) <= num_lower)) & (
(num_upper < 0) | ((n - m) <= num_upper)
)
return ivy.where(mask, input, ivy.zeros_like(input))
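# Example (illustrative): band_part(x, 0, 0) keeps only the main diagonal,
# band_part(x, -1, 0) the lower triangle and band_part(x, 0, -1) the upper triangle,
# matching tf.linalg.band_part semantics.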
@to_ivy_arrays_and_back
def cholesky(input, name=None):
def symmetrize(input):
# TODO : Take Hermitian transpose after complex numbers added
return (input + ivy.swapaxes(input, -1, -2)) / 2
input = symmetrize(input)
return ivy.cholesky(input)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, "tensorflow")
def cholesky_solve(chol, rhs, name=None):
chol, rhs = check_tensorflow_casting(chol, rhs)
y = ivy.solve(chol, rhs)
return ivy.solve(ivy.matrix_transpose(chol), y)
@to_ivy_arrays_and_back
def cross(a, b, name=None):
return ivy.cross(a, b)
@to_ivy_arrays_and_back
def det(input, name=None):
return ivy.det(input)
@to_ivy_arrays_and_back
def diag(
diagonal,
/,
k=0,
*,
num_rows=None,
num_cols=None,
padding_value=0,
align="RIGHT_LEFT",
name="diag",
):
# TODO: Implement ivy.matrix_diag in ivy API
diagonal = ivy.array(diagonal)
shape = list(diagonal.shape)
shape[-1] += abs(k)
output = ivy.full(shape + [shape[-1]], padding_value)
if k > 0:
for i in range(shape[-1]):
try:
output[..., i, i + k] = diagonal[..., i]
except IndexError:
break
else:
for i in range(shape[-1]):
try:
output[..., i + abs(k), i] = diagonal[..., i]
except IndexError:
break
size = 1
for dim in output.shape:
size *= dim
if (num_cols and num_rows) and (size == (num_cols * num_rows)):
output = ivy.reshape(output, (num_rows, num_cols))
return ivy.astype(output, ivy.dtype(diagonal))
@to_ivy_arrays_and_back
def eig(tensor, name=None):
return ivy.eig(tensor)
@to_ivy_arrays_and_back
def eigh(tensor, name=None):
return ivy.eigh(tensor)
@to_ivy_arrays_and_back
@with_supported_dtypes(
{"2.15.0 and below": ("float32", "float64", "complex64", "complex128")},
"tensorflow",
)
def eigvals(tensor, name=None):
return ivy.eigvals(tensor)
@to_ivy_arrays_and_back
def eigvalsh(tensor, name=None):
return ivy.eigvalsh(tensor)
@to_ivy_arrays_and_back
def einsum(equation, *inputs, **kwargs):
return tf_frontend.einsum(equation, *inputs, **kwargs)
def expm(input, name=None):
return ivy.matrix_exp(input)
@handle_tf_dtype
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, "tensorflow")
def eye(num_rows, num_columns=None, batch_shape=None, dtype=ivy.float32, name=None):
return ivy.eye(num_rows, num_columns, batch_shape=batch_shape, dtype=dtype)
@with_supported_dtypes({"2.15.0 and below": ("float32", "float64")}, "tensorflow")
@to_ivy_arrays_and_back
def global_norm(t_list, name=None):
l2_norms = [ivy.sqrt(ivy.sum(ivy.square(t))) ** 2 for t in t_list if t is not None]
return ivy.sqrt(ivy.sum(ivy.asarray(l2_norms, dtype=ivy.dtype(l2_norms[0]))))
@to_ivy_arrays_and_back
@with_supported_dtypes(
{
"2.15.0 and below": (
"float32",
"float64",
"complex64",
"complex128",
)
},
"tensorflow",
)
def inv(input, adjoint=False, name=None):
return ivy.inv(input, adjoint=adjoint)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.15.0 and below": ("float32", "float64")}, "tensorflow")
def l2_normalize(x, axis=None, epsilon=1e-12, name=None):
square_sum = ivy.sum(ivy.square(x), axis=axis, keepdims=True)
x_inv_norm = ivy.reciprocal(ivy.sqrt(ivy.maximum(square_sum, epsilon)))
return ivy.multiply(x, x_inv_norm)
@to_ivy_arrays_and_back
@with_supported_dtypes(
{"2.15.0 and below": ("float16", "float32", "float64", "complex64", "complex128")},
"tensorflow",
)
def logdet(matrix, name=None):
return ivy.det(matrix).log()
@to_ivy_arrays_and_back
def lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None):
return ivy.lu_matrix_inverse(
ivy.lu_reconstruct(lower_upper, perm), validate_args=validate_args, name=name
)
@to_ivy_arrays_and_back
@with_supported_dtypes(
{
"2.15.0 and below": (
"float16",
"float32",
"float64",
"int32",
"complex64",
"complex128",
)
},
"tensorflow",
)
def matmul(
a,
b,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False,
a_is_sparse=False,
b_is_sparse=False,
output_type=None,
name=None,
):
if transpose_a and adjoint_a:
raise ivy.utils.exceptions.IvyException(
"Only one of `transpose_a` and `adjoint_a` can be True. "
"Received `transpose_a`=True, `adjoint_a`=True."
)
if transpose_b and adjoint_b:
raise ivy.utils.exceptions.IvyException(
"Only one of `transpose_b` and `adjoint_b` can be True. "
"Received `transpose_b`=True, `adjoint_b`=True."
)
return ivy.matmul(
a,
b,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b,
)
@to_ivy_arrays_and_back
def matrix_rank(a, tol=None, validate_args=False, name=None):
# TODO:The tests will fail because output shapes mismatch
# DO NOT for any reason change anything with the backend function
# all the fixes must be here as the backend function is
# working as expected and in compliance with Array API
return ivy.astype(ivy.matrix_rank(a, atol=tol), ivy.int32)
@to_ivy_arrays_and_back
def matrix_transpose(a, name="matrix_transpose", conjugate=False):
if conjugate:
return ivy.adjoint(a)
return ivy.matrix_transpose(a)
@with_supported_dtypes({"2.15.0 and below": ("float32", "float64")}, "tensorflow")
@to_ivy_arrays_and_back
def norm(tensor, ord="euclidean", axis=None, keepdims=None, name=None):
keepdims = keepdims or False
# Check if it's a matrix norm
if (type(axis) in [tuple, list]) and (len(axis) == 2):
return ivy.matrix_norm(tensor, ord=ord, axis=axis, keepdims=keepdims)
# Else resort to a vector norm
return ivy.vector_norm(tensor, ord=ord, axis=axis, keepdims=keepdims)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.15.0 and below": ("float32", "float64")}, "tensorflow")
def normalize(tensor, ord="euclidean", axis=None, name=None):
tensor = tf_frontend.convert_to_tensor(
tensor, dtype=ivy.dtype(tensor), dtype_hint="Any"
)
_norm = norm(tensor, ord=ord, axis=axis, keepdims=True)
normalized = tf_frontend.math.divide(tensor, _norm)
return normalized, _norm
@to_ivy_arrays_and_back
def pinv(a, rcond=None, validate_args=False, name=None):
return ivy.pinv(a, rtol=rcond)
@to_ivy_arrays_and_back
def qr(input, /, *, full_matrices=False, name=None):
return ivy.qr(input)
@to_ivy_arrays_and_back
@with_supported_dtypes(
{
"2.15.0 and below": (
"bfloat16",
"half",
"float32",
"float64",
"int32",
"int64",
"float16",
"float32",
"float64",
"complex64",
"complex128",
)
},
"tensorflow",
)
def set_diag(input, diagonal, /, *, k=0, align="RIGHT_LEFT", name=None):
# TODO:
# 1. Add support for different k values and align options
# 2. Add support for input tensors with ranks larger than 3
# Convert input and diagonal to Ivy array format
input, diagonal = map(ivy.array, (input, diagonal))
# Check if the input tensor has a rank larger than 3
if input.ndim > 3:
raise ivy.utils.exceptions.IvyNotImplementedException(
"Input tensor must have rank less than or equal to 3.\nInput shape:"
f" {input.shape}"
)
# Check if the first dimension of the input and diagonal match
if input.shape[0] != diagonal.shape[0]:
raise ivy.utils.exceptions.IvyValueError(
"Number of diagonal vectors must match the number of matrices in the"
f" input.\nInput shape: {input.shape}, Diagonal shape: {diagonal.shape}"
)
# Handle the case where input is a 2D matrix
if input.ndim < 3:
# Check the diagonal length matches the first dimension of the matrix
if input.shape[0] != diagonal.shape[0]:
raise ivy.utils.exceptions.IvyValueError(
"Length of the diagonal vector must match the first dimension of the"
f" matrix.\nMatrix shape: {input.shape}, Diagonal shape:"
f" {diagonal.shape}"
)
input[range(input.shape[0]), range(input.shape[0])] = diagonal
else:
for matrix, new_diagonal in zip(input, diagonal):
# Check the diagonal length matches the first dimension of the matrix
if matrix.shape[0] != new_diagonal.shape[0]:
raise ivy.utils.exceptions.IvyValueError(
"Length of the diagonal vector must match the first dimension of"
f" the matrix.\nMatrix shape: {matrix.shape}, Diagonal shape:"
f" {new_diagonal.shape}"
)
matrix[range(matrix.shape[0]), range(matrix.shape[0])] = new_diagonal
return input
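# Editor's illustrative sketch (not part of the original file): with the
# default k=0 and a square 2-D input, only the main diagonal is overwritten:
#
#     m = ivy.array([[1, 2], [3, 4]])
#     set_diag(m, ivy.array([9, 8]))  # -> [[9, 2], [3, 8]]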
@to_ivy_arrays_and_back
def slogdet(input, name=None):
return ivy.slogdet(input)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, "tensorflow")
def solve(matrix, rhs, /, *, adjoint=False, name=None):
matrix, rhs = check_tensorflow_casting(matrix, rhs)
return ivy.solve(matrix, rhs, adjoint=adjoint)
@to_ivy_arrays_and_back
def svd(a, /, *, full_matrices=False, compute_uv=True, name=None):
return ivy.svd(a, compute_uv=compute_uv, full_matrices=full_matrices)
@to_ivy_arrays_and_back
@with_supported_dtypes(
{
"2.15.0 and below": (
"bfloat16",
"half",
"float32",
"float64",
"int32",
"int64",
"complex64",
"complex128",
)
},
"tensorflow",
)
def tensor_diag(diagonal, /, *, name=None):
diagonal = ivy.array(diagonal)
    # `diagonal` must be a vector, so check its number of dimensions rather
    # than its linear-algebraic rank
    if len(diagonal.shape) > 1:
        raise ValueError("wrong tensor rank, at most 1")
return ivy.diag(diagonal)
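# Editor's illustrative sketch (not part of the original file): a rank-1
# diagonal is expanded into a square matrix:
#
#     tensor_diag(ivy.array([1, 2, 3]))
#     # -> [[1, 0, 0],
#     #     [0, 2, 0],
#     #     [0, 0, 3]]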
@to_ivy_arrays_and_back
@with_supported_dtypes(
{
"2.15.0 and below": (
"float32",
"float64",
"int32",
"int64",
"complex64",
"complex128",
)
},
"tensorflow",
)
def tensor_diag_part(input, name=None):
shape = ivy.shape(input, as_array=True)
rank = len(shape)
if rank % 2 != 0:
raise ValueError("Wrong tensor rank, rank must be even.")
rank_half = int(rank / 2)
half_shape = shape[:rank_half]
prod = 1
for i in range(rank_half):
if shape[i] != shape[i + rank_half]:
raise ValueError(
f"Invalid shape {shape}: dimensions at {i} and {i+rank_half} do not"
" match."
)
prod *= half_shape[i]
reshaped = ivy.reshape(input, (prod, prod))
diagonal = ivy.diagonal(reshaped)
return ivy.reshape(diagonal, tuple(half_shape))
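# Editor's illustrative sketch (not part of the original file): for an input of
# shape (2, 3, 2, 3) the rank is 4, half_shape is (2, 3) and prod is 6, so the
# tensor is viewed as a (6, 6) matrix, its main diagonal (length 6) is taken,
# and the result is reshaped back to (2, 3):
#
#     tensor_diag_part(ivy.ones((2, 3, 2, 3))).shape  # -> (2, 3)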
@to_ivy_arrays_and_back
@with_supported_dtypes(
{"2.15.0 and below": ("float32", "float64", "int32")}, "tensorflow"
)
def tensordot(a, b, axes, name=None):
a, b = check_tensorflow_casting(a, b)
if not ivy.isscalar(axes):
axes = ivy.to_list(axes)
return ivy.tensordot(a, b, axes=axes)
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"float16",
"bfloat16",
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
)
},
"tensorflow",
)
def tensorsolve(a, b, axes):
return ivy.tensorsolve(a, b, axes=axes)
@to_ivy_arrays_and_back
def trace(x, name=None):
return ivy.trace(x, axis1=-2, axis2=-1)
@to_ivy_arrays_and_back
@with_supported_dtypes(
{
"2.13.0 and below": (
"float32",
"float64",
"complex64",
"complex128",
)
},
"tensorflow",
)
def tridiagonal_solve(
diagonals,
rhs,
diagonals_format="compact",
transpose_rhs=False,
conjugate_rhs=False,
name=None,
partial_pivoting=True,
perturb_singular=False,
):
    rhs_copy = ivy.array(rhs)
    if transpose_rhs is True:
        rhs_copy = ivy.matrix_transpose(rhs_copy)
    if conjugate_rhs is True:
        rhs_copy = ivy.conj(rhs_copy)
if diagonals_format == "matrix":
return ivy.solve(diagonals, rhs_copy)
elif diagonals_format in ["sequence", "compact"]:
diagonals = ivy.array(diagonals)
dim = diagonals[0].shape[0]
diagonals[[0, -1], [-1, 0]] = 0
dummy_idx = [0, 0]
indices = ivy.array(
[
[(i, i + 1) for i in range(dim - 1)] + [dummy_idx],
[(i, i) for i in range(dim)],
[dummy_idx] + [(i + 1, i) for i in range(dim - 1)],
]
)
constructed_matrix = ivy.scatter_nd(
indices, diagonals, shape=ivy.array([dim, dim])
)
return ivy.solve(constructed_matrix, rhs_copy)
else:
raise ValueError("Unexpected diagonals_format")
| ivy/ivy/functional/frontends/tensorflow/linalg.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/linalg.py",
"repo_id": "ivy",
"token_count": 6895
} | 49 |
# global
import sys
from numbers import Number
from typing import Union, Tuple, Iterable
# local
import ivy
from ivy.utils.exceptions import handle_exceptions
from ivy.functional.frontends import set_frontend_to_specific_version
# Constructing dtypes is required as ivy.<dtype>
# will change dynamically on the backend and may not be available
int8 = ivy.IntDtype("int8")
int16 = ivy.IntDtype("int16")
int32 = ivy.IntDtype("int32")
int64 = ivy.IntDtype("int64")
uint8 = ivy.UintDtype("uint8")
uint16 = ivy.UintDtype("uint16")
uint32 = ivy.UintDtype("uint32")
uint64 = ivy.UintDtype("uint64")
bfloat16 = ivy.FloatDtype("bfloat16")
float16 = ivy.FloatDtype("float16")
float32 = ivy.FloatDtype("float32")
float64 = ivy.FloatDtype("float64")
complex64 = ivy.ComplexDtype("complex64")
complex128 = ivy.ComplexDtype("complex128")
bool = ivy.Dtype("bool")
# type aliases
char = int8
short = int16
int = int32
long = int64
half = float16
float = float32
double = float64
# data type promotion
torch_promotion_table = {
(uint8, uint8): uint8,
(uint8, int8): int16,
(uint8, int16): int16,
(uint8, int32): int32,
(uint8, int64): int64,
(uint8, float16): float16,
(uint8, float32): float32,
(uint8, float64): float64,
(uint8, bool): uint8,
(uint8, bfloat16): bfloat16,
(uint8, complex64): complex64,
(uint8, complex128): complex128,
(int8, uint8): int16,
(int8, int8): int8,
(int8, int16): int16,
(int8, int32): int32,
(int8, int64): int64,
(int8, float16): float16,
(int8, float32): float32,
(int8, float64): float64,
(int8, bool): int8,
(int8, bfloat16): bfloat16,
(int8, complex64): complex64,
(int8, complex128): complex128,
(int16, uint8): int16,
(int16, int8): int16,
(int16, int16): int16,
(int16, int32): int32,
(int16, int64): int64,
(int16, float16): float16,
(int16, float32): float32,
(int16, float64): float64,
(int16, bool): int16,
(int16, bfloat16): bfloat16,
(int16, complex64): complex64,
(int16, complex128): complex128,
(int32, uint8): int32,
(int32, int8): int32,
(int32, int16): int32,
(int32, int32): int32,
(int32, int64): int64,
(int32, float16): float16,
(int32, float32): float32,
(int32, float64): float64,
(int32, bool): int32,
(int32, bfloat16): bfloat16,
(int32, complex64): complex64,
(int32, complex128): complex128,
(int64, uint8): int64,
(int64, int8): int64,
(int64, int16): int64,
(int64, int32): int64,
(int64, int64): int64,
(int64, float16): float16,
(int64, float32): float32,
(int64, float64): float64,
(int64, bool): int64,
(int64, bfloat16): bfloat16,
(int64, complex64): complex64,
(int64, complex128): complex128,
(float16, uint8): float16,
(float16, int8): float16,
(float16, int16): float16,
(float16, int32): float16,
(float16, int64): float16,
(float16, float16): float16,
(float16, float32): float32,
(float16, float64): float64,
(float16, bool): float16,
(float16, bfloat16): float32,
(float16, complex64): complex64,
(float16, complex128): complex128,
(float32, uint8): float32,
(float32, int8): float32,
(float32, int16): float32,
(float32, int32): float32,
(float32, int64): float32,
(float32, float16): float32,
(float32, float32): float32,
(float32, float64): float64,
(float32, bool): float32,
(float32, bfloat16): float32,
(float32, complex64): complex64,
(float32, complex128): complex128,
(float64, uint8): float64,
(float64, int8): float64,
(float64, int16): float64,
(float64, int32): float64,
(float64, int64): float64,
(float64, float16): float64,
(float64, float32): float64,
(float64, float64): float64,
(float64, bool): float64,
(float64, bfloat16): float64,
(float64, complex64): complex128,
(float64, complex128): complex128,
(bool, uint8): uint8,
(bool, int8): int8,
(bool, int16): int16,
(bool, int32): int32,
(bool, int64): int64,
(bool, float16): float16,
(bool, float32): float32,
(bool, float64): float64,
(bool, bool): bool,
(bool, bfloat16): bfloat16,
(bool, complex64): complex64,
(bool, complex128): complex128,
(bfloat16, uint8): bfloat16,
(bfloat16, int8): bfloat16,
(bfloat16, int16): bfloat16,
(bfloat16, int32): bfloat16,
(bfloat16, int64): bfloat16,
(bfloat16, float16): float32,
(bfloat16, float32): float32,
(bfloat16, float64): float64,
(bfloat16, bool): bfloat16,
(bfloat16, bfloat16): bfloat16,
(bfloat16, complex64): complex64,
(bfloat16, complex128): complex128,
(complex64, uint8): complex64,
(complex64, int8): complex64,
(complex64, int16): complex64,
(complex64, int32): complex64,
(complex64, int64): complex64,
(complex64, float16): complex64,
(complex64, float32): complex64,
(complex64, float64): complex128,
(complex64, bool): complex64,
(complex64, bfloat16): complex64,
(complex64, complex64): complex64,
(complex64, complex128): complex128,
(complex128, uint8): complex128,
(complex128, int8): complex128,
(complex128, int16): complex128,
(complex128, int32): complex128,
(complex128, int64): complex128,
(complex128, float16): complex128,
(complex128, float32): complex128,
(complex128, float64): complex128,
(complex128, bool): complex128,
(complex128, bfloat16): complex128,
(complex128, complex64): complex128,
(complex128, complex128): complex128,
}
@handle_exceptions
def promote_types_torch(
type1: Union[ivy.Dtype, ivy.NativeDtype],
type2: Union[ivy.Dtype, ivy.NativeDtype],
/,
) -> ivy.Dtype:
"""Promote the datatypes type1 and type2, returning the data type they
promote to.
Parameters
----------
type1
the first of the two types to promote
type2
the second of the two types to promote
Returns
-------
ret
The type that both input types promote to
"""
try:
ret = torch_frontend.torch_promotion_table[
(ivy.as_ivy_dtype(type1), ivy.as_ivy_dtype(type2))
]
except KeyError as e:
raise ivy.utils.exceptions.IvyException(
"these dtypes are not type promotable"
) from e
return ret
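# Editor's illustrative sketch (not part of the original file): promotion
# follows the table defined above, e.g.
#
#     promote_types_torch(bfloat16, float16)  # -> float32
#     promote_types_torch(int64, float32)     # -> float32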
@handle_exceptions
def promote_types_of_torch_inputs(
x1: Union[ivy.Array, Number, Iterable[Number]],
x2: Union[ivy.Array, Number, Iterable[Number]],
/,
) -> Tuple[ivy.Array, ivy.Array]:
"""Promote the dtype of the given native array inputs to a common dtype
based on type promotion rules.
    Note that when float, integer or other non-array values are passed to
    this function, the corresponding returned value will be an array-like
    object. Outputs of this function should therefore only be used as
    inputs to functions that expect array-like or tensor-like objects;
    otherwise the results may be unexpected.
"""
if ivy.isscalar(x1) and ivy.is_int_dtype(x1):
x1 = ivy.asarray(x1, dtype="int64")
elif ivy.isscalar(x1):
x1 = ivy.asarray(x1)
if ivy.isscalar(x2) and ivy.is_int_dtype(x2):
x2 = ivy.asarray(x2, dtype="int64")
elif ivy.isscalar(x2):
x2 = ivy.asarray(x2)
type1 = ivy.default_dtype(item=x1).strip("u123456789")
type2 = ivy.default_dtype(item=x2).strip("u123456789")
if x1.shape != () and x2.shape == () and type1 == type2:
x2 = ivy.asarray(
x2, dtype=x1.dtype, device=ivy.default_device(item=x1, as_native=False)
)
elif x1.shape == () and x2.shape != () and type1 == type2:
x1 = ivy.asarray(
x1, dtype=x2.dtype, device=ivy.default_device(item=x2, as_native=False)
)
elif x1.dtype != x2.dtype:
promoted = promote_types_torch(x1.dtype, x2.dtype)
if x1.dtype != promoted:
x1 = x1.astype(promoted)
if x2.dtype != promoted:
x2 = x2.astype(promoted)
return x1, x2
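# Editor's illustrative sketch (not part of the original file): a Python int
# scalar is first wrapped as an int64 array and then promoted against the
# other input using the table above:
#
#     x1, x2 = promote_types_of_torch_inputs(ivy.array([1.0], dtype="float32"), 3)
#     # x1.dtype -> float32, x2.dtype -> float32 (int64 + float32 promotes to float32)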
from . import nn
from .nn.functional import softmax, relu, lstm
from . import special
from . import tensor
from .tensor import *
from . import blas_and_lapack_ops
from .blas_and_lapack_ops import *
from . import comparison_ops
from .comparison_ops import *
from . import creation_ops
from .creation_ops import *
from . import dtype
from .dtype import *
from . import indexing_slicing_joining_mutating_ops
from .indexing_slicing_joining_mutating_ops import *
from . import locally_disabling_gradient_computation
from .locally_disabling_gradient_computation import *
from . import miscellaneous_ops
from .miscellaneous_ops import *
from . import pointwise_ops
from .pointwise_ops import *
from . import random_sampling
from .random_sampling import *
from . import reduction_ops
from .reduction_ops import *
from . import spectral_ops
from .spectral_ops import *
from . import tensor_functions
from .tensor_functions import *
from . import utilities
from .utilities import *
from . import linalg
from . import func
from .func import *
_frontend_array = tensor
# setting to specific version #
# --------------------------- #
if ivy.is_local():
module = ivy.utils._importlib.import_cache[__name__]
else:
module = sys.modules[__name__]
__version__ = set_frontend_to_specific_version(module)
| ivy/ivy/functional/frontends/torch/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/__init__.py",
"repo_id": "ivy",
"token_count": 3918
} | 50 |
import ivy
from ivy.func_wrapper import with_supported_device_and_dtypes, with_supported_dtypes
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
# --- Helpers --- #
# --------------- #
def _extract_states(states, batch_sizes):
h = []
for i in range(states.shape[1]):
h.append(states[int(batch_sizes[i] - 1), i])
h = ivy.expand_dims(ivy.stack(h, axis=0), axis=0)
return h
def _lstm_full(
input,
hx,
params,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_first,
):
ret = ivy.lstm(
input,
hx,
params,
num_layers,
dropout,
train,
bidirectional,
batch_first=batch_first,
has_ih_bias=has_biases,
has_hh_bias=has_biases,
)
return ret[1], ret[2][0], ret[2][1]
def _lstm_packed(
data,
batch_sizes,
hx,
params,
has_biases,
num_layers,
dropout,
train,
bidirectional,
):
ret = ivy.lstm(
data,
hx,
params,
num_layers,
dropout,
train,
bidirectional,
batch_sizes=batch_sizes,
has_ih_bias=has_biases,
has_hh_bias=has_biases,
)
return ret[1], ret[2][0], ret[2][1]
# --- Main --- #
# ------------ #
@with_supported_device_and_dtypes(
{"2.2 and below": {"cpu": ("float32", "float64")}},
"torch",
)
@to_ivy_arrays_and_back
def lstm(*args, **kwargs):
if "batch_sizes" in kwargs or (len(args) >= 4 and not isinstance(args[3], bool)):
return _lstm_packed(*args, **kwargs)
else:
return _lstm_full(*args, **kwargs)
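# Editor's note (illustrative, not part of the original file): the dispatch
# above relies on the positional signatures of the two variants,
#
#     lstm(input, hx, params, has_biases, num_layers, ...)             # args[3] is a bool
#     lstm(data, batch_sizes, hx, params, has_biases, num_layers, ...) # args[3] is not
#
# so a non-bool fourth positional argument (or an explicit ``batch_sizes``
# keyword) selects the packed-sequence path.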
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, "torch")
def multi_head_attention_forward(
query,
key,
value,
embed_dim_to_check,
num_heads,
in_proj_weight,
in_proj_bias,
bias_k,
bias_v,
add_zero_attn,
dropout_p,
out_proj_weight,
out_proj_bias,
training=True,
key_padding_mask=None,
need_weights=True,
attn_mask=None,
use_separate_proj_weight=False,
q_proj_weight=None,
k_proj_weight=None,
v_proj_weight=None,
static_k=None,
static_v=None,
average_attn_weights=True,
is_causal=False,
):
embed_dim = query.shape[-1]
assert (
embed_dim == embed_dim_to_check
), f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
return ivy.multi_head_attention(
query,
key=key,
value=value,
batch_first=False,
num_heads=num_heads,
attention_mask=attn_mask,
in_proj_weights=in_proj_weight if not use_separate_proj_weight else None,
q_proj_weights=q_proj_weight,
k_proj_weights=k_proj_weight,
v_proj_weights=v_proj_weight,
out_proj_weights=out_proj_weight,
in_proj_bias=in_proj_bias,
out_proj_bias=out_proj_bias,
is_causal=is_causal and not (need_weights or key_padding_mask is not None),
key_padding_mask=key_padding_mask,
bias_k=bias_k,
bias_v=bias_v,
static_k=static_k,
static_v=static_v,
add_zero_attn=add_zero_attn,
return_attention_weights=need_weights,
average_attention_weights=average_attn_weights,
dropout=dropout_p,
training=training,
)
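# Editor's note (assumption, not stated in the original file): since
# ``batch_first=False`` is passed to ivy.multi_head_attention, ``query``,
# ``key`` and ``value`` are expected in the (seq_len, batch, embed_dim)
# layout used by torch.nn.functional.multi_head_attention_forward.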
| ivy/ivy/functional/frontends/torch/nn/functional/layer_functions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/nn/functional/layer_functions.py",
"repo_id": "ivy",
"token_count": 1714
} | 51 |
import ivy
from ivy.func_wrapper import with_supported_dtypes, with_unsupported_dtypes
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def bartlett_window(
window_length,
periodic=True,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False
):
# this implementation is based on scipy.signal.windows.bartlett
# https://github.com/scipy/scipy/blob/v1.11.2/scipy/signal/windows/_windows.py#L625-L721
if int(window_length) != window_length or window_length < 0:
raise ValueError("Window length must be a non-negative integer")
elif window_length == 1:
return ivy.ones(window_length)
else:
N = window_length + 1 if periodic else window_length
res = ivy.arange(0, N, dtype=dtype)
res = ivy.where(
ivy.less_equal(res, (N - 1) / 2.0),
2.0 * res / (N - 1),
2.0 - 2.0 * res / (N - 1),
)
return res[:-1] if periodic else res
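# Editor's illustrative sketch (not part of the original file): the piecewise
# expression above yields the usual triangular window, e.g.
#
#     bartlett_window(5, periodic=False)  # -> [0.0, 0.5, 1.0, 0.5, 0.0]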
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.51.0 and below": ("float32", "float64")}, "torch")
def blackman_window(
window_length,
periodic=True,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False
):
return ivy.blackman_window(window_length, periodic=periodic, dtype=dtype)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"1.11.0 and below": ("float16",)}, "torch")
def hamming_window(
window_length,
periodic=True,
alpha=0.54,
beta=0.46,
):
return ivy.hamming_window(
window_length,
periodic=periodic,
alpha=alpha,
beta=beta,
)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.51.0 and below": ("float32", "float64")}, "torch")
def kaiser_window(
window_length,
periodic=True,
beta=12.0,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False
):
return ivy.kaiser_window(window_length, periodic=periodic, beta=beta, dtype=dtype)
| ivy/ivy/functional/frontends/torch/spectral_ops.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/spectral_ops.py",
"repo_id": "ivy",
"token_count": 898
} | 52 |
from .core import Booster
def train(
params,
dtrain,
dlabel,
num_boost_round=10,
*,
evals=None,
obj=None,
feval=None,
maximize=None,
early_stopping_rounds=None,
evals_result=None,
verbose_eval=True,
xgb_model=None,
callbacks=None,
custom_metric=None,
):
"""Train a booster with given parameters.
Parameters
----------
params
Booster params.
dtrain
Data to be trained.
dlabel
Training labels.
num_boost_round
Number of boosting iterations.
evals
List of validation sets for which metrics will be evaluated during training.
Validation metrics will help us track the performance of the model.
obj
Custom objective function.
feval
Deprecated.
maximize
Whether to maximize feval.
early_stopping_rounds
Activates early stopping. Validation metric needs to improve at least once in
every **early_stopping_rounds** round(s) to continue training.
Requires at least one item in **evals**.
The method returns the model from the last iteration (not the best one). Use
custom callback or model slicing if the best model is desired.
If there's more than one item in **evals**, the last entry will be used for
early stopping.
If there's more than one metric in the **eval_metric** parameter given in
**params**, the last metric will be used for early stopping.
If early stopping occurs, the model will have two additional fields:
``bst.best_score``, ``bst.best_iteration``.
evals_result
This dictionary stores the evaluation results of all the items in watchlist.
verbose_eval
Requires at least one item in **evals**.
If **verbose_eval** is True then the evaluation metric on the validation set is
printed at each boosting stage.
        If **verbose_eval** is an integer, the evaluation metric on the validation
        set is printed every **verbose_eval** boosting stages. The last boosting
        stage, or the boosting stage found by using **early_stopping_rounds**, is
        also printed.
Example: with ``verbose_eval=4`` and at least one item in **evals**, an
evaluation metric is printed every 4 boosting stages, instead of every boosting
stage.
xgb_model
Xgb model to be loaded before training (allows training continuation).
callbacks
List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks.
custom_metric
Custom metric function.
Returns
-------
Booster : a trained booster model
"""
# this function creates an instance of Booster and calls its update method
# to learn model parameters
# ToDo: add handling for callbacks and write training history
bst = Booster(params, cache=[dtrain, dlabel], model_file=xgb_model)
for i in range(num_boost_round):
bst.update(dtrain, dlabel, iteration=i, fobj=obj)
return bst
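# Editor's illustrative sketch (not part of the original file; the parameter
# dict and array shapes below are assumptions): a minimal call mirroring the
# Booster/update flow implemented above.
#
#     params = {"learning_rate": 0.3}
#     X = ivy.random_uniform(shape=(100, 5))                                # features
#     y = ivy.astype(ivy.random_uniform(shape=(100, 1)) > 0.5, "float32")   # labels
#     bst = train(params, X, y, num_boost_round=5)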
| ivy/ivy/functional/frontends/xgboost/training.py/0 | {
"file_path": "ivy/ivy/functional/frontends/xgboost/training.py",
"repo_id": "ivy",
"token_count": 1096
} | 53 |
# global
import functools
from typing import Callable, Union, Sequence
# local
import ivy
from ivy import (
inputs_to_ivy_arrays,
handle_nestable,
handle_array_like_without_promotion,
handle_array_function,
)
from ivy.utils.exceptions import handle_exceptions
def _correct_ivy_callable(func):
# get the current backend of the given ivy callable
if ivy.nested_any(
func,
lambda x: hasattr(x, "__module__")
and x.__module__.startswith("ivy")
and not x.__module__.startswith("ivy.functional.frontends"),
):
return ivy.__dict__[func.__name__]
return func
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def reduce(
operand: Union[ivy.Array, ivy.NativeArray],
init_value: Union[int, float],
computation: Callable,
/,
*,
axes: Union[int, Sequence[int]] = 0,
keepdims: bool = False,
) -> ivy.Array:
"""Reduces the input array's dimensions by applying a function along one or
more axes.
Parameters
----------
operand
The array to act on.
init_value
The value with which to start the reduction.
computation
The reduction function.
axes
The dimensions along which the reduction is performed.
keepdims
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one.
Returns
-------
ret
The reduced array.
Examples
--------
    >>> x = ivy.array([[1, 2, 3], [4, 5, 6]])
    >>> ivy.reduce(x, 0, ivy.add, axes=1)
    ivy.array([6, 15])
    >>> x = ivy.array([[1, 2, 3], [4, 5, 6]])
    >>> ivy.reduce(x, 0, ivy.add, axes=0)
    ivy.array([5, 7, 9])
"""
axes = (axes,) if isinstance(axes, int) else axes
axes = [a + operand.ndim if a < 0 else a for a in axes]
axes = sorted(axes, reverse=True)
init_value = ivy.array(init_value)
op_dtype = operand.dtype
computation = _correct_ivy_callable(computation)
for axis in axes:
temp = ivy.moveaxis(operand, axis, 0).reshape((operand.shape[axis], -1))
temp = functools.reduce(computation, temp, init_value)
operand = ivy.reshape(temp, operand.shape[:axis] + operand.shape[axis + 1 :])
if keepdims:
operand = ivy.expand_dims(operand, axis=axes)
return operand.astype(op_dtype)
reduce.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
| ivy/ivy/functional/ivy/experimental/general.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/general.py",
"repo_id": "ivy",
"token_count": 1126
} | 54 |
"""Collection of general Ivy functions."""
# global
import gc
import inspect
import itertools
import math
from functools import wraps
from numbers import Number
from typing import (
Callable,
Any,
Union,
List,
Tuple,
Dict,
Iterable,
Optional,
Sequence,
Literal,
)
import einops
import ml_dtypes # noqa
import numpy as np
# local
import ivy
from ivy.utils.backend import current_backend, backend_stack
from ivy.functional.ivy.gradients import _is_variable
from ivy.utils.exceptions import handle_exceptions
from ivy.func_wrapper import (
handle_array_function,
inputs_to_ivy_arrays,
inputs_to_native_arrays,
to_native_arrays_and_back,
inputs_to_native_shapes,
outputs_to_ivy_shapes,
outputs_to_ivy_arrays,
handle_out_argument,
handle_nestable,
handle_array_like_without_promotion,
handle_view_indexing,
handle_device,
handle_partial_mixed_function,
handle_backend_invalid,
)
from ivy.functional.ivy.device import dev
FN_CACHE = {}
INF = float("inf")
precise_mode_stack = []
queue_timeout_stack = []
array_mode_stack = []
shape_array_mode_stack = []
nestable_mode_stack = []
exception_trace_mode_stack = []
inplace_mode_stack = []
trace_mode_dict = {
"frontend": "ivy/functional/frontends",
"ivy": "ivy/",
"full": "",
"none": "",
}
show_func_wrapper_trace_mode_stack = []
min_denominator_stack = []
min_base_stack = []
tmp_dir_stack = []
# Extra #
# ------#
class PreciseMode:
"""Precise Mode Context Manager."""
# noinspection PyShadowingNames
def __init__(self, precise_mode: bool):
self._precise_mode = precise_mode
def __enter__(self):
set_precise_mode(self._precise_mode)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
unset_precise_mode()
if self and (exc_type is not None):
raise exc_val
return self
ivy.precise_mode = precise_mode_stack[-1] if precise_mode_stack else True
@handle_exceptions
def set_precise_mode(mode: bool) -> None:
"""Set the mode of whether to use a promotion table that avoids any
precision loss or a compute efficient table that avoids most wider-than-
necessary promotions.
    Parameters
    ----------
mode
boolean whether to use high precision promotion table
Examples
--------
>>> ivy.set_precise_mode(False)
>>> ivy.precise_mode
False
>>> ivy.set_precise_mode(True)
>>> ivy.precise_mode
True
"""
global precise_mode_stack
ivy.utils.assertions.check_isinstance(mode, bool)
precise_mode_stack.append(mode)
ivy.__setattr__("precise_mode", mode, True)
_update_promotion_table(precise=mode)
@handle_exceptions
def unset_precise_mode() -> None:
"""Reset the mode of whether to use a promotion table that avoids any
precision loss or a compute efficient table that avoids most wider-than-
necessary promotions.
Examples
--------
>>> ivy.set_precise_mode(False)
>>> ivy.precise_mode
False
>>> ivy.unset_precise_mode()
>>> ivy.precise_mode
True
"""
global precise_mode_stack
if precise_mode_stack:
precise_mode_stack.pop(-1)
mode = precise_mode_stack[-1] if precise_mode_stack else True
ivy.__setattr__("precise_mode", mode, True)
_update_promotion_table(precise=mode)
def _update_promotion_table(precise):
"""Update the current datatype promotion table."""
if precise:
ivy.promotion_table = {
**ivy.array_api_promotion_table,
**ivy.common_extra_promotion_table,
**ivy.precise_extra_promotion_table,
}
else:
ivy.promotion_table = {
**ivy.array_api_promotion_table,
**ivy.common_extra_promotion_table,
**ivy.extra_promotion_table,
}
class ArrayMode:
"""Array Mode Context Manager."""
# noinspection PyShadowingNames
def __init__(self, array_mode):
self._array_mode = array_mode
def __enter__(self):
set_array_mode(self._array_mode)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
unset_array_mode()
if self and (exc_type is not None):
raise exc_val
return self
def get_referrers_recursive(
item: object,
*,
depth: int = 0,
max_depth: Optional[int] = None,
seen_set: Optional[set] = None,
local_set: Optional[set] = None,
) -> ivy.Container:
"""Recursively retrieve referrers for an object.
This function recursively fetches referrers for the specified `item` up to a given
`max_depth`.
Parameters
----------
item
The object for which referrers should be retrieved.
depth
Current depth in the recursion. (default is 0)
max_depth
Maximum depth of recursion. If `None`, there's no depth limit. (default is None)
seen_set
Set of seen referrer IDs to prevent duplicates. (default is None)
local_set
Set of local referrer IDs to avoid redundancy. (default is None)
Returns
-------
ret
A container representing referrers and their sub-referrers, respecting the
`max_depth`.
Examples
--------
>>> import gc
>>> example_function = lambda: (obj := [1, 2, 3]) and ivy.get_referrers_recursive(obj, max_depth=2)
>>> result = example_function()
>>> print(result)
{repr:[1,2,3]}
""" # noqa: E501
seen_set = ivy.default(seen_set, set())
local_set = ivy.default(local_set, set())
ret_cont = ivy.Container(
repr=str(item).replace(" ", ""),
alphabetical_keys=False,
keyword_color_dict={"repr": "magenta"},
)
referrers = [
ref
for ref in gc.get_referrers(item)
if not (
isinstance(ref, dict)
and min(k in ref for k in ["depth", "max_depth", "seen_set", "local_set"])
)
]
local_set.add(str(id(referrers)))
for ref in referrers:
ref_id = str(id(ref))
if ref_id in local_set or hasattr(ref, "cell_contents"):
continue
seen = ref_id in seen_set
seen_set.add(ref_id)
def get_referrers_recursive_inner():
return get_referrers_recursive(
ref,
depth=depth + 1,
max_depth=max_depth,
seen_set=seen_set,
local_set=local_set,
)
this_repr = "tracked" if seen else str(ref).replace(" ", "")
if not seen and (not max_depth or depth < max_depth):
val = ivy.Container(
repr=this_repr,
alphabetical_keys=False,
keyword_color_dict={"repr": "magenta"},
)
refs = get_referrers_recursive_inner()
for k, v in refs.items():
val[k] = v
else:
val = this_repr
ret_cont[str(ref_id)] = val
return ret_cont
@handle_exceptions
@handle_backend_invalid
def is_native_array(
x: Union[ivy.Array, ivy.NativeArray], /, *, exclusive: bool = False
) -> bool:
"""Determine whether the input x is an :class:`ivy.NativeArray` instance.
Parameters
----------
x
The input to check
exclusive
Whether to check if the data type is exclusively an array, rather than a
variable or traced array.
Returns
-------
ret
Boolean, whether or not x is an :class:`ivy.NativeArray`.
Examples
--------
>>> x = ivy.array([0, 1, 2])
>>> ivy.is_native_array(x)
False
>>> x = ivy.native_array([9.1, -8.3, 2.8, 3.0])
>>> ivy.is_native_array(x, exclusive=True)
True
"""
try:
return current_backend(x).is_native_array(x, exclusive=exclusive)
except ValueError:
return False
@handle_exceptions
@handle_backend_invalid
def is_ivy_array(
x: Union[ivy.Array, ivy.NativeArray], /, *, exclusive: Optional[bool] = False
) -> bool:
"""Determine whether the input x is a valid Ivy Array.
Parameters
----------
x
The input to check
exclusive
Whether to check if the data type is exclusively an array, rather than a
variable or traced array.
Returns
-------
ret
Boolean, whether or not x is a valid Ivy Array.
Examples
--------
>>> x = ivy.array([0, 1, 2])
>>> ivy.is_ivy_array(x)
True
>>> x = ivy.native_array([9.1, -8.3, 2.8, 3.0])
>>> ivy.is_ivy_array(x, exclusive=True)
False
"""
return isinstance(x, ivy.Array) and ivy.is_native_array(x.data, exclusive=exclusive)
@handle_exceptions
@handle_backend_invalid
def is_array(x: Any, /, *, exclusive: bool = False) -> bool:
"""Determine whether the input x is either an Ivy Array or a Native Array.
Parameters
----------
x
The input to check
exclusive
Whether to check if the data type is exclusively an array, rather than a
variable or traced array.
Returns
-------
ret
Boolean, whether or not x is an array.
Examples
--------
>>> x = ivy.array([0, 1, 2])
>>> print(ivy.is_array(x))
True
>>> x = ivy.native_array([9.1, -8.3, 2.8, 3.0])
>>> print(ivy.is_array(x, exclusive=True))
True
>>> x = [2, 3]
>>> print(ivy.is_array(x))
False
"""
return ivy.is_ivy_array(x, exclusive=exclusive) or ivy.is_native_array(
x, exclusive=exclusive
)
@handle_exceptions
def is_ivy_container(x: Any, /) -> bool:
"""Determine whether the input x is an Ivy Container.
Parameters
----------
x
The input to check
Returns
-------
ret
Boolean, whether or not x is an ivy container.
Examples
--------
>>> x = ivy.Container()
>>> print(ivy.is_ivy_container(x))
True
>>> x = [2, 3]
>>> print(ivy.is_ivy_container(x))
False
"""
return isinstance(x, ivy.Container)
ivy.array_mode = array_mode_stack[-1] if array_mode_stack else True
@handle_exceptions
def set_array_mode(mode: bool) -> None:
"""Set the mode of whether to convert inputs to ivy.NativeArray, then
convert outputs back to ivy.Array.
    When set to ``False``, it stops the conversion of ivy.NativeArray
    to ivy.Array.
    Parameters
    ----------
mode
boolean whether to perform ivy.Array conversions
Examples
--------
>>> ivy.set_array_mode(False)
>>> ivy.array_mode
False
>>> ivy.set_array_mode(True)
>>> ivy.array_mode
True
"""
global array_mode_stack
ivy.utils.assertions.check_isinstance(mode, bool)
array_mode_stack.append(mode)
ivy.__setattr__("array_mode", mode, True)
@handle_exceptions
def unset_array_mode() -> None:
"""Reset the mode of converting inputs to ivy.NativeArray, then converting
outputs back to ivy.Array to the previous state.
Examples
--------
>>> ivy.set_array_mode(False)
>>> ivy.array_mode
False
    >>> ivy.unset_array_mode()
>>> ivy.array_mode
True
"""
global array_mode_stack
if array_mode_stack:
array_mode_stack.pop(-1)
mode = array_mode_stack[-1] if array_mode_stack else True
ivy.__setattr__("array_mode", mode, True)
ivy.nestable_mode = nestable_mode_stack[-1] if nestable_mode_stack else True
@handle_exceptions
def set_nestable_mode(mode: bool) -> None:
"""Set the mode of whether to check if function inputs are ivy.Container.
    Parameters
    ----------
mode
boolean whether to check if function inputs are ivy.Container
Examples
--------
>>> ivy.set_nestable_mode(False)
>>> ivy.nestable_mode
False
>>> ivy.set_nestable_mode(True)
>>> ivy.nestable_mode
True
"""
global nestable_mode_stack
ivy.utils.assertions.check_isinstance(mode, bool)
nestable_mode_stack.append(mode)
ivy.__setattr__("nestable_mode", mode, True)
@handle_exceptions
def unset_nestable_mode() -> None:
"""Reset the mode of whether to check if function inputs are ivy.Container
to the previous state.
Examples
--------
>>> ivy.set_nestable_mode(False)
>>> ivy.nestable_mode
False
>>> ivy.unset_nestable_mode()
>>> ivy.nestable_mode
True
"""
global nestable_mode_stack
if nestable_mode_stack:
nestable_mode_stack.pop(-1)
mode = nestable_mode_stack[-1] if nestable_mode_stack else True
ivy.__setattr__("nestable_mode", mode, True)
ivy.exception_trace_mode = (
exception_trace_mode_stack[-1] if exception_trace_mode_stack else "full"
)
@handle_exceptions
def set_exception_trace_mode(mode: Literal["ivy", "full", "frontend"]) -> None:
"""Set the mode of whether to show frontend-truncated exception stack
traces, ivy- truncated exception stack traces or full exception stack
traces.
    Parameters
    ----------
mode
str exception trace mode, one of `ivy`, `full` or `frontend`
Examples
--------
>>> ivy.set_exception_trace_mode("ivy")
>>> ivy.exception_trace_mode
'ivy'
>>> ivy.set_exception_trace_mode("full")
>>> ivy.exception_trace_mode
'full'
"""
global exception_trace_mode_stack
trace_modes = list(trace_mode_dict.keys())
ivy.utils.assertions.check_elem_in_list(
mode, trace_modes, False, f"trace mode must be one of {trace_modes}"
)
exception_trace_mode_stack.append(mode)
ivy.__setattr__("exception_trace_mode", mode, True)
@handle_exceptions
def unset_exception_trace_mode() -> None:
"""Reset the trace mode to the previously set mode.
Examples
--------
>>> ivy.set_exception_trace_mode("ivy")
>>> ivy.exception_trace_mode
'ivy'
>>> ivy.unset_exception_trace_mode()
>>> ivy.exception_trace_mode
'full'
"""
global exception_trace_mode_stack
if exception_trace_mode_stack:
exception_trace_mode_stack.pop(-1)
mode = exception_trace_mode_stack[-1] if exception_trace_mode_stack else "full"
ivy.__setattr__("exception_trace_mode", mode, True)
ivy.show_func_wrapper_trace_mode = (
show_func_wrapper_trace_mode_stack[-1]
if show_func_wrapper_trace_mode_stack
else True
)
@handle_exceptions
def set_show_func_wrapper_trace_mode(mode: bool) -> None:
"""Set the mode of whether to show the full stack trace with function
wrapping traces.
    Parameters
    ----------
mode
boolean whether to perform ivy.Array conversions
Examples
--------
>>> ivy.set_show_func_wrapper_trace_mode(False)
>>> ivy.show_func_wrapper_trace_mode
False
>>> ivy.set_show_func_wrapper_trace_mode(True)
>>> ivy.show_func_wrapper_trace_mode
True
"""
global show_func_wrapper_trace_mode_stack
ivy.utils.assertions.check_isinstance(mode, bool)
show_func_wrapper_trace_mode_stack.append(mode)
ivy.__setattr__("show_func_wrapper_trace_mode", mode, True)
@handle_exceptions
def unset_show_func_wrapper_trace_mode() -> None:
"""Reset the mode of whether to show the full stack trace with function
wrapping traces.
Examples
--------
>>> ivy.set_show_func_wrapper_trace_mode(False)
>>> ivy.show_func_wrapper_trace_mode
False
>>> ivy.unset_show_func_wrapper_trace_mode()
>>> ivy.show_func_wrapper_trace_mode
True
"""
global show_func_wrapper_trace_mode_stack
if show_func_wrapper_trace_mode_stack:
show_func_wrapper_trace_mode_stack.pop(-1)
mode = (
show_func_wrapper_trace_mode_stack[-1]
if show_func_wrapper_trace_mode_stack
else True
)
ivy.__setattr__("show_func_wrapper_trace_mode", mode, True)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_native_arrays
@handle_array_function
@handle_device
def array_equal(
x0: Union[ivy.Array, ivy.NativeArray],
x1: Union[ivy.Array, ivy.NativeArray],
/,
) -> bool:
"""Determine whether two input arrays are equal across all elements.
Parameters
----------
x0
The first input array to compare.
x1
The second input array to compare.
Returns
-------
ret
Boolean, whether or not the input arrays are equal across all elements.
Examples
--------
>>> x = ivy.array([1,0,1])
>>> y = ivy.array([1,0,-1])
>>> z = ivy.array_equal(x,y)
>>> print(z)
False
>>> a = ivy.array([1, 2])
>>> b = ivy.array([1, 2])
>>> c = ivy.array_equal(a,b)
>>> print(c)
True
>>> i = ivy.array([1, 2])
>>> j = ivy.array([1, 2, 3])
>>> k = ivy.array_equal(i,j)
>>> print(k)
False
"""
return current_backend(x0).array_equal(x0, x1)
@handle_exceptions
@handle_nestable
@inputs_to_ivy_arrays
@handle_array_function
def all_equal(
*xs: Iterable[Any], equality_matrix: bool = False
) -> Union[bool, ivy.Array, ivy.NativeArray]:
"""Determine whether the inputs are all equal.
Parameters
----------
xs
inputs to compare.
equality_matrix
Whether to return a matrix of equalities comparing each input with every other.
Default is ``False``.
Returns
-------
ret
Boolean, whether or not the inputs are equal, or matrix array of booleans if
equality_matrix=True is set.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x1 = ivy.array([1, 1, 0, 0, 1, -1])
>>> x2 = ivy.array([1, 1, 0, 0, 1, -1])
>>> y = ivy.all_equal(x1, x2)
>>> print(y)
True
>>> x1 = ivy.array([0, 0])
>>> x2 = ivy.array([0, 0])
>>> x3 = ivy.array([1, 0])
>>> y = ivy.all_equal(x1, x2, x3, equality_matrix=True)
>>> print(y)
ivy.array([[ True, True, False],
[ True, True, False],
[False, False, True]])
With one :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([0, 0, -1, 1, 0]),
... b=ivy.array([0, 0, -1, 1, 0]))
>>> x2 = ivy.array([0, 0, -1, 1, 0])
>>> y = ivy.all_equal(x1, x2, equality_matrix=False)
>>> print(y)
{
a: True,
b: True
}
With multiple :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([1, 0, 1, 1]),
... b=ivy.array([1, 0, 0, 1]))
>>> x2 = ivy.Container(a=ivy.array([1, 0, 1, 1]),
... b=ivy.array([1, 0, -1, -1]))
>>> y = ivy.all_equal(x1, x2, equality_matrix=False)
>>> print(y)
{
a: True,
b: False
}
"""
equality_fn = ivy.array_equal if ivy.is_array(xs[0]) else lambda a, b: a == b
if equality_matrix:
num_arrays = len(xs)
mat = [[None for _ in range(num_arrays)] for _ in range(num_arrays)]
for i, xa in enumerate(xs):
for j_, xb in enumerate(xs[i:]):
j = j_ + i
res = equality_fn(xa, xb)
if ivy.is_native_array(res):
# noinspection PyTypeChecker
res = ivy.to_scalar(res)
# noinspection PyTypeChecker
mat[i][j] = res
# noinspection PyTypeChecker
mat[j][i] = res
return ivy.array(mat)
x0 = xs[0]
for x in xs[1:]:
if not equality_fn(x0, x):
return False
return True
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_native_arrays
@handle_array_function
@handle_device
def to_numpy(
x: Union[ivy.Array, ivy.NativeArray], /, *, copy: bool = True
) -> np.ndarray:
"""Convert an array into a numpy array.
Parameters
----------
x
input array
copy
whether to copy the array to a new address or not.
Default is ``True``.
Returns
-------
ret
a numpy array copying all the element of the array ``x``.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([-1, 0, 1])
>>> y = ivy.to_numpy(x, copy=True)
>>> print(y)
[-1 0 1]
>>> x = ivy.array([[-1, 0, 1],[-1, 0, 1], [1,0,-1]])
>>> y = ivy.to_numpy(x, copy=True)
>>> print(y)
[[-1 0 1]
[-1 0 1]
[ 1 0 -1]]
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1, 0, 1]))
>>> y = ivy.to_numpy(x)
>>> print(y)
{
a: array([-1, 0, 1], dtype=int32)
}
>>> x = ivy.Container(a=ivy.array([[-1.0, 0., 1.], [-1, 0, 1], [1, 0, -1]]),
... b=ivy.array([[-1, 0, 0], [1, 0, 1], [1, 1, 1]]))
>>> y = ivy.to_numpy(x)
>>> print(y)
{
a: array([[-1., 0., 1.],
[-1., 0., 1.],
[1., 0., -1.]], dtype=float32),
b: array([[-1, 0, 0],
[1, 0, 1],
[1, 1, 1]], dtype=int32)
}
"""
return current_backend(x).to_numpy(x, copy=copy)
@handle_exceptions
@handle_nestable
def isscalar(x: Any, /) -> bool:
return np.isscalar(x)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_native_arrays
@handle_array_function
@handle_device
def to_scalar(x: Union[ivy.Array, ivy.NativeArray], /) -> Number:
"""Convert an array with a single element into a scalar.
Parameters
----------
x
Input array with a single element.
Returns
-------
ret
a scalar copying the element of the array ``x``.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([3])
>>> y = ivy.to_scalar(x)
>>> print(y)
3
With a mix of :class:`ivy.Container` and :class:`ivy.Array` input:
>>> x = ivy.Container(a=ivy.array([-1]), b=ivy.array([3]))
>>> y = ivy.to_scalar(x)
>>> print(y)
{
a: -1,
b: 3
}
>>> x = ivy.Container(a=ivy.array([1]), b=ivy.array([0]),
... c=ivy.array([-1]))
>>> y = ivy.to_scalar(x)
>>> print(y)
{
a: 1,
b: 0,
c: -1
}
"""
return current_backend(x).to_scalar(x)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_native_arrays
@handle_array_function
@handle_device
def to_list(x: Union[ivy.Array, ivy.NativeArray], /) -> List:
"""Create a (possibly nested) list from input array.
Parameters
----------
x
Input array.
Returns
-------
ret
A list representation of the input array ``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1, 0, 1])
>>> y = ivy.to_list(x)
>>> print(y)
[-1, 0, 1]
>>> x = ivy.array([[ 1.1, 2.2, 3.3],
... [-4.4, -5.5, -6.6]])
>>> y = ivy.to_list(x)
>>> print(y)
[[1.100000023841858,2.200000047683716,3.299999952316284],
[-4.400000095367432,-5.5,-6.599999904632568]]
>>> x = ivy.array([[[-1, 0, 1],
... [ 1, 0, -1]],
... [[ 1, -1, 0],
... [ 1, 0, -1]]])
>>> y = ivy.to_list(x)
>>> print(y)
[[[-1, 0, 1], [1, 0, -1]], [[1, -1, 0], [1, 0, -1]]]
With a mix of :class:`ivy.Container` and :class:`ivy.Array` input:
>>> x = ivy.Container(a=ivy.array([-1, 0, 1]))
>>> y = ivy.to_list(x)
>>> print(y)
{
a: [-1, 0, 1]
}
>>> x = ivy.Container(a=ivy.array([[-1, 0, 1],
... [-1, 0, 1],
... [1, 0, -1]]))
>>> y = ivy.to_list(x)
>>> print(y)
{
a: [[-1, 0, 1], [-1, 0, 1], [1,0,-1]]
}
>>> x = ivy.Container(a=ivy.array([[[-1, 0, 1],[1, 0, -1]],
... [[1, -1, 0],[1, 0, -1]]]))
>>> y = ivy.to_list(x)
>>> print(y)
{
a: [[[-1, 0, 1], [1, 0, -1]], [[1, -1, 0], [1, 0, -1]]]
}
"""
return current_backend(x).to_list(x)
@handle_exceptions
@handle_nestable
@inputs_to_ivy_arrays
@handle_array_function
def clip_vector_norm(
x: Union[ivy.Array, ivy.NativeArray],
max_norm: float,
/,
*,
p: float = 2.0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Clips (limits) the vector p-norm of an array.
Parameters
----------
x
Input array containing elements to clip.
max_norm
The maximum value of the array norm.
p
The p-value for computing the p-norm.
Default is 2.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
An array with the vector norm downscaled to the max norm if needed.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0., 1., 2.])
>>> y = ivy.clip_vector_norm(x, 2.0)
>>> print(y)
ivy.array([0. , 0.894, 1.79 ])
>>> x = ivy.array([0.5, -0.7, 2.4])
>>> y = ivy.clip_vector_norm(x, 3.0, p=1.0)
>>> print(y)
ivy.array([ 0.417, -0.583, 2. ])
>>> x = ivy.array([[[0., 0.], [1., 3.], [2., 6.]],
... [[3., 9.], [4., 12.], [5., 15.]]])
>>> y = ivy.zeros(((2, 3, 2)))
>>> ivy.clip_vector_norm(x, 4.0, p=1.0, out=y)
>>> print(y)
ivy.array([[[0. , 0. ],
[0.0667, 0.2 ],
[0.133 , 0.4 ]],
[[0.2 , 0.6 ],
[0.267 , 0.8 ],
[0.333 , 1. ]]])
>>> x = ivy.array([[1.1, 2.2, 3.3],
... [-4.4, -5.5, -6.6]])
>>> ivy.clip_vector_norm(x, 1.0, p=3.0, out=x)
>>> print(x)
ivy.array([[ 0.131, 0.263, 0.394],
[-0.526, -0.657, -0.788]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> y = ivy.clip_vector_norm(x, 2.0)
>>> print(y)
{
a: ivy.array([0., 0.894, 1.79]),
b: ivy.array([0.849, 1.13, 1.41])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> max_norm = ivy.Container(a=2, b=3)
>>> y = ivy.clip_vector_norm(x, max_norm)
>>> print(y)
{
a: ivy.array([0., 0.894, 1.79]),
b: ivy.array([1.27279221, 1.69705628, 2.12132034])
}
"""
norm = ivy.vector_norm(x, keepdims=True, ord=p)
ratio = ivy.stable_divide(max_norm, norm)
if ratio < 1:
ret = ratio * x
else:
ret = ivy.copy_array(x)
if out is not None:
ret = ivy.inplace_update(out, ret)
return ret
@handle_exceptions
@handle_nestable
@inputs_to_ivy_arrays
@handle_array_function
def clip_matrix_norm(
x: Union[ivy.Array, ivy.NativeArray],
max_norm: float,
/,
*,
p: float = 2.0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Clips (limits) the matrix norm of an array.
Parameters
----------
x
Input array containing elements to clip.
max_norm
The maximum value of the array norm.
p
The p-value for computing the p-norm.
Default is 2.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
An array with the matrix norm downscaled to the max norm if needed.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[0., 1., 2.]])
>>> y = ivy.clip_matrix_norm(x, 2.0)
>>> print(y)
ivy.array([[0. , 0.894, 1.79 ]])
>>> x = ivy.array([[0.1, -1.2, 3.7], [0., 7.3, -0.5]])
>>> y = ivy.clip_matrix_norm(x, 3.0, p=1.0)
>>> print(y)
ivy.array([[ 0.0353, -0.424 , 1.31 ],
[ 0. , 2.58 , -0.176 ]])
>>> x = ivy.array([[[5., 4.], [-2., 6.]],
... [[3., 7.], [0., -5.]]])
>>> y = ivy.empty((2, 2, 2))
>>> y = ivy.clip_matrix_norm(x, 0.5, p=2.0)
>>> print(y)
ivy.array([[[ 0.339, 0.271],
[-0.135, 0.406]],
[[ 0.168, 0.391],
[ 0. , -0.279]]])
>>> x = ivy.array([[0., 1.],
... [2., 3.]])
>>> ivy.clip_matrix_norm(x, 5.0, p=1.0, out=x)
>>> print(x)
ivy.array([[0., 1.],
[2., 3.]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[0., 1., 2.]]),
... b=ivy.array([[3., 4., 5.]]))
>>> y = ivy.clip_matrix_norm(x, 2.0)
>>> print(y)
{
a: ivy.array([[0., 0.894, 1.79]]),
b: ivy.array([[0.849, 1.13, 1.41]])
}
"""
norms = ivy.matrix_norm(x, ord=p, keepdims=True)
ratios = ivy.minimum(ivy.stable_divide(max_norm, norms), 1.0)
return ivy.multiply(ratios, x, out=out)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def fourier_encode(
x: Union[ivy.Array, ivy.NativeArray],
max_freq: Union[float, ivy.Array, ivy.NativeArray],
/,
*,
num_bands: int = 4,
linear: bool = False,
concat: bool = True,
flatten: bool = False,
) -> Union[ivy.Array, ivy.NativeArray, Tuple]:
"""Pad an array with fourier encodings.
Parameters
----------
x
Input array to encode.
max_freq
The maximum frequency of the encoding.
num_bands
The number of frequency bands for the encoding.
Default is 4.
linear
Whether to space the frequency bands linearly as opposed to geometrically.
Default is ``False``.
concat
Whether to concatenate the position, sin and cos values, or return separately.
Default is ``True``.
flatten
Whether to flatten the position dimension into the batch dimension.
Default is False.
Returns
-------
ret
New array with the final dimension expanded, and the encodings stored in this
channel.
Examples
--------
>>> x = ivy.array([1,2,3])
>>> y = 1.5
>>> z = ivy.fourier_encode(x,y)
>>> print(z)
ivy.array([[ 1.0000000e+00, 1.2246468e-16, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, -1.0000000e+00, 1.0000000e+00, 1.0000000e+00,
1.0000000e+00],
[ 2.0000000e+00, -2.4492936e-16, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00,
1.0000000e+00],
[ 3.0000000e+00, 3.6739404e-16, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, -1.0000000e+00, 1.0000000e+00, 1.0000000e+00,
1.0000000e+00]])
>>> x = ivy.array([3,10])
>>> y = 2.5
>>> z = ivy.fourier_encode(x, y, num_bands=3)
>>> print(z)
ivy.array([[ 3.0000000e+00, 3.6739404e-16, 3.6739404e-16, 3.6739404e-16,
-1.0000000e+00, -1.0000000e+00, -1.0000000e+00],
[ 1.0000000e+01, -1.2246468e-15, -1.2246468e-15, -1.2246468e-15,
1.0000000e+00, 1.0000000e+00, 1.0000000e+00]])
"""
x_in = x
dim = x.shape[-1]
x = ivy.expand_dims(x, axis=-1)
orig_x = x
if linear:
scales = ivy.linspace(1.0, max_freq / 2, num_bands, device=dev(x))
elif ivy.backend == "torch" and isinstance(max_freq, float):
scales = ivy.logspace(
0.0,
ivy.log(ivy.array(max_freq / 2)) / math.log(10),
num_bands,
base=10,
device=dev(x),
)
else:
scales = ivy.logspace(
0.0,
ivy.log(max_freq / 2) / math.log(10),
num_bands,
base=10,
device=dev(x),
)
scales = ivy.astype(scales, ivy.dtype(x))
scales = scales[(*((None,) * (len(x.shape) - len(scales.shape))), Ellipsis)]
x = x * scales * math.pi
sin_x = ivy.sin(x)
cos_x = ivy.cos(x)
if flatten:
orig_x = x_in
sin_x = ivy.reshape(sin_x, [-1, num_bands * dim])
cos_x = ivy.reshape(cos_x, [-1, num_bands * dim])
if concat:
return ivy.concat([orig_x, sin_x, cos_x], axis=-1)
return sin_x, cos_x
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def value_is_nan(
x: Union[ivy.Array, ivy.NativeArray, Number],
/,
*,
include_infs: bool = True,
) -> bool:
"""Determine whether the single valued array or scalar is of nan type.
Parameters
----------
x
        The input to check.
include_infs
Whether to include infs and -infs in the check.
Default is ``True``.
Returns
-------
ret
Boolean as to whether the input value is a nan or not.
Examples
--------
>>> x = ivy.array([451])
>>> y = ivy.value_is_nan(x)
>>> print(y)
False
>>> x = ivy.array([float('inf')])
>>> y = ivy.value_is_nan(x)
>>> print(y)
True
>>> x = ivy.array([float('inf')])
>>> y = ivy.value_is_nan(x, include_infs=False)
>>> print(y)
False
>>> x = ivy.array([float('nan')])
>>> y = ivy.value_is_nan(x, include_infs=False)
>>> print(y)
True
>>> x = ivy.array([0])
>>> y = ivy.value_is_nan(x)
>>> print(y)
False
"""
x_scalar = ivy.to_scalar(x) if ivy.is_array(x) else x
if x_scalar != x:
return True
if include_infs and (x_scalar in [INF, -INF]):
return True
return False
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def has_nans(
x: Union[ivy.Array, ivy.NativeArray], /, *, include_infs: bool = True
) -> bool:
"""Determine whether the array contains any nans, as well as infs or -infs
if specified.
Parameters
----------
x
Input array.
include_infs
Whether to include ``+infinity`` and ``-infinity`` in the check.
Default is ``True``.
Returns
-------
ret
Boolean as to whether the array contains nans.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.has_nans(x)
>>> print(y)
False
>>> x = ivy.array([float('nan'), 2, 3])
>>> y = ivy.has_nans(x)
>>> print(y)
True
>>> x = ivy.array([float('inf'), 2, 3])
>>> y = ivy.has_nans(x)
>>> print(y)
True
>>> x = ivy.array([float('inf'), 2, 3])
>>> y = ivy.has_nans(x, include_infs=False)
>>> print(y)
False
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = ivy.has_nans(x)
>>> print(y)
{
a: False,
b: False
}
"""
return ivy.value_is_nan(ivy.sum(x), include_infs=include_infs)
@handle_exceptions
def exists(x: Any, /) -> bool:
"""Check as to whether the input is None or not.
Parameters
----------
x
Input to check.
Returns
-------
ret
True if x is not None, else False.
Examples
--------
With :code:`Any` input:
>>> x = None
>>> y = ivy.exists(x)
>>> print(y)
False
>>> x = ""
>>> y = ivy.exists(x)
>>> print(y)
True
>>> x = []
>>> y = ivy.exists(x)
>>> print(y)
True
>>> x = 1
>>> y = ivy.exists(x)
>>> print(y)
True
>>> x = "abc"
>>> y = ivy.exists(x)
>>> print(y)
True
>>> x = [1, 0, -1, 1]
>>> y = ivy.exists(x)
>>> print(y)
True
>>> x = ivy.array([1, 2, 3, 1.2])
>>> y = ivy.exists(x)
>>> print(y)
True
With a mix of :class:`ivy.Container` and :code:`Any` input:
>>> x = ivy.Container(a=None, b=None)
>>> y = ivy.exists(x)
>>> print(y)
True
>>> x = ivy.Container(a=None, b="")
>>> y = ivy.exists(x)
>>> print(y)
True
>>> x = ivy.Container(a=123, b="")
>>> y = ivy.exists(x)
>>> print(y)
True
"""
return x is not None
@handle_exceptions
def default(
x: Any,
/,
default_val: Any,
*,
catch_exceptions: bool = False,
rev: bool = False,
with_callable: bool = False,
) -> Any:
"""Return x provided it exists (is not None), else returns default value.
Parameters
----------
x
Input which may or may not exist (be None).
default_val
The default value.
catch_exceptions
Whether to catch exceptions from callable x.
Default is ``False``.
rev
Whether to reverse the input x and default_val.
Default is ``False``.
with_callable
Whether either of the arguments might be callable functions.
Default is ``False``.
Returns
-------
ret
x if x exists (is not None), else default.
Examples
--------
With :code:`Any` input:
>>> x = None
>>> y = ivy.default(x, "default_string")
>>> print(y)
default_string
>>> x = ""
>>> y = ivy.default(x, "default_string")
>>> print(y)
>>> x = ivy.array([4, 5, 6])
>>> y = ivy.default(x, ivy.array([1, 2, 3]), rev=True)
>>> print(y)
ivy.array([1, 2, 3])
>>> x = lambda: ivy.array([1, 2, 3])
>>> y = ivy.default(x, ivy.array([4, 5, 6]), with_callable=True)
>>> print(y)
ivy.array([1, 2, 3])
>>> x = lambda: None
>>> y = ivy.default(x, lambda: ivy.array([1, 2, 3]), with_callable=True)
>>> print(y)
ivy.array([1, 2, 3])
>>> x = lambda: None
>>> y = ivy.default(x, lambda: ivy.array([1, 2, 3]), catch_exceptions=True)
>>> print(y)
ivy.array([1, 2, 3])
>>> x = lambda a, b: a + b
>>> y = ivy.default(x, lambda: ivy.array([1, 2, 3]), with_callable=True,
... catch_exceptions=True)
>>> print(y)
ivy.array([1, 2, 3])
>>> x = lambda a, b: a + b
>>> y = ivy.default(x, lambda: ivy.array([1, 2, 3]), with_callable=True,
... catch_exceptions=True, rev=True)
>>> print(y)
ivy.array([1, 2, 3])
"""
with_callable = catch_exceptions or with_callable
if rev:
x, default_val = default_val, x
if with_callable:
x_callable = callable(x)
default_callable = callable(default_val)
else:
x_callable = False
default_callable = False
if catch_exceptions:
# noinspection PyBroadException
try:
x = x() if x_callable else x
except Exception:
return default_val() if default_callable else default_val
else:
x = x() if x_callable else x
return x if exists(x) else default_val() if default_callable else default_val
@handle_exceptions
def to_ivy_shape(shape: Union[ivy.Shape, ivy.NativeShape]) -> ivy.Shape:
"""Return the input shape in ivy.Shape form.
Parameters
----------
shape
The input to be converted
Returns
-------
ret
the input in ivy.Shape form
"""
if isinstance(shape, ivy.Shape):
return shape
return ivy.Shape(shape)
@handle_exceptions
def to_native_shape(
shape: Union[ivy.Array, ivy.Shape, ivy.NativeShape, tuple, int, list],
) -> ivy.NativeShape:
"""Return the input shape in its native backend framework form.
Parameters
----------
shape
The input to be converted
Returns
-------
ret
the input in its native framework form
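Examples
--------
A minimal sketch; the concrete return type depends on the backend in use, so
only the contained values are checked here.
>>> ns = ivy.to_native_shape((2, 3))
>>> tuple(ns)
(2, 3)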
"""
native_shape_type = (ivy.NativeShape,)
if ivy.current_backend_str() == "torch":
native_shape_type += (tuple,)
if len(backend_stack) != 0 and isinstance(shape, native_shape_type):
return shape
ivy.utils.assertions.check_isinstance(
shape, (int, list, tuple, ivy.Array, ivy.NativeArray, ivy.Shape)
)
if isinstance(shape, int):
shape = (shape,)
elif isinstance(shape, list):
shape = tuple(shape)
elif is_array(shape):
shape = ivy.to_numpy(shape).tolist()
elif isinstance(shape, ivy.Shape):
shape = shape.shape
ivy.utils.assertions.check_all(
[isinstance(v, int) for v in shape if not is_array(v)],
"shape must take integers only",
as_array=False,
)
ivy.utils.assertions.check_true(
not is_array(shape) or ivy.is_int_dtype(shape), "shape must take integers only"
)
return ivy.NativeShape(shape) if len(backend_stack) != 0 else ivy.Shape(shape)
@handle_exceptions
@handle_nestable
def try_else_none(fn: Callable, *args: Any, **kwargs: Any) -> Union[Callable, None]:
"""Try and return the function, otherwise return None if an exception was
raised during function execution.
Parameters
----------
fn
Function to try and call and return.
args
list of arguments.
kwargs
dictionary of keyword arguments
Returns
-------
Either the function itself or None if an exception was raised
during function execution.
Examples
--------
With a function that is executed without any exception:
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([4, 5, 6])
>>> z = ivy.try_else_none(ivy.add, x, y)
>>> print(z.__name__)
add
With a function that is executed with an exception:
>>> x = ivy.array([1, 2, 3])
>>> y = 'hemant'
>>> z = ivy.try_else_none(ivy.add, x, y)
>>> print(z)
None
"""
try:
_ = fn(*args, **kwargs)
return fn
except Exception:
return None
@handle_exceptions
def arg_names(receiver):
"""Get the expected keyword arguments for a function or class constructor.
Parameters
----------
receiver
Function or class constructor
Returns
-------
ret
List containing the keyword arguments' names for a function or class constructor
Examples
--------
>>> x = ivy.arg_names(ivy.tan)
>>> print(x)
['x', 'out']
>>> x = ivy.arg_names(ivy.optimizers.Adam)
>>> print(x)
['lr', 'beta1', 'beta2', 'epsilon', 'inplace',
'stop_gradients', 'trace_on_next_step', 'device']
"""
return list(inspect.signature(receiver).parameters.keys())
@handle_exceptions
def match_kwargs(
kwargs: Dict, *receivers: Iterable[Callable], allow_duplicates: bool = False
) -> Union[List[Dict], Dict]:
"""Match keyword arguments to either class or function receivers.
Parameters
----------
kwargs
Keyword arguments to match.
receivers
Functions and/or classes to match the keyword arguments to.
allow_duplicates
Whether to allow one keyword argument to be used for multiple receivers.
Default is ``False``.
Returns
-------
ret
Sequence of keyword arguments split as best as possible.
Examples
--------
>>> o = ivy.zeros(3)
>>> kwargs = {'out': o, 'bias': ivy.arange(3)}
>>> x = ivy.match_kwargs(kwargs, ivy.add, ivy.linear)
>>> print(x)
[{'out': ivy.array([0., 0., 0.])}, {'bias': ivy.array([0, 1, 2])}]
>>> o = ivy.zeros(3)
>>> kwargs = {'out': o, 'bias': ivy.arange(3)}
>>> x = ivy.match_kwargs(kwargs, ivy.linear, ivy.add)
>>> print(x)
[{'out': ivy.array([0., 0., 0.]), 'bias': ivy.array([0, 1, 2])}, {}]
"""
split_kwargs = []
for receiver in receivers:
expected_kwargs = arg_names(receiver)
found_kwargs = {k: v for k, v in kwargs.items() if k in expected_kwargs}
if not allow_duplicates:
for k in found_kwargs:
del kwargs[k]
split_kwargs.append(found_kwargs)
if len(split_kwargs) == 1:
return split_kwargs[0]
return split_kwargs
@handle_exceptions
def cache_fn(func: Callable) -> Callable:
"""Cache function outputs.
A decorator to wrap a function, such that computed outputs are cached to avoid
recalculating them later.
Parameters
----------
func
The function to wrap, whose output should be cached for later.
Returns
-------
ret
The newly cache wrapped function.
Examples
--------
With positional arguments only:
>>> def my_sum(val1:float, val2:float)->float: return val1 + val2
>>> cached_sum = ivy.cache_fn(my_sum)
>>> print(cached_sum(3, 5))
8
With keyword arguments:
>>> def line_eq(x:float, /, *, slp:float=2, itc:float=0)->float: return x*slp+itc
>>> cached_line_eq = ivy.cache_fn(line_eq)
>>> print(cached_line_eq(3, itc=5, slp=2))
11
"""
global FN_CACHE
if func not in FN_CACHE:
FN_CACHE[func] = {}
@wraps(func)
def cached_fn(*args, **kwargs):
key = "".join(
([f"{str(i)}, " for i in args] + [" kw, "])
+ [f"{str(i)}, " for i in sorted(kwargs.items())]
)
cache = FN_CACHE[func]
if key in cache:
return cache[key]
ret = func(*args, **kwargs)
cache[key] = ret
return ret
return cached_fn
@handle_exceptions
def current_backend_str() -> Union[str, None]:
"""Return framework string.
Returns
-------
ret
The framework string.
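Examples
--------
A minimal sketch, assuming the numpy backend has been set beforehand.
>>> ivy.set_backend("numpy")
>>> print(ivy.current_backend_str())
numpy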
"""
fw = current_backend()
if not backend_stack:
return ""
return fw.current_backend_str()
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def einops_rearrange(
x: Union[ivy.Array, ivy.NativeArray],
pattern: str,
/,
*,
out: Optional[ivy.Array] = None,
**axes_lengths: Dict[str, int],
) -> ivy.Array:
"""Perform einops rearrange operation on input array x.
Parameters
----------
x
Input array to be re-arranged.
pattern
Rearrangement pattern.
axes_lengths
Any additional specifications for dimensions.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
New array with einops.rearrange having been applied.
Examples
--------
With :class:`ivy.Array` instance method:
>>> x = ivy.array([[1, 2, 3],
... [-4, -5, -6]])
>>> y = x.einops_rearrange("height width -> width height")
>>> print(y)
ivy.array([[ 1, -4],
[ 2, -5],
[ 3, -6]])
>>> x = ivy.array([[[ 1, 2, 3],
... [ 4, 5, 6]],
... [[ 7, 8, 9],
... [10, 11, 12]]])
>>> y = x.einops_rearrange("c h w -> c (h w)")
>>> print(y)
ivy.array([[ 1, 2, 3, 4, 5, 6],
[ 7, 8, 9, 10, 11, 12]])
>>> x = ivy.array([[1, 2, 3, 4, 5, 6],
... [7, 8, 9, 10, 11, 12]])
>>> y = ivy.zeros((4,3))
>>> x.einops_rearrange("c (h w) -> (c h) w", out=y, h=2, w=3)
>>> print(y)
ivy.array([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[10, 11, 12]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[-4.47, 0.93, -3.34],
... [3.66, 24.29, 3.64]]),
... b=ivy.array([[4.96, 1.52, -10.67],
... [4.36, 13.96, 0.3]]))
>>> y = ivy.einops_rearrange(x, 'a b -> b a')
>>> print(y)
{
a: ivy.array([[-4.46999979, 3.66000009],
[0.93000001, 24.29000092],
[-3.33999991, 3.6400001]]),
b: ivy.array([[4.96000004, 4.36000013],
[1.51999998, 13.96000004],
[-10.67000008, 0.30000001]])
}
With varying pattern:
Suppose we have a set of 32 images in "h w c" format (height-width-channel)
and concatenate images along height (vertical axis), 960 = 32 * 30
>>> images = ivy.asarray([ivy.random_normal(shape=(30, 40, 3)) for _ in range(32)])
>>> x = ivy.einops_rearrange(images, 'b h w c -> (b h) w c')
>>> print(x.shape)
(960, 40, 3)
# Concatenate images along horizontal axis, 1280 = 32 * 40
>>> images = ivy.asarray([ivy.random_normal(shape=(30, 40, 3)) for _ in range(32)])
>>> x = ivy.einops_rearrange(images, 'b h w c -> h (b w) c')
>>> print(x.shape)
(30, 1280, 3)
# Reorder axes to "b c h w" format for deep learning
>>> images = ivy.asarray([ivy.random_normal(shape=(30, 40, 3)) for _ in range(32)])
>>> x = ivy.einops_rearrange(images, 'b h w c -> b c h w')
>>> print(x.shape)
(32, 3, 30, 40)
# Flatten each image into a vector, 3600 = 30 * 40 * 3
>>> images = ivy.asarray([ivy.random_normal(shape=(30, 40, 3)) for _ in range(32)])
>>> x = ivy.einops_rearrange(images, 'b h w c -> b (c h w)')
>>> print(x.shape)
(32, 3600)
# Split each image into 4 smaller (top-left, top-right, bottom-left, bottom-right),
# 128 = 32 * 2 * 2
>>> images = ivy.asarray([ivy.random_normal(shape=(30, 40, 3)) for _ in range(32)])
>>> x = ivy.einops_rearrange(images, 'b (h1 h) (w1 w) c -> (b h1 w1) h w c',
... h1=2, w1=2)
>>> print(x.shape)
(128, 15, 20, 3)
# Space-to-depth operation
>>> images = ivy.asarray([ivy.random_normal(shape=(30, 40, 3)) for _ in range(32)])
>>> x = ivy.einops_rearrange(images, 'b (h h1) (w w1) c -> b h w (c h1 w1)', h1=2,
... w1=2)
>>> print(x.shape)
(32, 15, 20, 12)
"""
ret = einops.rearrange(x._data, pattern, **axes_lengths)
ret = ivy.array(ret, dtype=x.dtype)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_native_arrays
@handle_array_function
def einops_reduce(
x: Union[ivy.Array, ivy.NativeArray],
pattern: str,
reduction: Union[str, Callable],
/,
*,
out: Optional[ivy.Array] = None,
**axes_lengths: Dict[str, int],
) -> ivy.Array:
"""Perform einops reduce operation on input array x.
Parameters
----------
x
Input array to be reduced.
pattern
Reduction pattern.
reduction
One of available reductions ('min', 'max', 'sum', 'mean', 'prod'), or callable.
axes_lengths
Any additional specifications for dimensions.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
New array with einops.reduce having been applied.
This function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of the argument.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[-4.47, 0.93, -3.34],
... [3.66, 24.29, 3.64]])
>>> reduced = ivy.einops_reduce(x, 'a b -> b', 'mean')
>>> print(reduced)
ivy.array([-0.40499985, 12.61000061, 0.1500001 ])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[-4.47, 0.93, -3.34],
... [3.66, 24.29, 3.64]]),
... b=ivy.array([[4.96, 1.52, -10.67],
... [4.36, 13.96, 0.3]]))
>>> reduced = ivy.einops_reduce(x, 'a b -> a', 'mean')
>>> print(reduced)
{
a: ivy.array([-2.29333329, 10.53000069]),
b: ivy.array([-1.39666676, 6.20666695])
}
"""
ret = einops.reduce(x, pattern, reduction, **axes_lengths)
ret = ivy.array(ret, dtype=x.dtype)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
# IMPORTANT: assign attribute directly to function instead of wrapper here
einops_reduce.unsupported_dtypes = {
"torch": ("float16",),
"tensorflow": ("complex",),
"paddle": ("complex", "uint8", "int8", "int16", "float16"),
}
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def einops_repeat(
x: Union[ivy.Array, ivy.NativeArray],
pattern: str,
/,
*,
out: Optional[ivy.Array] = None,
**axes_lengths: Dict[str, int],
) -> ivy.Array:
"""Perform einops repeat operation on input array x.
Parameters
----------
x
Input array to be repeated.
pattern
Rearrangement pattern.
axes_lengths
Any additional specifications for dimensions.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
New array with einops.repeat having been applied.
This function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of the argument.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3, 4])
>>> repeated = ivy.einops_repeat(x, 'a -> b a', b=2)
>>> print(repeated)
ivy.array([[1, 2, 3, 4],
[1, 2, 3, 4]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[4,5],
... [1, 3]]),
... b=ivy.array([[9, 10],
... [4, 2]]))
>>> repeated = ivy.einops_repeat(x, 'h w -> h (c w)', c=2)
>>> print(repeated)
{
a: ivy.array([[4, 5, 4, 5],
[1, 3, 1, 3]]),
b: ivy.array([[9, 10, 9, 10],
[4, 2, 4, 2]])
}
"""
ret = einops.repeat(x._data, pattern, **axes_lengths)
ret = ivy.array(ret, dtype=x.dtype)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
ivy.min_denominator = min_denominator_stack[-1] if min_denominator_stack else 1e-12
@handle_exceptions
@handle_array_function
def set_min_denominator(val: float) -> None:
"""Set the global minimum denominator used by ivy for numerically stable
division.
Parameters
----------
val
The value to set the global minimum denominator to.
Examples
--------
>>> x = ivy.min_denominator
>>> print(x)
1e-12
>>> ivy.set_min_denominator(1e-13)
>>> y = ivy.min_denominator
>>> print(y)
1e-13
"""
global min_denominator_stack
ivy.utils.assertions.check_isinstance(val, (int, float))
min_denominator_stack.append(val)
ivy.__setattr__("min_denominator", val, True)
@handle_exceptions
def unset_min_denominator() -> None:
"""Reset the global minimum denominator used by ivy for numerically stable
division to the previous value.
Examples
--------
>>> ivy.set_min_denominator(1e-10)
>>> y = ivy.min_denominator
>>> print(y)
1e-10
>>> ivy.unset_min_denominator()
>>> ivy.min_denominator
1e-12
"""
global min_denominator_stack
if min_denominator_stack:
min_denominator_stack.pop(-1)
val = min_denominator_stack[-1] if min_denominator_stack else 1e-12
ivy.__setattr__("min_denominator", val, True)
ivy.min_base = min_base_stack[-1] if min_base_stack else 1e-05
@handle_exceptions
@handle_array_function
def set_min_base(val: float) -> None:
"""Set the global minimum base used by ivy for numerically stable power
raising.
Parameters
----------
val
The new value to set the minimum base to.
Examples
--------
Retrieve the minimum base
>>> x = ivy.min_base
>>> print(x)
1e-05
>>> # Set the minimum base to 1e-04:
>>> ivy.set_min_base(1e-04)
Retrieve the minimum base:
>>> y = ivy.min_base
>>> print(y)
1e-04
>>> # unset the min_base
>>> ivy.unset_min_base()
"""
global min_base_stack
# Ensure val is an instance of 'float' or 'int'
ivy.utils.assertions.check_isinstance(val, (int, float))
# Access and modify min_base_stack
min_base_stack.append(val)
# Set the min_base attribute
ivy.__setattr__("min_base", val, True)
@handle_exceptions
def unset_min_base() -> None:
"""Reset the global minimum base used by ivy for numerically stable power
raising to the previous value.
Examples
--------
>>> ivy.set_min_base(1e-07)
>>> y = ivy.min_base
>>> print(y)
1e-07
>>> ivy.unset_min_base()
>>> ivy.min_base
1e-05
"""
global min_base_stack
if min_base_stack:
min_base_stack.pop(-1)
val = min_base_stack[-1] if min_base_stack else 1e-05
ivy.__setattr__("min_base", val, True)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def stable_divide(
numerator: Union[Number, ivy.Array, ivy.NativeArray],
denominator: Union[Number, ivy.Array, ivy.NativeArray],
/,
*,
min_denominator: Union[Number, ivy.Array, ivy.NativeArray] = None,
) -> Union[Number, ivy.Array]:
"""Divide the numerator by the denominator, with min denominator added to
the denominator for numerical stability.
Parameters
----------
numerator
The numerator of the division.
denominator
The denominator of the division.
min_denominator
The minimum denominator to use; the global ivy.min_denominator (1e-12 by
default) is used when this is not specified.
Returns
-------
ret
The new item following the numerically stable division.
Examples
--------
With :code:`int` input:
>>> x = ivy.stable_divide(1, 2)
>>> print(x)
0.49999999999975
>>> x = ivy.stable_divide(1, 4, min_denominator=1)
>>> print(x)
0.2
With float input:
>>> x = ivy.stable_divide(5.0, 3.33)
>>> print(x)
1.5015015015010504
With :code:`complex` input:
>>> x = ivy.stable_divide(1+1j, 1-1j)
>>> print(x)
(5.000444502911705e-13+0.9999999999995j)
With :class:`ivy.Array` input:
>>> x = ivy.asarray([[10., 20., 30.],
... [40., 50., 60.]])
>>> y = ivy.stable_divide(x, 10.)
>>> print(y)
ivy.array([[1., 2., 3.],
[4., 5., 6.]])
>>> x = ivy.asarray([1,2,3])
>>> y = np.array((1., 3., 5.))
>>> z = ivy.stable_divide(x, y)
>>> print(z)
ivy.array([1. , 0.667, 0.6 ])
>>> x = ivy.asarray([1., 2., 4.])
>>> y = ivy.asarray([1., 0.5, 0.25])
>>> z = ivy.asarray([0.01, 0.02, 0.03])
>>> w = ivy.stable_divide(x, y, min_denominator=z)
>>> print(w)
ivy.array([ 0.99, 3.85, 14.3 ])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.asarray([10., 15.]), b=ivy.asarray([20., 25.]))
>>> y = ivy.stable_divide(x, 0.5)
>>> print(y)
{
a: ivy.array([20., 30.]),
b: ivy.array([40., 50.])
}
>>> x = ivy.Container(a=ivy.asarray([1., 2.]), b=ivy.asarray([3., 4.]))
>>> y = ivy.Container(a=ivy.asarray([0.5, 2.5]), b=ivy.asarray([3.5, 0.4]))
>>> z = ivy.stable_divide(x, y)
>>> print(z)
{
a: ivy.array([2., 0.8]),
b: ivy.array([0.857, 10.])
}
"""
return numerator / (denominator + default(min_denominator, ivy.min_denominator))
@handle_exceptions
@handle_nestable
@inputs_to_ivy_arrays
@handle_array_function
def stable_pow(
base: Union[Number, ivy.Array, ivy.NativeArray],
exponent: Union[Number, ivy.Array, ivy.NativeArray],
/,
*,
min_base: Optional[float] = None,
) -> Any:
"""Raise the base by the power, with ivy.min_base added to the base when
exponent > 1 for numerical stability.
Parameters
----------
base
The base number.
exponent
The exponent number.
min_base
The minimum base to use, use global ivy.min_base by default.
Returns
-------
ret
The new item following the numerically stable power.
Examples
--------
With :code:`int` input:
>>> x = ivy.stable_pow(2, 2)
>>> print(x)
ivy.array(4.00004)
>>> x = ivy.stable_pow(2, 2, min_base=2)
>>> print(x)
ivy.array(16)
With float input:
>>> x = ivy.stable_pow(4.0, .5)
>>> print(x)
ivy.array(2.00000262)
With :code:`complex` input:
>>> x = ivy.stable_pow(3+4j, 2j)
>>> print(x)
ivy.array(-0.15605032-0.01208451j)
With :class:`ivy.Array` input:
>>> x = ivy.asarray([[2, 4],
... [6, 8]])
>>> y = ivy.stable_pow(x, 2)
>>> print(y)
ivy.array([[ 4.00004, 16.00008],
[36.00012, 64.00016]])
>>> x = ivy.asarray([2, 4, 6])
>>> y = ivy.asarray([2, 3, 4])
>>> z = ivy.stable_pow(x, y)
>>> print(z)
ivy.array([ 4.00004, 64.00048, 1296.00864])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.asarray([2, 4]), b=ivy.asarray([6, 8]))
>>> y = ivy.stable_pow(x, 2)
>>> print(y)
{
a: ivy.array([4.00004, 16.00008]),
b: ivy.array([36.00012, 64.00016])
}
>>> x = ivy.Container(a=ivy.asarray([2, 4]), b=ivy.asarray([6, 8]))
>>> y = ivy.Container(a=ivy.asarray([1, 3]), b=ivy.asarray([4, 5]))
>>> z = ivy.stable_pow(x, y)
>>> print(z)
{
a: ivy.array([2.00001, 64.00048]),
b: ivy.array([1296.00864, 32768.2048])
}
"""
return_dtype = ivy.promote_types(
ivy.default_dtype(item=base),
ivy.default_dtype(item=default(min_base, ivy.min_base)),
)
return_dtype = ivy.promote_types(return_dtype, ivy.default_dtype(item=exponent))
ret = (base + default(min_base, ivy.min_base)) ** ivy.array(exponent)
return ret.astype(return_dtype)
stable_pow.unsupported_dtypes = ("bfloat16",)
@handle_exceptions
def get_all_arrays_in_memory() -> List[Union[ivy.Array, ivy.NativeArray]]:
"""Get all arrays which are currently alive.
Returns
-------
ret
All arrays which are alive.
Examples
--------
>>> ivy.get_all_arrays_in_memory()
[]
>>> x = ivy.get_all_arrays_in_memory()
>>> x
[]
>>> y = ivy.array([0, 1, 2])
>>> x
[ivy.array([0, 1, 2])]
"""
all_arrays = []
for obj in gc.get_objects():
try:
if ivy.current_backend_str() in ["", "numpy"]:
if ivy.is_ivy_array(obj):
all_arrays.append(obj)
else:
if ivy.is_native_array(obj):
all_arrays.append(obj)
except Exception:
pass
return all_arrays
@handle_exceptions
def num_arrays_in_memory() -> int:
"""Return the number of arrays which are currently alive.
Returns
-------
ret
Number of all arrays which are alive.
Examples
--------
>>> ivy.num_arrays_in_memory()
0
>>> x = ivy.num_arrays_in_memory()
>>> x
0
>>> y = ivy.array([0, 1, 2])
>>> x
1
"""
return len(get_all_arrays_in_memory())
@handle_exceptions
def print_all_arrays_in_memory():
"""Print all native Ivy arrays in memory to the console.
Gets all the arrays which are currently alive (in the garbage collector) from
the get_all_arrays_in_memory() function and prints them to the console.
"""
for arr in get_all_arrays_in_memory():
print(type(arr), arr.shape)
ivy.queue_timeout = queue_timeout_stack[-1] if queue_timeout_stack else 15.0
@handle_exceptions
@handle_array_function
def set_queue_timeout(timeout: float):
"""Set a timeout value (in seconds) for the global queue.
Set the global queue timeout value (in seconds). The default value, without this
function being called, is 15 seconds.
Parameters
----------
timeout
The timeout when waiting for containers to arrive from the queues.
To be set in seconds.
Examples
--------
>>> x = ivy.set_queue_timeout(10)
>>> x = ivy.queue_timeout
>>> print(x)
10.0
>>> ivy.set_queue_timeout(30)
>>> y = ivy.queue_timeout
>>> print(y)
30
"""
global queue_timeout_stack
ivy.utils.assertions.check_isinstance(timeout, (int, float))
queue_timeout_stack.append(timeout)
ivy.__setattr__("queue_timeout", timeout, True)
@handle_exceptions
def unset_queue_timeout() -> None:
"""Reset the global queue timeout value (in seconds) to the previous state.
Examples
--------
>>> ivy.set_queue_timeout(10.0)
>>> y = ivy.queue_timeout
>>> print(y)
10.0
>>> ivy.unset_queue_timeout()
>>> ivy.queue_timeout
15.0
"""
global queue_timeout_stack
if queue_timeout_stack:
queue_timeout_stack.pop(-1)
timeout = queue_timeout_stack[-1] if queue_timeout_stack else 15.0
ivy.__setattr__("queue_timeout", timeout, True)
ivy.tmp_dir = tmp_dir_stack[-1] if tmp_dir_stack else "/tmp"
@handle_exceptions
def set_tmp_dir(tmp_dr: str) -> None:
"""Set the directory for saving temporary files.
Parameters
----------
tmp_dr
The new directory for saving temporary files
Examples
--------
>>> x = ivy.tmp_dir
>>> print(x)
/tmp
>>> ivy.set_tmp_dir("/my_tmp")
>>> y = ivy.tmp_dir
>>> print(y)
/my_tmp
>>> # Unset the tmp_dr
>>> ivy.unset_tmp_dir()
"""
global tmp_dir_stack
ivy.utils.assertions.check_isinstance(tmp_dr, str)
tmp_dir_stack.append(tmp_dr)
ivy.__setattr__("tmp_dir", tmp_dr, True)
@handle_exceptions
def unset_tmp_dir() -> None:
"""Reset the directory for saving temporary files to the previous value.
Examples
--------
>>> ivy.set_tmp_dir("/my_dir")
>>> y = ivy.tmp_dir
>>> print(y)
/my_dir
>>> ivy.unset_tmp_dir()
>>> ivy.tmp_dir
/tmp
"""
global tmp_dir_stack
if tmp_dir_stack:
tmp_dir_stack.pop(-1)
tmp_dr = tmp_dir_stack[-1] if tmp_dir_stack else "/tmp"
ivy.__setattr__("tmp_dir", tmp_dr, True)
@handle_exceptions
def container_types():
"""Summary.
Returns
-------
ret
a key-value structure, and exposes public methods .keys(), .values() and
items().
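Examples
--------
A minimal sketch, assuming the numpy backend, which reports no extra container
types.
>>> ivy.set_backend("numpy")
>>> print(ivy.container_types())
[]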
"""
# noinspection PyBroadException
try:
return current_backend().container_types()
except ValueError:
return []
@handle_exceptions
def inplace_arrays_supported() -> bool:
"""Determine whether inplace arrays are supported for the current backend
framework.
Returns
-------
ret
Boolean, whether or not inplace arrays are supported.
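Examples
--------
A minimal sketch, assuming the numpy backend, whose arrays support in-place
updates.
>>> ivy.set_backend("numpy")
>>> ivy.inplace_arrays_supported()
True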
"""
return current_backend().inplace_arrays_supported()
@handle_exceptions
def inplace_variables_supported() -> bool:
"""Determine whether inplace variables are supported for the current
backend framework.
Returns
-------
ret
Boolean, whether or not inplace variables are supported.
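Examples
--------
A minimal sketch, assuming the numpy backend, whose variables also support
in-place updates.
>>> ivy.set_backend("numpy")
>>> ivy.inplace_variables_supported()
True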
"""
return current_backend().inplace_variables_supported()
@handle_exceptions
@handle_nestable
@inputs_to_native_arrays
@handle_array_function
def supports_inplace_updates(x: Union[ivy.Array, ivy.NativeArray], /) -> bool:
"""Return if in-place operations are supported for x's data type.
Determine whether in-place operations are supported for x's data type, by the
current backend framework setting.
Parameters
----------
x
Input variable for whose data type we check whether the current backend
framework supports in-place operations.
Returns
-------
ret
Whether in-place operations are supported for the data type of x.
Raises
------
IvyException
If x isn't a class instance of ivy.Array or ivy.NativeArray, an exception will
be raised.
This function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of the argument.
Examples
--------
With :class:`ivy.Array` input and default backend set as `numpy`:
>>> x = ivy.array([0, 1, 2])
>>> y = ivy.supports_inplace_updates(x)
>>> print(y)
True
With :class:`ivy.Container` input and backend set as `torch`:
>>> x = ivy.Container(a=ivy.array([5., 6.]), b=ivy.array([7., 8.]))
>>> y = ivy.supports_inplace_updates(x)
>>> print(y)
{
a: True,
b: True
}
With `ivy.Array` input and backend set as "tensorflow":
>>> x = ivy.array([1., 4.2, 2.2])
>>> ret = x.supports_inplace_updates()
>>> print(ret)
False
"""
if _is_variable(x):
return ivy.inplace_variables_supported()
elif ivy.is_native_array(x):
return ivy.inplace_arrays_supported()
raise ivy.utils.exceptions.IvyException(
"Input x must be either a variable or an array."
)
@handle_exceptions
@handle_nestable
@inputs_to_native_arrays
@handle_array_function
def assert_supports_inplace(x: Union[ivy.Array, ivy.NativeArray], /) -> bool:
"""Assert that inplace operations are supported for x.
Parameters
----------
x
Input variable or array to check for inplace support for.
Returns
-------
ret
True if supports, raises IvyBackendException otherwise
This function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of the argument.
Examples
--------
With :class:`ivy.Array` input and default backend set as `numpy`:
>>> ivy.set_backend("numpy")
>>> x = ivy.array([1, 2, 3])
>>> print(x.assert_supports_inplace())
True
With :class:`ivy.Array` input and default backend set as `torch`:
>>> ivy.set_backend("torch")
>>> x = ivy.array([1, 2, 3])
>>> print(x.assert_supports_inplace())
True
With :class:`ivy.Container` input and default backend set as `numpy`:
>>> ivy.set_backend("numpy")
>>> x = ivy.Container(a=ivy.array([5, 6]), b=ivy.array([7, 8]))
>>> print(x.assert_supports_inplace())
{
a: True,
b: True
}
With :class:`ivy.Container` input and default backend set as `torch`:
>>> ivy.set_backend("torch")
>>> x = ivy.Container(a=ivy.array([5, 6]), b=ivy.array([7, 8]))
>>> print(x.assert_supports_inplace())
{
a: True,
b: True
}
"""
ivy.utils.assertions.check_true(
ivy.supports_inplace_updates(x),
f"Inplace operations are not supported {type(x)} types with"
f" {ivy.current_backend_str()} backend",
)
return True
@handle_nestable
@handle_partial_mixed_function
@handle_view_indexing
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def get_item(
x: Union[ivy.Array, ivy.NativeArray],
/,
query: Union[ivy.Array, ivy.NativeArray, Tuple],
*,
copy: Optional[bool] = None,
) -> ivy.Array:
"""Gather slices from x according to query array, identical to x[query].
Parameters
----------
x
array, the array from which to gather values.
query
array, index array, integer indices or boolean mask.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
Returns
-------
ret
New array with the values gathered at the specified indices.
Examples
--------
>>> x = ivy.array([0, -1, 20])
>>> query = ivy.array([0, 1])
>>> print(ivy.get_item(x, query))
ivy.array([ 0, -1])
>>> x = ivy.array([[4, 5], [20, 128], [-2, -10]])
>>> query = ivy.array([[True, False], [False, False], [True, True]])
>>> print(ivy.get_item(x, query))
ivy.array([ 4, -2, -10])
"""
if ivy.is_array(query) and ivy.is_bool_dtype(query):
if query.ndim == 0:
if query is False:
return ivy.zeros(shape=(0,) + x.shape, dtype=x.dtype)
return x[None] # equivalent to ivy.expand_dims(x, axis=0)
query = ivy.nonzero(query, as_tuple=False)
ret = ivy.gather_nd(x, query)
else:
query, target_shape, vector_inds = _parse_query(query, x.shape)
if vector_inds is not None:
x = ivy.permute_dims(
x,
axes=[
*vector_inds,
*[i for i in range(len(x.shape)) if i not in vector_inds],
],
)
ret = ivy.gather_nd(x, query)
ret = ivy.reshape(ret, target_shape) if target_shape != list(ret.shape) else ret
return ret
get_item.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_nestable
@handle_partial_mixed_function
@inputs_to_ivy_arrays
@handle_array_function
def set_item(
x: Union[ivy.Array, ivy.NativeArray],
query: Union[ivy.Array, ivy.NativeArray, Tuple],
val: Union[ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[bool] = False,
) -> ivy.Array:
"""Replace slices of x (defined by query) with val, identical to x[query] =
val.
Parameters
----------
x
the array to be updated.
query
either an index array, or a tuple of integers or slices.
val
the array containing the values to be infused into x
copy
boolean indicating whether to copy x.
If True, the function will update and return a copy of x.
If False, the function will update x inplace.
Returns
-------
ret
the array with updated values at the specified indices.
Examples
--------
>>> x = ivy.array([0, -1, 20])
>>> query = ivy.array([0, 1])
>>> val = ivy.array([10, 10])
>>> ivy.set_item(x, query, val)
>>> print(x)
ivy.array([10, 10, 20])
>>> x = ivy.array([[0, -1, 20], [5, 2, -8]])
>>> query = ivy.array([1, 1])
>>> val = ivy.array([10, 10])
>>> y = ivy.set_item(x, query, val, copy=True)
>>> print(y)
ivy.array([[ 0, -1, 20],
[10, 10, 10]])
"""
if copy:
x = ivy.copy_array(x)
if not ivy.is_array(val):
val = ivy.array(val)
if 0 in x.shape or 0 in val.shape:
return x
if ivy.is_array(query) and ivy.is_bool_dtype(query):
if not len(query.shape):
query = ivy.tile(query, (x.shape[0],))
target_shape = ivy.get_item(x, query).shape
indices = ivy.nonzero(query, as_tuple=False)
else:
indices, target_shape, _ = _parse_query(query, x.shape, scatter=True)
if indices is None:
return x
val = _broadcast_to(val, target_shape).astype(x.dtype)
ret = ivy.scatter_nd(indices, val, reduction="replace", out=x)
return ret
set_item.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
def _parse_query(query, x_shape, scatter=False):
query = (query,) if not isinstance(query, tuple) else query
# sequence and integer queries are dealt with as array queries
query = [ivy.array(q) if isinstance(q, (tuple, list, int)) else q for q in query]
# check whether the non-slice queries are in consecutive positions
# if they are not, they have to be moved to the front
# https://numpy.org/neps/nep-0021-advanced-indexing.html#mixed-indexing
non_slice_q_idxs = [i for i, q in enumerate(query) if ivy.is_array(q)]
to_front = (
len(non_slice_q_idxs) > 1
and any(ivy.diff(non_slice_q_idxs) != 1)
and non_slice_q_idxs[-1] < len(x_shape)
)
# extract newaxis queries
new_axes = [i for i, q in enumerate(query) if q is None]
query = [q for q in query if q is not None]
query = [Ellipsis] if query == [] else query
# parse ellipsis
ellipsis_inds = None
if any(q is Ellipsis for q in query):
query, ellipsis_inds = _parse_ellipsis(query, len(x_shape))
# broadcast array queries
array_inds = [i for i, v in enumerate(query) if ivy.is_array(v)]
if array_inds:
array_queries = ivy.broadcast_arrays(
*[v for i, v in enumerate(query) if i in array_inds]
)
array_queries = [
(
ivy.where(arr < 0, arr + x_shape[i], arr).astype(ivy.int64)
if arr.size
else arr.astype(ivy.int64)
)
for arr, i in zip(array_queries, array_inds)
]
for idx, arr in zip(array_inds, array_queries):
query[idx] = arr
# convert slices to range arrays
query = [
_parse_slice(q, x_shape[i]).astype(ivy.int64) if isinstance(q, slice) else q
for i, q in enumerate(query)
]
# fill in missing queries
if len(query) < len(x_shape):
query += [ivy.arange(0, s, 1).astype(ivy.int64) for s in x_shape[len(query) :]]
# calculate target_shape, i.e. the shape the gathered/scattered values should have
if len(array_inds) and to_front:
target_shape = (
[list(array_queries[0].shape)]
+ [list(query[i].shape) for i in range(len(query)) if i not in array_inds]
+ [[] for _ in range(len(array_inds) - 1)]
)
elif len(array_inds):
target_shape = (
[list(query[i].shape) for i in range(0, array_inds[0])]
+ [list(array_queries[0].shape)]
+ [[] for _ in range(len(array_inds) - 1)]
+ [list(query[i].shape) for i in range(array_inds[-1] + 1, len(query))]
)
else:
target_shape = [list(q.shape) for q in query]
if ellipsis_inds is not None:
target_shape = (
target_shape[: ellipsis_inds[0]]
+ [target_shape[ellipsis_inds[0] : ellipsis_inds[1]]]
+ target_shape[ellipsis_inds[1] :]
)
for i, ax in enumerate(new_axes):
if len(array_inds) and to_front:
ax -= sum(1 for x in array_inds if x < ax) - 1
ax = ax + i
target_shape = [*target_shape[:ax], 1, *target_shape[ax:]]
target_shape = _deep_flatten(target_shape)
# calculate the indices mesh (indices in gather_nd/scatter_nd format)
query = [ivy.expand_dims(q) if not len(q.shape) else q for q in query]
if len(array_inds):
array_queries = [
(
arr.reshape((-1,))
if len(arr.shape) > 1
else ivy.expand_dims(arr) if not len(arr.shape) else arr
)
for arr in array_queries
]
array_queries = ivy.stack(array_queries, axis=1)
if len(array_inds) == len(query): # advanced indexing
indices = array_queries.reshape((*target_shape, len(x_shape)))
elif len(array_inds) == 0: # basic indexing
indices = ivy.stack(ivy.meshgrid(*query, indexing="ij"), axis=-1).reshape(
(*target_shape, len(x_shape))
)
else: # mixed indexing
if to_front:
post_array_queries = (
ivy.stack(
ivy.meshgrid(
*[v for i, v in enumerate(query) if i not in array_inds],
indexing="ij",
),
axis=-1,
).reshape((-1, len(query) - len(array_inds)))
if len(array_inds) < len(query)
else ivy.empty((1, 0))
)
indices = ivy.array(
[
(*arr, *post)
for arr, post in itertools.product(
array_queries, post_array_queries
)
]
).reshape((*target_shape, len(x_shape)))
else:
pre_array_queries = (
ivy.stack(
ivy.meshgrid(
*[v for i, v in enumerate(query) if i < array_inds[0]],
indexing="ij",
),
axis=-1,
).reshape((-1, array_inds[0]))
if array_inds[0] > 0
else ivy.empty((1, 0))
)
post_array_queries = (
ivy.stack(
ivy.meshgrid(
*[v for i, v in enumerate(query) if i > array_inds[-1]],
indexing="ij",
),
axis=-1,
).reshape((-1, len(query) - 1 - array_inds[-1]))
if array_inds[-1] < len(query) - 1
else ivy.empty((1, 0))
)
indices = ivy.array(
[
(*pre, *arr, *post)
for pre, arr, post in itertools.product(
pre_array_queries, array_queries, post_array_queries
)
]
).reshape((*target_shape, len(x_shape)))
return (
indices.astype(ivy.int64),
target_shape,
array_inds if len(array_inds) and to_front else None,
)
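# For illustration: given x with shape (3, 4) and the query
# (slice(None), ivy.array([0, 2])), _parse_query is expected to return gather_nd
# style indices of shape (3, 2, 2), a target_shape of [3, 2] and None for
# vector_inds, so that ivy.gather_nd(x, indices) matches x[:, [0, 2]].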
def _parse_ellipsis(so, ndims):
pre = list()
for s in so:
if s is Ellipsis:
break
pre.append(s)
post = list()
for s in reversed(so):
if s is Ellipsis:
break
post.append(s)
ret = list(
pre
+ [slice(None, None, None) for _ in range(ndims - len(pre) - len(post))]
+ list(reversed(post))
)
return ret, (len(pre), ndims - len(post))
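# For illustration: with ndims=4, the query [0, Ellipsis, 2] is expected to be
# expanded by _parse_ellipsis into [0, slice(None), slice(None), 2] together with
# the span (1, 3) that the ellipsis covered, which _parse_query later uses when
# grouping the target shape.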
def _parse_slice(idx, s):
step = 1 if idx.step is None else idx.step
if step > 0:
start = 0 if idx.start is None else idx.start
if start >= s:
stop = start
else:
if start <= -s:
start = 0
elif start < 0:
start = start + s
stop = s if idx.stop is None else idx.stop
if stop > s:
stop = s
elif start <= -s:
stop = 0
elif stop < 0:
stop = stop + s
else:
start = s - 1 if idx.start is None else idx.start
if start < -s:
stop = start
else:
if start >= s:
start = s - 1
elif start < 0:
start = start + s
if idx.stop is None:
stop = -1
else:
stop = idx.stop
if stop > s:
stop = s
elif stop < -s:
stop = -1
elif stop == -s:
stop = 0
elif stop < 0:
stop = stop + s
q_i = ivy.arange(start, stop, step).to_list()
q_i = [q for q in q_i if 0 <= q < s]
q_i = (
ivy.array(q_i)
if len(q_i) or start == stop or idx.stop is not None
else ivy.arange(0, s, 1)
)
return q_i
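# For illustration: _parse_slice(slice(1, None, 2), 6) is expected to yield the
# index array [1, 3, 5], mirroring list(range(6))[1::2].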
def _deep_flatten(iterable):
def _flatten_gen(iterable):
for item in iterable:
if isinstance(item, list):
yield from _flatten_gen(item)
else:
yield item
return list(_flatten_gen(iterable))
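# For illustration: _deep_flatten([[1, [2]], 3]) is expected to return [1, 2, 3],
# flattening arbitrarily nested lists into a single flat list.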
def _numel(shape):
shape = tuple(shape)
return ivy.prod(shape).to_scalar() if shape != () else 1
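# For illustration: _numel((2, 3)) is expected to evaluate to 6 and _numel(()) to 1,
# i.e. the number of elements implied by a shape tuple.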
def _broadcast_to(input, target_shape):
if _numel(tuple(input.shape)) == _numel(tuple(target_shape)):
return ivy.reshape(input, target_shape)
else:
input = input if len(input.shape) else ivy.expand_dims(input, axis=0)
return ivy.broadcast_to(input, target_shape)
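# For illustration: _broadcast_to reshapes when the element counts already match
# (e.g. shape (3,) -> (1, 3)) and otherwise falls back to ivy.broadcast_to,
# expanding 0-dim inputs first so that broadcasting is well defined.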
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def inplace_update(
x: Union[ivy.Array, ivy.NativeArray],
val: Union[ivy.Array, ivy.NativeArray],
/,
*,
ensure_in_backend: bool = False,
keep_input_dtype: bool = False,
) -> ivy.Array:
"""Perform in-place update for the input array.
This will always be performed on ivy.Array instances passed in the input, and will
also be performed on the native array classes in the backend when the backend
supports this. If the backend does not natively support inplace updates, and x is
an ivy.NativeArray instance, then an exception will be thrown.
Parameters
----------
x
The variable to update.
val
The array to update the variable with.
ensure_in_backend
Whether or not to ensure that the `ivy.NativeArray` is also inplace updated.
In cases where it should be, backends which do not natively support inplace
updates will raise an exception.
keep_input_dtype
Whether or not to preserve `x` data type after the update, otherwise `val`
data type will be applied. Defaults to False.
Returns
-------
ret
The array following the in-place update.
Raises
------
IvyException
If the backend set doesn't natively support inplace updates and ensure_in_backend
is True, the above exception will be raised.
This function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of the arguments.
Examples
--------
With :class:`ivy.Array` input and default backend set as `numpy`:
>>> ivy.set_backend("numpy")
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([0])
>>> ivy.inplace_update(x, y)
>>> print(x)
ivy.array([0])
With :class:`ivy.Array` input and default backend set as `numpy`:
>>> ivy.set_backend("numpy")
>>> x = ivy.array([1, 2, 3], dtype=ivy.float32)
>>> y = ivy.array([0, 0, 0], dtype=ivy.int32)
>>> ivy.inplace_update(x, y, keep_input_dtype=True)
>>> print(x)
ivy.array([0., 0., 0.])
With :class:`ivy.Container` instances and backend set as `torch`:
>>> ivy.set_backend("torch")
>>> x = ivy.Container(a=ivy.array([5, 6]), b=ivy.array([7, 8]))
>>> y = ivy.Container(a=ivy.array([1]), b=ivy.array([2]))
>>> ivy.inplace_update(x, y)
>>> print(x)
{
a: ivy.array([1, 1]),
b: ivy.array([2, 2])
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` instances, and backend
set as `torch`:
>>> ivy.set_backend("torch")
>>> x = ivy.Container(a=ivy.array([5, 6]), b=ivy.array([7, 8]))
>>> y = ivy.array([1, 2])
>>> ivy.inplace_update(x, y)
>>> print(x)
{
a: ivy.array([1, 2]),
b: ivy.array([1, 2])
}
"""
return current_backend(x).inplace_update(
x,
val,
ensure_in_backend=ensure_in_backend,
keep_input_dtype=keep_input_dtype,
)
inplace_update.unsupported_dtypes = {"torch": ("bfloat16",)}
ivy.inplace_mode = inplace_mode_stack[-1] if inplace_mode_stack else "lenient"
@handle_exceptions
def set_inplace_mode(mode: str = "lenient") -> None:
"""Set the memory management behavior for in-place updates in Ivy.
By default, Ivy creates new arrays in the backend for in-place updates.
However, this behavior can be controlled by the user
using the 'inplace_mode' parameter.
Parameters
----------
mode : str
The mode for memory management during in-place updates.
- 'lenient': (Default) In this mode, new arrays will be created during
in-place updates to avoid breaking existing code.
This is the default behavior.
- 'strict': In this mode, an error will be raised if the
'inplace_update' function is called
in a backend that doesn't support inplace updates natively.
Returns
-------
None
Examples
--------
>>> set_inplace_mode('lenient')
>>> ivy.inplace_mode
'lenient'
>>> set_inplace_mode('strict')
>>> ivy.inplace_mode
'strict'
Note
----
Enabling strict mode can help users have more control over memory management
but may lead to errors if the backend doesn't support inplace updates natively.
"""
global inplace_mode_stack
inplace_modes = ["lenient", "strict"]
ivy.utils.assertions.check_elem_in_list(
mode, inplace_modes, False, f"inplace mode must be one of {inplace_modes}"
)
inplace_mode_stack.append(mode)
ivy.__setattr__("inplace_mode", mode, True)
@handle_exceptions
def unset_inplace_mode() -> None:
"""Reset the memory management behavior for in-place updates in Ivy to the
previous state.
Examples
--------
>>> set_inplace_mode('strict')
>>> ivy.inplace_mode
'strict'
>>> unset_inplace_mode()
>>> ivy.inplace_mode
'lenient'
"""
global inplace_mode_stack
if inplace_mode_stack:
inplace_mode_stack.pop(-1)
mode = inplace_mode_stack[-1] if inplace_mode_stack else "lenient"
ivy.__setattr__("inplace_mode", mode, True)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def inplace_decrement(
x: Union[ivy.Array, ivy.NativeArray],
val: Union[ivy.Array, ivy.NativeArray],
) -> ivy.Array:
"""Perform in-place decrement for the input array.
Parameters
----------
x
The input array to be decremented by the defined value.
val
The value of decrement.
Returns
-------
ret
The array following the in-place decrement.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[5.3, 7., 0.],[6.8, 8, 3.9],[0., 10., 6.3]])
>>> y = ivy.inplace_decrement(x, 1.25)
>>> print(y)
ivy.array([[ 4.05, 5.75, -1.25],
[ 5.55, 6.75, 2.65],
[-1.25, 8.75, 5.05]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0.5, -5., 30.]), b=ivy.array([0., -25., 50.]))
>>> y = ivy.inplace_decrement(x, 1.5)
>>> print(y)
{
a: ivy.array([-1., -6.5, 28.5]),
b: ivy.array([-1.5, -26.5, 48.5])
}
>>> x = ivy.Container(a=ivy.array([0., 15., 30.]), b=ivy.array([0., 25., 50.]))
>>> y = ivy.Container(a=ivy.array([0., 15., 30.]), b=ivy.array([0., 25., 50.]))
>>> z = ivy.inplace_decrement(x, y)
>>> print(z)
{
a: ivy.array([0., 0., 0.]),
b: ivy.array([0., 0., 0.])
}
>>> x = ivy.Container(a=ivy.array([3., 7., 10.]), b=ivy.array([0., 75., 5.5]))
>>> y = ivy.Container(a=ivy.array([2., 5.5, 7.]), b=ivy.array([0., 25., 2.]))
>>> z = ivy.inplace_decrement(x, y)
>>> print(z)
{
a: ivy.array([1., 1.5, 3.]),
b: ivy.array([0., 50., 3.5])
}
"""
return current_backend(x).inplace_decrement(x, val)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def inplace_increment(
x: Union[ivy.Array, ivy.NativeArray],
val: Union[ivy.Array, ivy.NativeArray],
) -> ivy.Array:
"""Perform in-place increment for the input array.
Parameters
----------
x
The input array to be incremented by the defined value.
val
The value of increment.
Returns
-------
ret
The array following the in-place increment.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[5.3, 7., 0.],[6.8, 8, 3.9],[0., 10., 6.3]])
>>> y = ivy.inplace_increment(x, 3.)
>>> print(y)
ivy.array([[ 8.3, 10., 3.],
[ 9.8, 11., 6.9],
[ 3., 13., 9.3]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 15., 30.]), b=ivy.array([0., 25., 50.]))
>>> y = ivy.inplace_increment(x, 2.5)
>>> print(y)
{
a: ivy.array([2.5, 17.5, 32.5]),
b: ivy.array([2.5, 27.5, 52.5])
}
>>> x = ivy.Container(a=ivy.array([0., 15., 30.]), b=ivy.array([0., 25., 50.]))
>>> y = ivy.Container(a=ivy.array([0., 15., 30.]), b=ivy.array([0., 25., 50.]))
>>> z = ivy.inplace_increment(x, y)
>>> print(z)
{
a: ivy.array([0., 30., 60.]),
b: ivy.array([0., 50., 100.])
}
"""
return current_backend(x).inplace_increment(x, val)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_array_function
@handle_device
def scatter_flat(
indices: Union[ivy.Array, ivy.NativeArray],
updates: Union[ivy.Array, ivy.NativeArray],
/,
*,
size: Optional[int] = None,
reduction: str = "sum",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Scatter flat updates into a new flat array according to flat indices.
Parameters
----------
indices
Indices for the new values to occupy.
updates
Values for the new array to hold.
size
The size of the result. Default is `None`, in which case tensor
argument out must be provided.
reduction
The reduction method for the scatter, one of 'sum', 'min', 'max' or 'replace'
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
New array of given shape, with the values scattered at the indices.
This function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of the argument.
Examples
--------
With :class:`ivy.Array` input:
>>> indices = ivy.array([0, 0, 1, 0, 2, 2, 3, 3])
>>> updates = ivy.array([5, 1, 7, 2, 3, 2, 1, 3])
>>> out = ivy.array([0, 0, 0, 0, 0, 0, 0, 0])
>>> ivy.scatter_flat(indices, updates, out=out)
>>> print(out)
ivy.array([8, 7, 5, 4, 0, 0, 0, 0])
With :class:`ivy.Array` input:
>>> indices = ivy.array([1, 0, 1, 0, 2, 2, 3, 3])
>>> updates = ivy.array([9, 2, 0, 2, 3, 2, 1, 8])
>>> size = 8
>>> print(ivy.scatter_flat(indices, updates, size=size))
ivy.array([2, 0, 2, 8, 0, 0, 0, 0])
With :class:`ivy.Container` and :class:`ivy.Array` input:
>>> indices = ivy.array([1, 0, 1, 0, 2, 2, 3, 3])
>>> updates = ivy.Container(a=ivy.array([9, 2, 0, 2, 3, 2, 1, 8]),
... b=ivy.array([5, 1, 7, 2, 3, 2, 1, 3]))
>>> size = 8
>>> print(ivy.scatter_flat(indices, updates, size=size))
{
a: ivy.array([2, 0, 2, 8, 0, 0, 0, 0]),
b: ivy.array([2, 7, 2, 3, 0, 0, 0, 0])
}
With :class:`ivy.Container` input:
>>> indices = ivy.Container(a=ivy.array([1, 0, 1, 0, 2, 2, 3, 3]),
... b=ivy.array([0, 0, 1, 0, 2, 2, 3, 3]))
>>> updates = ivy.Container(a=ivy.array([9, 2, 0, 2, 3, 2, 1, 8]),
... b=ivy.array([5, 1, 7, 2, 3, 2, 1, 3]))
>>> size = 8
>>> print(ivy.scatter_flat(indices, updates, size=size))
{
a: ivy.array([2, 0, 2, 8, 0, 0, 0, 0]),
b: ivy.array([2, 7, 2, 3, 0, 0, 0, 0])
}
"""
return current_backend(indices).scatter_flat(
indices, updates, size=size, reduction=reduction, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@inputs_to_native_shapes
@to_native_arrays_and_back
@handle_array_function
@handle_device
def scatter_nd(
indices: Union[ivy.Array, ivy.NativeArray],
updates: Union[ivy.Array, ivy.NativeArray],
/,
shape: Optional[Union[tuple, list, ivy.Array, ivy.Shape, ivy.NativeShape]] = None,
*,
reduction: str = "sum",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Scatter updates into a new array according to indices.
Parameters
----------
indices
Indices for the new values to occupy.
updates
Values for the new array to hold.
shape
The shape of the result. Default is ``None``, in which case the out
argument must be provided.
reduction
The reduction method for the scatter, one of 'sum', 'min', 'max' or 'replace'
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
New array of given shape, with the values scattered at the indices.
Examples
--------
With :class:`ivy.Array` input:
>>> indices = ivy.array([[4], [3], [7], [7]])
>>> updates = ivy.array([9, 12, 11, 10])
>>> shape = ivy.array([8])
>>> scatter = ivy.scatter_nd(indices, updates, shape)
>>> print(scatter)
ivy.array([ 0, 0, 0, 12, 9, 0, 0, 21])
>>> indices = ivy.array([[0, 1], [1, 0], [1, 1], [1, 1]])
>>> updates = ivy.array([9, 11, 12, 10])
>>> shape = (2, 2)
>>> scatter = ivy.scatter_nd(indices, updates, shape, reduction="max")
>>> print(scatter)
ivy.array([[ 0, 9], [11, 12]])
>>> indices = ivy.array([[[0], [1]], [[2], [1]]])
>>> updates = ivy.array([[9, 12], [11, 10]])
>>> shape = [4]
>>> scatter = ivy.scatter_nd(indices, updates, shape, reduction="replace")
>>> print(scatter)
ivy.array([ 9, 10, 11, 0])
>>> indices = ivy.array([[[1, 1], [0, 0]], [[1, 1], [0, 0]]])
>>> updates = ivy.array([[-1, 12], [11, 10]])
>>> shape = ivy.Shape([2, 2])
>>> result = ivy.zeros([2, 2])
>>> scatter = ivy.scatter_nd(indices, updates, shape, reduction="min", out=result)
>>> print(result)
ivy.array([[ 0., 0.], [ 0., -1.]])
With :class:`ivy.Container` input:
>>> indices = ivy.Container(a=ivy.array([[4],[3],[6]]),
... b=ivy.array([[5],[1],[2]]))
>>> updates = ivy.Container(a=ivy.array([100, 200, 200]),
... b=ivy.array([20, 30, 40]))
>>> shape = ivy.Container(a=ivy.array([10]),
... b=ivy.array([10]))
>>> z = ivy.scatter_nd(indices, updates, shape=shape)
>>> print(z)
{
a: ivy.array([0, 0, 0, 200, 100, 0, 200, 0, 0, 0]),
b: ivy.array([0, 30, 40, 0, 0, 20, 0, 0, 0, 0])
}
With :class:`ivy.Container` and :class:`ivy.Array` input:
>>> indices = ivy.array([[4],[3],[1]])
>>> updates = ivy.Container(a=ivy.array([10, 20, 30]),
... b=ivy.array([200, 300, 400]))
>>> z = ivy.Container(a=ivy.array([1, 2, 3, 4, 5]),
... b=ivy.array([10, 20, 30, 40, 50]))
>>> ivy.scatter_nd(indices, updates, reduction="replace", out=z)
>>> print(z)
{
a: ivy.array([1, 30, 3, 20, 10]),
b: ivy.array([10, 400, 30, 300, 200])
}
"""
return current_backend(indices).scatter_nd(
indices, updates, shape=shape, reduction=reduction, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def gather(
params: Union[ivy.Array, ivy.NativeArray],
indices: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: int = -1,
batch_dims: int = 0,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Gather slices from params at axis according to indices.
Parameters
----------
params
The array from which to gather values.
indices
The array which indicates the indices that will be gathered along
the specified axis.
axis
Optional int, the axis from which to gather.
Default is ``-1``.
batch_dims
Optional int, lets you gather different items from each element of a batch.
Default is ``0``.
out
Optional array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
New array with the values gathered at the specified indices along the
specified axis.
Both the description and the type hints above assume an array input for
simplicity, but this function is *nestable*, and therefore also accepts
:class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0., 1., 2.])
>>> y = ivy.array([1, 2])
>>> print(ivy.gather(x, y))
ivy.array([1., 2.])
>>> x = ivy.array([[0., 1., 2.],[3., 4., 5.]])
>>> y = ivy.array([[0, 1],[1, 2]])
>>> z = ivy.zeros((2, 2, 2))
>>> ivy.gather(x, y, out=z)
>>> print(z)
ivy.array([[[0., 1.],[1., 2.]],[[3., 4.],[4., 5.]]])
>>> x = ivy.array([[[0., 1.], [2., 3.]],
... [[8., 9.], [10., 11.]]])
>>> y = ivy.array([[0, 1]])
>>> z = ivy.zeros((1, 2, 2, 2))
>>> ivy.gather(x, y, axis=0, out=z)
>>> print(z)
ivy.array(
[[[[ 0., 1.],
[ 2., 3.]],
[[ 8., 9.],
[10., 11.]]]])
>>> x = ivy.array([[0, 10, 20, 0, 0],
... [0, 0, 0, 30, 40],
... [0, 10, 0, 0, 40]])
>>> y = ivy.array([[1, 2],[3, 4],[1, 4]])
>>> z = ivy.gather(x, y, batch_dims=1)
>>> print(z)
ivy.array([[10, 20], [30, 40],[10, 40]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a = ivy.array([0., 1., 2.]),
... b = ivy.array([4., 5., 6.]))
>>> y = ivy.Container(a = ivy.array([0, 1]),
... b = ivy.array([1, 2]))
>>> print(ivy.gather(x, y))
{
a: ivy.array([0., 1.]),
b: ivy.array([5., 6.])
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.Container(a = ivy.array([0., 1., 2.]),
... b = ivy.array([4., 5., 6.]))
>>> y = ivy.array([0, 1])
>>> print(ivy.gather(x, y))
{
a: ivy.array([0., 1.]),
b: ivy.array([4., 5.])
}
"""
return current_backend(params, indices).gather(
params, indices, axis=axis, batch_dims=batch_dims, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def gather_nd(
params: Union[ivy.Array, ivy.NativeArray],
indices: Union[ivy.Array, ivy.NativeArray],
/,
*,
batch_dims: int = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Gather slices from params into a array with shape specified by indices.
Parameters
----------
params
The array from which to gather values.
indices
Index array.
batch_dims
optional int, lets you gather different items from each element of a batch.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
New array of given shape, with the values gathered at the indices.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0., 1., 2., 3., 4., 5., 6.])
>>> y = ivy.array([1])
>>> print(ivy.gather_nd(x, y))
ivy.array(1.)
>>> x = ivy.array([[0., 1.], [2., 3.], [4., 5.]])
>>> y = ivy.array([[0],[1],[1]], dtype='int32')
>>> z = ivy.gather_nd(x,y,batch_dims=1)
>>> print(z)
ivy.array([0., 3., 5.])
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),b=ivy.array([4., 5., 6.]))
>>> y = ivy.array([1])
>>> print(ivy.gather_nd(x, y))
{
a: ivy.array(1.),
b: ivy.array(5.)
}
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[0., 10., 20.],[30.,40.,50.]]),
... b=ivy.array([[0., 100., 200.],[300.,400.,500.]]))
>>> y = ivy.Container(a=ivy.array([1,0]),
... b=ivy.array([0]))
>>> print(ivy.gather_nd(x, y))
{
a: ivy.array(30.),
b: ivy.array([0., 100., 200.])
}
"""
res = current_backend(params, indices).gather_nd(
params, indices, batch_dims=batch_dims
)
if ivy.exists(out):
return ivy.inplace_update(out, res)
return res
@handle_exceptions
@handle_nestable
@handle_array_function
def multiprocessing(context: Optional[str] = None):
"""Return backend-specific multiprocessing module.
Parameters
----------
context
The context of the multiprocessing, either 'fork', 'forkserver' or 'spawn'.
Default is ``None``.
Returns
-------
ret
Multiprocessing module
Examples
--------
>>> import ivy
Using the default context (None):
>>> mp_default = ivy.multiprocessing()
>>> print(mp_default)
<multiprocessing.context.DefaultContext object at 0x7f4e3193e520>
Specifying 'fork' as the context:
>>> mp_fork = ivy.multiprocessing(context='fork')
>>> print(mp_fork)
<multiprocessing.context.ForkContext object at 0x7f4e3193e580>
Specifying 'spawn' as the context:
>>> mp_spawn = ivy.multiprocessing(context='spawn')
>>> print(mp_spawn)
<multiprocessing.context.SpawnContext object at 0x7f4e3193e5e0>
Specifying 'forkserver' as the context:
>>> mp_forkserver = ivy.multiprocessing(context='forkserver')
>>> print(mp_forkserver)
<multiprocessing.context.ForkServerContext object at 0x7f4e3193e640>
"""
return current_backend().multiprocessing(context)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_native_arrays
@outputs_to_ivy_shapes
@outputs_to_ivy_arrays
@handle_array_function
@handle_device
def shape(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
as_array: bool = False,
) -> Union[ivy.Shape, ivy.NativeShape]:
"""Return the shape of the array ``x``.
Parameters
----------
x
Input array to infer the shape of.
as_array
Whether to return the shape as an array.
Default is False.
Returns
-------
ret
Shape of the array ``x``.
Examples
--------
>>> x = ivy.array([[-1, 0, 1], [1, 0, -1]])
>>> y = ivy.shape(x)
>>> z = ivy.shape(x, as_array = True)
>>> print(y)
(2, 3)
>>> print(z)
ivy.array([2, 3])
"""
return current_backend(x).shape(x, as_array=as_array)
ivy.shape_array_mode = shape_array_mode_stack[-1] if shape_array_mode_stack else False
@handle_exceptions
def set_shape_array_mode(mode: bool) -> None:
"""Set the mode of returning shape as ivy.Array to the given mode instance.
Parameters
----------
mode
boolean whether to return shape as ivy.Array
Examples
--------
>>> ivy.set_shape_array_mode(False)
>>> ivy.shape_array_mode
False
>>> ivy.set_shape_array_mode(True)
>>> ivy.shape_array_mode
True
"""
global shape_array_mode_stack
ivy.utils.assertions.check_isinstance(mode, bool)
shape_array_mode_stack.append(mode)
ivy.__setattr__("shape_array_mode", mode, True)
@handle_exceptions
def unset_shape_array_mode() -> None:
"""Reset the mode of returning shape as ivy.Array to the previous state.
Examples
--------
>>> ivy.set_shape_array_mode(True)
>>> ivy.shape_array_mode
True
>>> ivy.unset_shape_array_mode()
>>> ivy.shape_array_mode
False
"""
global shape_array_mode_stack
if shape_array_mode_stack:
shape_array_mode_stack.pop(-1)
mode = shape_array_mode_stack[-1] if shape_array_mode_stack else False
ivy.__setattr__("shape_array_mode", mode, True)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_array_function
@handle_device
def get_num_dims(
x: Union[ivy.Array, ivy.NativeArray], /, *, as_array: bool = False
) -> int:
"""Return the number of dimensions of the array x.
Parameters
----------
x
Input array to infer the number of dimensions for.
as_array
Whether to return the number of dimensions as an array. Default is ``False``.
Returns
-------
ret
Number of dimensions of the array x.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> a = ivy.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
... [[0, 0, 0], [0, 0, 0], [0, 0, 0]],
... [[0, 0, 0], [0, 0, 0], [0, 0, 0]]])
>>> b = ivy.get_num_dims(a, as_array=False)
>>> print(b)
3
With :class:`ivy.Container` input:
>>> a = ivy.Container(b = ivy.asarray([[0.,1.,1.],[1.,0.,0.],[8.,2.,3.]]))
>>> print(ivy.get_num_dims(a))
{
b: 2
}
>>> b = ivy.get_num_dims(a, as_array=True)
>>> print(b)
{
b: ivy.array(2)
}
"""
return current_backend(x).get_num_dims(x, as_array=as_array)
@handle_exceptions
def arg_info(fn: Callable, *, name: Optional[str] = None, idx: Optional[int] = None):
"""Return the index and `inspect.Parameter` representation of the specified
argument. In the form of a dict with keys "idx" and "param".
Parameters
----------
fn
The function to retrieve the argument information for
name
The name of the argument
idx
the index of the argument in the inputs
Returns
-------
ret
a `dict` containing the idx, and the `inspect.Parameter` for the argument,
which itself contains the parameter name, type, and other helpful information.
"""
ivy.utils.assertions.check_all_or_any_fn(
name,
idx,
fn=ivy.exists,
type="any",
limit=[1],
message="exactly one of the keyword arguments name or idx must be provided",
as_array=False,
)
params = inspect.signature(fn).parameters
if ivy.exists(name):
return {"idx": list(params).index(name), "param": params[name]}
return {"idx": idx, "param": list(params.values())[idx]}
def _valid_attrib_combinations(fn, backend, dnd_dict, first_attr_name, other_attr_name):
attr_list = ()
if hasattr(fn, other_attr_name):
attr_list = getattr(fn, other_attr_name)
if isinstance(attr_list, dict):
attr_list = attr_list.get(backend, ())
ivy.utils.assertions.check_false(
dnd_dict and attr_list,
f"Cannot specify both {first_attr_name} and {other_attr_name} "
"cannot both be defined for the same function",
)
def _is_valid_device_and_dtypes_attributes(fn: Callable) -> bool:
fn_unsupported_dnd = {}
fn_supported_dnd = {}
backend = ivy.current_backend_str()
if hasattr(fn, "unsupported_device_and_dtype"):
fn_unsupported_dnd = fn.unsupported_device_and_dtype
# if it's a nested dict, unwrap for the current backend
if fn_unsupported_dnd and isinstance(
list(fn_unsupported_dnd.__get__().values())[0], dict
):
fn_unsupported_dnd = fn_unsupported_dnd.get(backend, {})
if hasattr(fn, "supported_device_and_dtype"):
fn_supported_dnd = fn.supported_device_and_dtype
# if it's a nested dict, unwrap for the current backend
if fn_supported_dnd and isinstance(
list(fn_supported_dnd.__get__().values())[0], dict
):
fn_supported_dnd = fn_supported_dnd.get(backend, {})
ivy.utils.assertions.check_false(
fn_unsupported_dnd and fn_supported_dnd,
"unsupported_device_and_dtype and supported_device_and_dtype cannot"
" both be defined for the same function",
)
us = "unsupported_device_and_dtype"
_valid_attrib_combinations(fn, backend, fn_unsupported_dnd, us, "supported_devices")
_valid_attrib_combinations(fn, backend, fn_unsupported_dnd, us, "supported_dtypes")
ss = "supported_device_and_dtype"
_valid_attrib_combinations(fn, backend, fn_supported_dnd, ss, "unsupported_device")
_valid_attrib_combinations(fn, backend, fn_supported_dnd, ss, "unsupported_dtypes")
return True
def _all_dnd_combinations():
all_comb = {}
for device in ivy.all_devices:
all_comb[device] = ivy.all_dtypes
return all_comb
def _dnd_dict_intersection(a, b):
res = {}
for device in a:
if device in b:
intersection = set.intersection(set(a[device]), set(b[device]))
if intersection:
res[device] = tuple(intersection)
return res
def _dnd_dict_difference(a, b):
res = a
for device in list(a):
if device in b:
difference = set.difference(set(a[device]), set(b[device]))
if difference:
res[device] = tuple(difference)
else:
del res[device]
return res
def _dnd_dict_union(a, b):
res = {}
for device in set(list(a) + list(b)):
u1 = set(a.get(device, ()))
u2 = set(b.get(device, ()))
res[device] = tuple(set.union(u1, u2))
return res
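# Illustrative sketch of how the three helpers above combine device->dtypes
# dicts (device names and dtypes are arbitrary examples; tuple ordering may
# differ since sets are used internally):
#
#     a = {"cpu": ("float32", "int32"), "gpu:0": ("float32",)}
#     b = {"cpu": ("float32",)}
#     _dnd_dict_intersection(a, b)  # {"cpu": ("float32",)}
#     _dnd_dict_union(a, b)         # {"cpu": ("float32", "int32"),
#                                   #  "gpu:0": ("float32",)}
#     _dnd_dict_difference(a, b)    # {"cpu": ("int32",), "gpu:0": ("float32",)}
#                                   # (note: this helper updates ``a`` in place)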
# allow passing "integer" if all integer dtypes are supported/unsupported for e.g.
def _expand_typesets(dtypes):
typesets = {
"valid": ivy.valid_dtypes,
"numeric": ivy.valid_numeric_dtypes,
"float": ivy.valid_float_dtypes,
"integer": ivy.valid_int_dtypes,
"unsigned": ivy.valid_uint_dtypes,
"complex": ivy.valid_complex_dtypes,
}
dtypes = list(dtypes)
typeset_list = []
for i, dtype in reversed(list(enumerate(dtypes))):
if dtype in typesets:
typeset_list.extend(typesets[dtype])
dtypes.pop(i)
dtypes += typeset_list
return dtypes
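# Illustrative sketch: a typeset alias is replaced by every dtype it names,
# while concrete dtypes pass through untouched (the expanded values depend on
# the dtypes enabled in the current ivy build, and ordering may differ):
#
#     _expand_typesets(("complex", "int32"))
#     # -> ["int32", "complex64", "complex128"]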
def _get_devices_and_dtypes(fn, recurse=True, complement=True):
supported_devices = ivy.function_supported_devices(fn, recurse=recurse)
supported_dtypes = ivy.function_supported_dtypes(fn, recurse=recurse)
if hasattr(fn, "partial_mixed_handler"):
supported_devices = supported_devices["primary"]
supported_dtypes = supported_dtypes["primary"]
supported = {}
# Generate a base supported set from other attributes
for device in supported_devices:
supported[device] = supported_dtypes
is_frontend_fn = "frontend" in fn.__module__
is_backend_fn = "backend" in fn.__module__ and not is_frontend_fn
is_einops_fn = hasattr(fn, "__name__") and "einops" in fn.__name__
if not is_backend_fn and not is_frontend_fn and not is_einops_fn:
if complement:
all_comb = _all_dnd_combinations()
supported = _dnd_dict_difference(all_comb, supported)
return supported
backend = ivy.current_backend_str()
    # The attribute values are formatted like
    # fn.supported_device_and_dtype = {"cpu": ("float16",)}
if hasattr(fn, "supported_device_and_dtype"):
fn_supported_dnd = fn.supported_device_and_dtype.__get__()
if is_einops_fn and isinstance(fn_supported_dnd, dict):
fn_supported_dnd = fn_supported_dnd.get(backend, supported)
if fn_supported_dnd:
ivy.utils.assertions.check_isinstance(
list(fn_supported_dnd.values())[0], tuple
)
if isinstance(fn_supported_dnd, dict):
for device, dtypes in fn_supported_dnd.items():
fn_supported_dnd[device] = tuple(_expand_typesets(dtypes))
# dict intersection
supported = _dnd_dict_intersection(supported, fn_supported_dnd)
if hasattr(fn, "unsupported_device_and_dtype"):
fn_unsupported_dnd = fn.unsupported_device_and_dtype.__get__()
if is_einops_fn and isinstance(fn_unsupported_dnd, dict):
fn_unsupported_dnd = fn_unsupported_dnd.get(backend, supported)
if fn_unsupported_dnd:
ivy.utils.assertions.check_isinstance(
list(fn_unsupported_dnd.values())[0], tuple
)
if isinstance(fn_unsupported_dnd, dict):
for device, dtypes in fn_unsupported_dnd.items():
fn_unsupported_dnd[device] = tuple(_expand_typesets(dtypes))
# dict difference
supported = _dnd_dict_difference(supported, fn_unsupported_dnd)
if complement:
# dict difference
all_comb = _all_dnd_combinations()
supported = _dnd_dict_difference(all_comb, supported)
return supported
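# Illustrative sketch of the mapping produced above (device names and dtypes
# are placeholders; the exact contents depend on the active backend and on the
# function's attributes):
#
#     _get_devices_and_dtypes(fn, complement=False)
#     # -> {"cpu": ("float32", "int64", ...), "gpu": (...)}
#
# With complement=True the same mapping is subtracted from the set of all
# device/dtype combinations, yielding the unsupported combinations instead.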
@handle_exceptions
@handle_nestable
def function_supported_devices_and_dtypes(fn: Callable, recurse: bool = True) -> Dict:
"""Return the supported combination of devices and dtypes of the current
backend's function. The function returns a dict containing the supported
combination of devices and dtypes of the primary and compositional
implementations in case of partial mixed functions.
Parameters
----------
fn
The function to check for the supported device and dtype attribute
recurse
Whether to recurse into used ivy functions.
Default is ``True``.
Returns
-------
ret
Tuple or dict containing the supported devices and dtypes of the function
"""
ivy.utils.assertions.check_true(
_is_valid_device_and_dtypes_attributes(fn),
"supported_device_and_dtypes and unsupported_device_and_dtypes "
"attributes cannot both exist in a particular backend",
)
if hasattr(fn, "partial_mixed_handler"):
return {
"compositional": function_supported_devices_and_dtypes(
fn.compos, recurse=recurse
),
"primary": _get_devices_and_dtypes(fn, complement=False),
}
else:
supported_devices_dtypes = _get_devices_and_dtypes(fn, complement=False)
if recurse:
supported_devices_dtypes = ivy.functional.data_type._nested_get(
fn,
supported_devices_dtypes,
_dnd_dict_intersection,
function_supported_devices_and_dtypes,
wrapper=lambda x: x,
)
return supported_devices_dtypes
@handle_exceptions
@handle_nestable
def function_unsupported_devices_and_dtypes(fn: Callable, recurse: bool = True) -> Dict:
"""Return the unsupported combination of devices and dtypes of the current
backend's function. The function returns a dict containing the unsupported
combination of devices and dtypes of the primary and compositional
implementations in case of partial mixed functions.
Parameters
----------
fn
The function to check for the unsupported device and dtype attribute
recurse
Whether to recurse into used ivy functions.
Default is ``True``.
Returns
-------
ret
Tuple or dict containing the unsupported devices and dtypes of the function
"""
ivy.utils.assertions.check_true(
_is_valid_device_and_dtypes_attributes(fn),
"supported_device_and_dtypes and unsupported_device_and_dtypes "
"attributes cannot both exist in a particular backend",
)
if hasattr(fn, "partial_mixed_handler"):
return {
"compositional": function_unsupported_devices_and_dtypes(
fn.compos, recurse=recurse
),
"primary": _get_devices_and_dtypes(fn, complement=True),
}
else:
unsupported_devices_dtypes = _get_devices_and_dtypes(fn, complement=True)
if recurse:
unsupported_devices_dtypes = ivy.functional.data_type._nested_get(
fn,
unsupported_devices_dtypes,
_dnd_dict_union,
function_unsupported_devices_and_dtypes,
wrapper=lambda x: x,
)
return unsupported_devices_dtypes
@handle_exceptions
def vmap(
func: Callable,
in_axes: Union[int, Sequence[int], Sequence[None]] = 0,
out_axes: int = 0,
) -> Callable:
"""Vectorizing map. Creates a function which maps func over argument axes.
Parameters
----------
func
Function to be mapped over additional axes.
in_axes
An integer, None, or (nested) standard Python container
(tuple/list) thereof specifying which input array
        axes to map over. If each positional argument to func
        is an array, then in_axes can be an integer, a None,
        or a tuple of integers and Nones with length equal
        to the number of positional arguments to func. An
integer or None indicates which array axis to map
over for all arguments (with None indicating not to map any axis),
and a tuple indicates which axis to map for each
corresponding positional argument. Axis integers must
be in the range [-ndim, ndim) for each array,
where ndim is the number of dimensions (axes) of the
corresponding input array.
out_axes
An integer indicating where the mapped axis should appear in the output.
Returns
-------
ret
Batched/vectorized version of func with arguments
that correspond to those of func, but with extra
array axes at positions indicated by in_axes,
and a return value that corresponds
        to that of func, but with extra array axes
at positions indicated by out_axes.
This docstring is a summarised version of the `docstring
<https://jax.readthedocs.io/en/latest/_autosummary/jax.vmap.html#jax-vmap>`_
for vmap from JAX documentation.
Examples
--------
With :func:`ivy.matmul` and :class:`ivy.Array` input:
>>> x = ivy.array(ivy.arange(60).reshape((3, 5, 4)))
>>> y = ivy.array(ivy.arange(40).reshape((5, 4, 2)))
>>> z = ivy.vmap(ivy.matmul, (1, 0), 1)(x, y)
>>> z.shape
(3, 5, 2)
"""
# TODO: optimize in the numpy and tensorflow backends and extend functionality
return current_backend().vmap(func, in_axes, out_axes)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@to_native_arrays_and_back
@handle_device
def isin(
elements: Union[ivy.Array, ivy.NativeArray],
test_elements: Union[ivy.Array, ivy.NativeArray],
/,
*,
assume_unique: bool = False,
invert: bool = False,
) -> ivy.Array:
"""Test if each element of elements is in test_elements.
Parameters
----------
elements
input array
test_elements
values against which to test for each input element
assume_unique
If True, assumes both elements and test_elements contain unique elements,
which can speed up the calculation. Default value is False.
invert
If True, inverts the boolean return array, resulting in True values for
elements not in test_elements. Default value is False.
Returns
-------
ret
output a boolean array of the same shape as elements that is True for elements
in test_elements and False otherwise.
Examples
--------
>>> x = ivy.array([[10, 7, 4], [3, 2, 1]])
>>> y = ivy.array([1, 2, 3])
>>> ivy.isin(x, y)
ivy.array([[False, False, False], [ True, True, True]])
>>> x = ivy.array([3, 2, 1, 0])
>>> y = ivy.array([1, 2, 3])
>>> ivy.isin(x, y, invert=True)
ivy.array([False, False, False, True])
"""
return ivy.current_backend(elements, test_elements).isin(
elements, test_elements, assume_unique=assume_unique, invert=invert
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@inputs_to_native_arrays
@handle_device
def itemsize(
x: Union[ivy.Array, ivy.NativeArray],
/,
) -> int:
"""Return the size of the input array's elements.
Parameters
----------
x
The input array.
Returns
-------
ret
An integer specifying the element size in bytes.
Examples
--------
>>> x = ivy.array([1,2,3], dtype=ivy.float64)
>>> ivy.itemsize(x)
8
>>> x = ivy.array([1,2,3], dtype=ivy.complex128)
>>> ivy.itemsize(x)
16
"""
return ivy.current_backend(x).itemsize(x)
@handle_exceptions
@handle_nestable
@handle_device
def strides(
x: Union[ivy.Array, ivy.NativeArray],
/,
) -> Tuple[int]:
"""Return the input array's strides across each dimension.
Parameters
----------
x
The input array.
Returns
-------
ret
A tuple containing the strides.
Examples
--------
>>> x = ivy.array([[1, 5, 9], [2, 6, 10]])
>>> ivy.strides(x)
(4, 8)
"""
if ivy.is_native_array(x) or (ivy.is_ivy_array(x) and x.base is None):
return ivy.to_numpy(x).strides
# if x is an ivy array with a base,
# convert it to a numpy array with the same base:
ret = ivy.to_numpy(x.base)
ivy_numpy = ivy.with_backend("numpy")
for fn, args, kwargs, index in x._manipulation_stack:
ret = ivy_numpy.__dict__[fn](ret, *args, **kwargs)
ret = ret[index] if ivy.exists(index) else ret
return ret.to_native().strides
def is_ivy_nested_array(x: Any, /) -> bool:
"""Determine whether the input x is an Ivy Nested Array.
Parameters
----------
x
The input to check
Returns
-------
ret
Boolean, whether or not x is an ivy nested array.
"""
return isinstance(x, ivy.NestedArray)
| ivy/ivy/functional/ivy/general.py/0 | {
"file_path": "ivy/ivy/functional/ivy/general.py",
"repo_id": "ivy",
"token_count": 56877
} | 55 |
"""Collection of Ivy neural network activations as stateful classes."""
# local
import ivy
from ivy.stateful.module import Module
from typing import Literal, Optional
class GELU(Module):
def __init__(
self,
*,
approximate: bool = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
):
"""Apply the GELU activation function.
Parameters
----------
approximate
whether to use the gelu approximation algorithm or exact formulation.
complex_mode
Specifies how to handle complex input. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
"""
self._approximate = approximate
self._complex_mode = complex_mode
Module.__init__(self)
def _forward(self, x):
"""Perform forward pass of the GELU activation.
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the GELU activation *[batch_shape, d]*
"""
return ivy.gelu(
x,
approximate=self._approximate,
complex_mode=self._complex_mode,
)
def _extra_repr(self) -> str:
return f"approximate={self._approximate}, complex_mode={self._complex_mode}"
class GEGLU(Module):
def __init__(self):
"""Apply the GEGLU activation function."""
Module.__init__(self)
def _forward(self, inputs):
"""Perform forward pass of the GEGLU activation.
Parameters
----------
inputs
Inputs to process *[batch_shape, 2d]*.
Returns
-------
ret
The outputs following the GEGLU activation *[batch_shape, d]*
"""
x, gates = ivy.split(inputs, num_or_size_splits=2, axis=-1)
return ivy.gelu(gates) * x
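# Illustrative usage sketch (shapes only): the split halves the last
# dimension, so an input of shape [batch_shape, 2d] yields an output of
# shape [batch_shape, d], e.g.
#
#     geglu = GEGLU()
#     y = geglu(ivy.ones((4, 8)))  # y has shape (4, 4)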
class ReLU(Module):
def __init__(
self,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
):
"""Apply the RELU activation function.
Parameters
----------
complex_mode
Specifies how to handle complex input. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
"""
self._complex_mode = complex_mode
Module.__init__(self)
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the RELU activation *[batch_shape, d]*
"""
return ivy.relu(x, complex_mode=self._complex_mode)
def _extra_repr(self) -> str:
return f"complex_mode={self._complex_mode}"
class LeakyReLU(Module):
def __init__(
self,
alpha: float = 0.2,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
):
"""Apply the LEAKY RELU activation function.
Parameters
----------
alpha
Negative slope for ReLU.
complex_mode
Specifies how to handle complex input. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
"""
self._alpha = alpha
self._complex_mode = complex_mode
Module.__init__(self)
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the LEAKY RELU activation *[batch_shape, d]*
"""
return ivy.leaky_relu(
x,
alpha=self._alpha,
complex_mode=self._complex_mode,
)
def _extra_repr(self) -> str:
return f"alpha={self._alpha}, complex_mode={self._complex_mode}"
class LogSoftmax(Module):
def __init__(
self,
axis: Optional[int] = -1,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
):
"""Apply the LOG SOFTMAX activation function.
Parameters
----------
axis
            The dimension along which log_softmax is performed. Default is ``-1``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
"""
Module.__init__(self)
self._axis = axis
self._complex_mode = complex_mode
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the LOG SOFTMAX activation *[batch_shape, d]*
"""
return ivy.log_softmax(x, axis=self._axis, complex_mode=self._complex_mode)
def _extra_repr(self) -> str:
return f"axis={self._axis}, complex_mode={self._complex_mode}"
class Softmax(Module):
def __init__(
self,
axis: int = -1,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
):
"""Apply the SOFTMAX activation function.
Parameters
----------
axis
The axis which we apply softmax op on.
complex_mode
Specifies how to handle complex input. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
"""
Module.__init__(self)
self._axis = axis
self._complex_mode = complex_mode
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
axis
            The dimension softmax would be performed on. The default is ``-1``.
Returns
-------
ret
The outputs following the SOFTMAX activation *[batch_shape, d]*
"""
return ivy.softmax(x, axis=self._axis, complex_mode=self._complex_mode)
def _extra_repr(self) -> str:
return f"axis={self._axis}, complex_mode={self._complex_mode}"
class Softplus(Module):
def __init__(self, beta=1.0, threshold=None):
"""Apply the SOFTPLUS activation function."""
Module.__init__(self)
self._beta = beta
self._threshold = threshold
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
beta
            The beta value for the softplus formation. Default: ``1.0``.
threshold
values above this revert to a linear function. Default: ``None``.
Returns
-------
ret
The outputs following the SOFTPLUS activation *[batch_shape, d]*
"""
return ivy.softplus(x, beta=self._beta, threshold=self._threshold)
def _extra_repr(self) -> str:
return f"beta={self._beta}, threshold={self._threshold}"
class Mish(Module):
def __init__(self):
"""Apply the MISH activation function."""
Module.__init__(self)
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the MISH activation *[batch_shape, d]*
"""
return ivy.mish(x)
class SiLU(Module):
def __init__(self):
"""Apply the SiLU activation function."""
Module.__init__(self)
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the SiLU activation *[batch_shape, d]*
"""
return ivy.silu(x)
class Sigmoid(Module):
def __init__(self, complex_mode: Literal["split", "magnitude", "jax"] = "jax"):
"""Apply the SIGMOID activation function.
        Parameters
----------
complex_mode
Specifies how to handle complex input. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
"""
self._complex_mode = complex_mode
Module.__init__(self)
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the SIGMOID activation *[batch_shape, d]*
"""
return ivy.sigmoid(x, complex_mode=self._complex_mode)
def _extra_repr(self) -> str:
return f"complex_mode={self._complex_mode}"
class Tanh(Module):
def __init__(self, complex_mode: Literal["split", "magnitude", "jax"] = "jax"):
"""Apply the TANH activation function.
Parameters
----------
complex_mode
Specifies how to handle complex input. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
"""
self._complex_mode = complex_mode
Module.__init__(self)
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the TANH activation *[batch_shape, d]*
"""
return ivy.tanh(x, complex_mode=self._complex_mode)
def _extra_repr(self) -> str:
return f"complex_mode={self._complex_mode}"
class ReLU6(Module):
def __init__(self, complex_mode: Literal["split", "magnitude", "jax"] = "jax"):
"""Apply the TANH activation function.
Parameters
----------
complex_mode
Specifies how to handle complex input. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
"""
self._complex_mode = complex_mode
Module.__init__(self)
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the RELU6 activation *[batch_shape, d]*
"""
return ivy.relu6(x, complex_mode=self._complex_mode)
def _extra_repr(self) -> str:
return f"complex_mode={self._complex_mode}"
class Hardswish(Module):
def __init__(self, complex_mode: Literal["split", "magnitude", "jax"] = "jax"):
"""Apply the HARDSWISH activation function.
Parameters
----------
complex_mode
Specifies how to handle complex input. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
"""
self._complex_mode = complex_mode
Module.__init__(self)
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the HARDSWISH activation *[batch_shape, d]*
"""
return ivy.hardswish(x, complex_mode=self._complex_mode)
def _extra_repr(self) -> str:
return f"complex_mode={self._complex_mode}"
class Logit(Module):
def __init__(
self,
eps=None,
complex_mode="jax",
):
"""Apply the LOGIT activation function.
Parameters
----------
eps
The epsilon value for the logit formation. Default: ``None``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
"""
Module.__init__(self)
self._eps = eps
self._complex_mode = complex_mode
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the LOGIT activation *[batch_shape, d]*
"""
return ivy.logit(
x,
eps=self._eps,
complex_mode=self._complex_mode,
)
def _extra_repr(self) -> str:
return f"eps={self._eps}, complex_mode={self._complex_mode}"
class PReLU(Module):
def __init__(self, slope):
"""Apply the PRELU activation function."""
Module.__init__(self)
self._slope = slope
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
slope
The slope value for the prelu formation.
Returns
-------
ret
The outputs following the PRELU activation *[batch_shape, d]*
"""
return ivy.prelu(x, self._slope)
def _extra_repr(self) -> str:
return f"slope={self._slope}"
class SeLU(Module):
def __init__(self):
"""Apply the SELU activation function."""
Module.__init__(self)
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the SELU activation *[batch_shape, d]*
"""
return ivy.selu(x)
class ELU(Module):
def __init__(self, alpha=1.0):
"""Apply the ELU activation function."""
Module.__init__(self)
self._alpha = alpha
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
alpha
            scalar controlling the slope of the function for x <= 0. Default: ``1.0``
Returns
-------
ret
The outputs following the ELU activation *[batch_shape, d]*
"""
return ivy.elu(x, alpha=self._alpha)
def _extra_repr(self) -> str:
return f"alpha={self._alpha}"
class LogSigmoid(Module):
def __init__(self, complex_mode: Literal["split", "magnitude", "jax"] = "jax"):
"""Apply the LogSigmoid activation function.
        Parameters
----------
complex_mode
Specifies how to handle complex input. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
"""
self._complex_mode = complex_mode
Module.__init__(self)
def _forward(self, x):
"""
Parameters
----------
x
Inputs to process *[batch_shape, d]*.
Returns
-------
ret
The outputs following the LogSigmoid activation *[batch_shape, d]*
"""
return ivy.logsigmoid(x, complex_mode=self._complex_mode)
def _extra_repr(self) -> str:
return f"complex_mode={self._complex_mode}"
| ivy/ivy/stateful/activations.py/0 | {
"file_path": "ivy/ivy/stateful/activations.py",
"repo_id": "ivy",
"token_count": 6881
} | 56 |
import os
import re
from types import ModuleType, FunctionType
import logging
import importlib
import ivy
from ivy.func_wrapper import _wrap_function
from ivy.utils.exceptions import IvyException
_backends_subpackage_path = "ivy.functional.backends"
_sub_backend_dict = {}
_backend_to_sub_backends_dict = {}
# version specific sub-backend setting
def set_sub_backend_to_specific_version(sub_backend):
f = str(sub_backend.__name__)
f_sub = f[f.index("sub_backends") + 13 :]
f_back = f[f.index("backends") + 9 : f.index(".sub_backends")]
f_sub = importlib.import_module(f_sub)
f_back = importlib.import_module(f_back)
f_sub_version = f_sub.__version__
f_back_version = f_back.__version__
for key in list(sub_backend.__dict__):
if "_v_" in key:
orig_name = fn_name_from_version_specific_fn_name_sub_backend(
key, f_sub_version, f_back_version
)
if orig_name:
sub_backend.__dict__[orig_name] = sub_backend.__dict__[key]
sub_backend.__dict__[orig_name].__name__ = orig_name
def fn_name_from_version_specific_fn_name(name, version):
"""
Parameters
----------
name
the version specific name of the function for which the version support
is to be provided.
version
the version of the current framework for which the support is to be
provided, the version is inferred by importing the framework
Returns
-------
the name of the original function which will then point to the version
specific function
"""
# TODO: add tests
version = str(version)
if "+" in version:
version = tuple(map(int, version[: version.index("+")].split(".")))
else:
version = tuple(map(int, version.split(".")))
if "_to_" in name:
i = name.index("_v_")
e = name.index("_to_")
version_start = name[i + 3 : e]
version_start = tuple(map(int, version_start.split("p")))
version_end = name[e + 4 :]
version_end = tuple(map(int, version_end.split("p")))
if version_start <= version <= version_end:
return name[0:i]
elif "_and_above" in name:
i = name.index("_v_")
e = name.index("_and_")
version_start = name[i + 3 : e]
version_start = tuple(map(int, version_start.split("p")))
if version >= version_start:
return name[0:i]
else:
i = name.index("_v_")
e = name.index("_and_")
version_start = name[i + 3 : e]
version_start = tuple(map(int, version_start.split("p")))
if version <= version_start:
return name[0:i]
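# Illustrative sketch of the naming convention handled above (the function
# names are hypothetical); dots in version numbers are written as "p" in the
# suffix:
#
#     fn_name_from_version_specific_fn_name("gelu_v_1p12_and_above", "1.13.0")
#     # -> "gelu"
#     fn_name_from_version_specific_fn_name("gelu_v_1p5_to_1p11", "1.13.0")
#     # -> None (the version falls outside the supported range)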
def fn_name_from_version_specific_fn_name_sub_backend(
name, sub_backend_version, backend_version
):
"""
Parameters
----------
name
the version specific name of the function for which the version support
is to be provided.
    sub_backend_version
        the version of the sub-backend for which the support is to be provided,
        inferred by importing the sub-backend
    backend_version
        the version of the backend for which the support is to be provided,
        inferred by importing the backend
Returns
-------
the name of the original function which will then point to the version
specific function
"""
# TODO: add tests
sub_version = str(sub_backend_version)
back_version = str(backend_version)
if "+" in sub_version:
sub_version = tuple(map(int, sub_version[: sub_version.index("+")].split(".")))
else:
sub_version = tuple(map(int, sub_version.split(".")))
if "+" in back_version:
back_version = tuple(
map(int, back_version[: back_version.index("+")].split("."))
)
else:
back_version = tuple(map(int, back_version.split(".")))
v_occurences = [m.start() for m in re.finditer("_v_", name)]
fn_name_1 = name[: v_occurences[1] + 3]
fn_name_2 = name[: v_occurences[0]] + name[v_occurences[1] :]
ret_1 = fn_name_from_version_specific_fn_name(fn_name_1, sub_backend_version)
ret_2 = fn_name_from_version_specific_fn_name(fn_name_2, backend_version)
if ret_1 == ret_2:
return name[: v_occurences[0]]
# dynamic sub_backend detection
for backend in os.listdir(
os.path.join(
ivy.__path__[0].rpartition(os.path.sep)[0], # type: ignore
_backends_subpackage_path.replace(".", os.path.sep),
)
):
if not backend[0].isalpha():
continue
sub_backends_dir = os.path.join(
ivy.__path__[0].rpartition(os.path.sep)[0],
_backends_subpackage_path.replace(".", os.path.sep),
backend,
"sub_backends",
)
for sub_backend in os.listdir(sub_backends_dir):
if not sub_backend[0].isalpha():
continue
_sub_backend_dict[sub_backend] = (
f"{_backends_subpackage_path}.{backend}.sub_backends.{sub_backend}"
)
try:
_backend_to_sub_backends_dict[backend].append(sub_backend)
except KeyError:
_backend_to_sub_backends_dict[backend] = [sub_backend]
_all_sub_backends = []
for v in _backend_to_sub_backends_dict.values():
_all_sub_backends.extend(v)
original_backend_dict = None
def set_sub_backend(sub_backend_str: str):
if ivy.backend == "":
logging.warning("You must set a backend first")
return
if ivy.current_backend_str() not in _backend_to_sub_backends_dict:
logging.warning(
f"backend {ivy.current_backend_str()} does not have any"
" supported sub_backends"
)
return
if sub_backend_str not in _all_sub_backends:
raise IvyException(
"sub_backend must be one from"
f" {_backend_to_sub_backends_dict[ivy.current_backend_str()]}"
)
if sub_backend_str not in _backend_to_sub_backends_dict[ivy.current_backend_str()]:
logging.warning(
f"{ivy.current_backend_str()} does not support"
f" {sub_backend_str} as a sub_backend"
)
return
if sub_backend_str in ivy.current_sub_backends:
return
global original_backend_dict
if original_backend_dict is None:
original_backend_dict = ivy.__dict__.copy()
sub_backend = importlib.import_module(_sub_backend_dict[sub_backend_str])
set_sub_backend_to_specific_version(sub_backend)
_set_sub_backend_as_ivy(ivy.__dict__.copy(), ivy, sub_backend)
ivy.current_sub_backends.append(sub_backend_str)
# this is very similar to _set_backend_as_ivy in handler.py, with a minor change
def _set_sub_backend_as_ivy(
original: dict, target: ModuleType, sub_backend: ModuleType
):
backend_str = ivy.current_backend_str()
for k, v in original.items():
if k not in sub_backend.__dict__ and not k.startswith("__"):
target.__dict__[k] = v
if (
k in sub_backend.__dict__
and not k.startswith("__")
and isinstance(v, FunctionType)
):
target.__dict__[k] = _wrap_function(
key=k, to_wrap=sub_backend.__dict__[k], original=v, compositional=False
)
elif (
k in sub_backend.__dict__
and not k.startswith("__")
and isinstance(v, ModuleType)
):
            # we are creating a module to avoid inplace updating
            # the sub_backend's modules; this happens when
            # unsetting the sub_backend as we partially update the modules
mod = ModuleType(k)
mod.__name__ = v.__name__
mod.__file__ = v.__file__
target.__dict__[k] = mod
if (
isinstance(v, ModuleType)
and "ivy.functional." in v.__name__
and os.path.join("{}", "__init__.py").format(backend_str) not in v.__file__
and k in sub_backend.__dict__
):
_set_sub_backend_as_ivy(
v.__dict__,
target.__dict__[k],
sub_backend.__dict__[k],
)
def unset_sub_backend(sub_backend_str: str):
if sub_backend_str not in ivy.current_sub_backends:
return
global original_backend_dict
# The sub-backend is cached so this is fast
sub_backend = importlib.import_module(_sub_backend_dict[sub_backend_str])
_unset_sub_backend_from_ivy(
original_backend_dict, ivy, sub_backend, sub_backend.name
)
ivy.current_sub_backends.remove(sub_backend_str)
def _unset_sub_backend_from_ivy(
original: dict, target: ModuleType, sub_backend: ModuleType, sub_backend_str: str
):
backend_str = ivy.current_backend_str()
for k, v in sub_backend.__dict__.items():
if k in target.__dict__:
if (
isinstance(v, FunctionType)
                and f"sub_backends.{sub_backend_str}" in v.__module__
):
target.__dict__[k] = original[k]
if (
isinstance(v, ModuleType)
and "ivy.functional." in v.__name__
and os.path.join("{}", "__init__.py").format(backend_str)
not in v.__file__
):
_unset_sub_backend_from_ivy(
original[k].__dict__,
target.__dict__[k],
sub_backend.__dict__[k],
sub_backend_str,
)
def clear_sub_backends():
if ivy.current_sub_backends:
ivy.__dict__.update(original_backend_dict)
ivy.current_sub_backends.clear()
# This is only used in set_backend in handler.py
def _clear_current_sub_backends():
global original_backend_dict
original_backend_dict = None
if ivy.current_sub_backends:
ivy.current_sub_backends.clear()
def find_available_sub_backends(sub_backends_loc):
available_sub_backends = []
for sub_backend in os.listdir(sub_backends_loc):
if sub_backend.startswith("__") or not os.path.isdir(
os.path.join(sub_backends_loc, sub_backend)
):
continue
elif importlib.util.find_spec(sub_backend):
available_sub_backends.append(sub_backend)
return available_sub_backends
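# Illustrative sketch (the path and sub-backend name are examples): for a
# torch backend whose sub_backends directory contains an importable
# ``xformers`` package,
#
#     find_available_sub_backends(".../backends/torch/sub_backends")
#     # -> ["xformers"]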
| ivy/ivy/utils/backend/sub_backend_handler.py/0 | {
"file_path": "ivy/ivy/utils/backend/sub_backend_handler.py",
"repo_id": "ivy",
"token_count": 4711
} | 57 |
# global
import sys
import importlib
from ivy_tests.test_ivy.helpers.hypothesis_helpers.array_helpers import (
array_helpers_dtype_info_helper,
)
from ivy_tests.test_ivy.helpers.hypothesis_helpers.dtype_helpers import (
_get_type_dict_helper,
cast_filter_helper,
)
# local
from .testing_helpers import (
_get_supported_devices_dtypes_helper,
_get_method_supported_devices_dtypes_helper,
num_positional_args_helper,
)
from .function_testing import (
test_function_backend_computation,
test_function_ground_truth_computation,
test_method_backend_computation,
test_method_ground_truth_computation,
test_gradient_backend_computation,
test_gradient_ground_truth_computation,
_transpile_if_required_backend,
)
framework_path = "/opt/fw/"
def backend_proc(input_queue, output_queue):
# first argument is going to be the framework and its path
framework = input_queue.get()
path = framework_path + framework
sys.path.insert(1, path)
framework = framework.split("/")[0]
framework = importlib.import_module(framework)
# if jax, do more stuff
if framework.__name__ == "jax":
framework.config.update("jax_enable_x64", True)
while True:
# subsequent arguments will be passed
data = input_queue.get()
if data[0] == "supported dtypes":
# stage 1, calculating and returning supported dtypes
# of each backend
_, fn_module, fn_name, b = data
output_queue.put(
_get_supported_devices_dtypes_helper(b, fn_module, fn_name)
)
elif data[0] == "method supported dtypes":
# again stage 1, calculating and returning supported dtypes
_, method_name, class_module, class_name, backend_str = data
# since class module is name, we will import it to make it a module
class_module = importlib.import_module(class_module)
organized_dtypes = _get_method_supported_devices_dtypes_helper(
method_name, class_module, class_name, backend_str
)
output_queue.put(organized_dtypes)
elif data[0] == "dtype_info_helper":
_, backend, kind_dtype, dtype = data
dtype_info = array_helpers_dtype_info_helper(backend, kind_dtype, dtype)
output_queue.put(dtype_info)
elif data[0] == "_get_type_dict_helper":
_, framework, kind, is_frontend_test = data
dtype_ret = _get_type_dict_helper(framework, kind, is_frontend_test)
output_queue.put(dtype_ret)
elif data[0] == "num_positional_args_helper":
_, fn_name, framework = data
dtype_ret = num_positional_args_helper(fn_name, framework)
output_queue.put(dtype_ret)
elif data[0] == "cast_filter_helper":
_, d, dtype, x, current_backend = data
dtype_ret = cast_filter_helper(d, dtype, x, current_backend)
output_queue.put(dtype_ret)
elif data[0] == "function_backend_computation":
# it's the backend return computation
_, fw, test_flags, all_as_kwargs_np, input_dtypes, on_device, fn_name = data
(
ret_from_target,
ret_np_flat_from_target,
ret_device,
args_np,
arg_np_arrays,
arrays_args_indices,
kwargs_np,
arrays_kwargs_indices,
kwarg_np_arrays,
test_flags,
input_dtypes,
) = test_function_backend_computation(
fw, test_flags, all_as_kwargs_np, input_dtypes, on_device, fn_name
)
            # ret_from_target is None because the main process has
            # framework imports blocked
output_queue.put(
(
(None),
ret_np_flat_from_target,
ret_device,
args_np,
arg_np_arrays,
arrays_args_indices,
kwargs_np,
arrays_kwargs_indices,
kwarg_np_arrays,
test_flags,
input_dtypes,
)
)
elif data[0] == "function_ground_truth_computation":
# it's the ground_truth return computation
(
_,
ground_truth_backend,
on_device,
args_np,
arg_np_arrays,
arrays_args_indices,
kwargs_np,
arrays_kwargs_indices,
kwarg_np_arrays,
input_dtypes,
test_flags,
fn_name,
) = data
(
ret_from_gt,
ret_np_from_gt_flat,
ret_from_gt_device,
test_flags,
fw_list,
) = test_function_ground_truth_computation(
ground_truth_backend,
on_device,
args_np,
arg_np_arrays,
arrays_args_indices,
kwargs_np,
arrays_kwargs_indices,
kwarg_np_arrays,
input_dtypes,
test_flags,
fn_name,
)
            # ret_from_gt is None because the main process has
            # framework imports blocked
output_queue.put(
(
(None),
ret_np_from_gt_flat,
ret_from_gt_device,
test_flags,
fw_list,
)
)
elif data[0] == "gradient_backend_computation":
# gradient testing , part where it uses the backend
(
_,
backend_to_test,
args_np,
arg_np_vals,
args_idxs,
kwargs_np,
kwarg_np_vals,
kwargs_idxs,
input_dtypes,
test_flags,
on_device,
fn,
test_trace,
xs_grad_idxs,
ret_grad_idxs,
) = data
grads_np_flat = test_gradient_backend_computation(
backend_to_test,
args_np,
arg_np_vals,
args_idxs,
kwargs_np,
kwarg_np_vals,
kwargs_idxs,
input_dtypes,
test_flags,
on_device,
fn,
test_trace,
xs_grad_idxs,
ret_grad_idxs,
)
output_queue.put(grads_np_flat)
elif data[0] == "gradient_ground_truth_computation":
# gradient testing, part where it uses ground truth
(
_,
ground_truth_backend,
on_device,
fn,
input_dtypes,
all_as_kwargs_np,
args_np,
arg_np_vals,
args_idxs,
kwargs_np,
kwarg_np_vals,
test_flags,
kwargs_idxs,
test_trace,
xs_grad_idxs,
ret_grad_idxs,
) = data
grads_np_from_gt_flat = test_gradient_ground_truth_computation(
ground_truth_backend,
on_device,
fn,
input_dtypes,
all_as_kwargs_np,
args_np,
arg_np_vals,
args_idxs,
kwargs_np,
kwarg_np_vals,
test_flags,
kwargs_idxs,
test_trace,
xs_grad_idxs,
ret_grad_idxs,
)
output_queue.put(grads_np_from_gt_flat)
elif data[0] == "method_backend_computation":
(
_,
init_input_dtypes,
init_flags,
backend_to_test,
init_all_as_kwargs_np,
on_device,
method_input_dtypes,
method_flags,
method_all_as_kwargs_np,
class_name,
method_name,
init_with_v,
test_trace,
method_with_v,
) = data
(
ret,
ret_np_flat,
ret_device,
org_con_data,
args_np_method,
met_arg_np_vals,
met_args_idxs,
kwargs_np_method,
met_kwarg_np_vals,
met_kwargs_idxs,
v_np,
fw_list,
) = test_method_backend_computation(
init_input_dtypes,
init_flags,
backend_to_test,
init_all_as_kwargs_np,
on_device,
method_input_dtypes,
method_flags,
method_all_as_kwargs_np,
class_name,
method_name,
init_with_v,
test_trace,
method_with_v,
)
            # ret is None here because the main process doesn't import the framework
output_queue.put(
(
(None),
ret_np_flat,
ret_device,
org_con_data,
args_np_method,
met_arg_np_vals,
met_args_idxs,
kwargs_np_method,
met_kwarg_np_vals,
met_kwargs_idxs,
v_np,
fw_list,
)
)
elif data[0] == "method_ground_truth_computation":
(
_,
ground_truth_backend,
on_device,
org_con_data,
args_np_method,
met_arg_np_vals,
met_args_idxs,
kwargs_np_method,
met_kwarg_np_vals,
met_kwargs_idxs,
method_input_dtypes,
method_flags,
class_name,
method_name,
test_trace,
v_np,
) = data
(
ret_from_gt,
ret_np_from_gt_flat,
ret_from_gt_device,
fw_list2,
) = test_method_ground_truth_computation(
ground_truth_backend,
on_device,
org_con_data,
args_np_method,
met_arg_np_vals,
met_args_idxs,
kwargs_np_method,
met_kwarg_np_vals,
met_kwargs_idxs,
method_input_dtypes,
method_flags,
class_name,
method_name,
test_trace,
v_np,
)
            # ret_from_gt is None here because the main process
            # doesn't import the framework
output_queue.put(
((None), ret_np_from_gt_flat, ret_from_gt_device, fw_list2)
)
if data[0] == "transpile_if_required_backend":
_, backend, fn_name, args_np, kwargs_np = data
_transpile_if_required_backend(backend, fn_name, args_np, kwargs_np)
if not data:
break
# process the data
# TODO incomplete
def frontend_proc(input_queue, output_queue):
# first argument is going to be the framework and its path
framework = input_queue.get()
sys.path.insert(1, f"{framework_path}{framework}")
importlib.import_module(framework.split("/")[0])
while True:
# subsequent arguments will be passed
data = input_queue.get()
if not data:
break
# process the data
| ivy/ivy_tests/test_ivy/helpers/multiprocessing.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/helpers/multiprocessing.py",
"repo_id": "ivy",
"token_count": 7337
} | 58 |
from .base import FrontendConfigWithBackend
def get_config():
return TorchFrontendConfig()
class TorchFrontendConfig(FrontendConfigWithBackend):
backend_str = "torch"
| ivy/ivy_tests/test_ivy/test_frontends/config/torch.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/config/torch.py",
"repo_id": "ivy",
"token_count": 55
} | 59 |
import pytest
@pytest.fixture(scope="session")
def frontend():
return "mindspore"
| ivy/ivy_tests/test_ivy/test_frontends/test_mindspore/conftest.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_mindspore/conftest.py",
"repo_id": "ivy",
"token_count": 32
} | 60 |
# global
from numpy import mgrid as np_mgrid, ogrid as np_ogrid
from hypothesis import strategies as st
import ivy
# local
from ivy.functional.frontends.numpy import mgrid, ogrid
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test, handle_frontend_method
# --- Helpers --- #
# --------------- #
@st.composite
def _get_dtype_and_range(draw):
dim = draw(helpers.ints(min_value=2, max_value=5))
dtype = draw(helpers.get_dtypes("float", index=1, full=False))
start = draw(
helpers.array_values(dtype=dtype[0], shape=(dim,), min_value=-50, max_value=0)
)
stop = draw(
helpers.array_values(dtype=dtype[0], shape=(dim,), min_value=1, max_value=50)
)
return dtype * 2, start, stop
# helpers
@st.composite
def _get_range_for_grid(draw):
start = draw(st.booleans())
step = draw(st.booleans())
if start:
start = draw(helpers.ints(min_value=-25, max_value=25))
stop = draw(st.booleans())
if stop:
stop = draw(helpers.ints(min_value=30, max_value=100))
else:
stop = None
else:
start = None
stop = draw(helpers.ints(min_value=30, max_value=100))
if step:
step = draw(helpers.ints(min_value=1, max_value=5))
return start, stop, step
return start, stop, None
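# Illustrative sketch of the draws produced above (values are examples): the
# strategy yields (start, stop, step) tuples such as (-10, 42, 3),
# (None, 57, None) or (17, None, 2), so the grid tests below exercise every
# slicing pattern.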
# --- Main --- #
# ------------ #
# arange
@handle_frontend_test(
fn_tree="numpy.arange",
start=helpers.ints(min_value=-50, max_value=0),
stop=helpers.ints(min_value=1, max_value=50),
step=helpers.ints(min_value=1, max_value=5),
dtype=helpers.get_dtypes("float"),
test_with_out=st.just(False),
)
def test_numpy_arange(
start,
stop,
step,
dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=[ivy.as_ivy_dtype("int8")],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
start=start,
stop=stop,
step=step,
dtype=dtype[0],
)
@handle_frontend_test(
fn_tree="numpy.geomspace",
dtype_start_stop=_get_dtype_and_range(),
num=helpers.ints(min_value=5, max_value=50),
endpoint=st.booleans(),
test_with_out=st.just(False),
)
def test_numpy_geomspace(
dtype_start_stop,
num,
endpoint,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, start, stop = dtype_start_stop
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-1,
start=start,
stop=stop,
num=num,
endpoint=endpoint,
dtype=input_dtypes[0],
)
# linspace
@handle_frontend_test(
fn_tree="numpy.linspace",
dtype_start_stop=_get_dtype_and_range(),
num=helpers.ints(min_value=2, max_value=5),
axis=helpers.ints(min_value=-1, max_value=0),
test_with_out=st.just(False),
)
def test_numpy_linspace(
dtype_start_stop,
num,
axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, start, stop = dtype_start_stop
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
start=start,
stop=stop,
num=num,
endpoint=True,
retstep=False,
dtype=input_dtypes[0],
axis=axis,
)
# logspace
@handle_frontend_test(
fn_tree="numpy.logspace",
dtype_start_stop=_get_dtype_and_range(),
num=helpers.ints(min_value=5, max_value=50),
base=helpers.ints(min_value=2, max_value=10),
axis=helpers.ints(min_value=-1, max_value=0),
test_with_out=st.just(False),
)
def test_numpy_logspace(
dtype_start_stop,
num,
base,
axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, start, stop = dtype_start_stop
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
start=start,
stop=stop,
num=num,
endpoint=True,
base=base,
dtype=input_dtypes[0],
axis=axis,
)
# meshgrid
@handle_frontend_test(
fn_tree="numpy.meshgrid",
dtype_and_arrays=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=st.integers(min_value=1, max_value=4),
min_num_dims=1,
max_num_dims=1,
min_dim_size=1,
shared_dtype=True,
),
copy=st.booleans(),
sparse=st.booleans(),
indexing=st.sampled_from(["xy", "ij"]),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_numpy_meshgrid(
*,
dtype_and_arrays,
copy,
sparse,
indexing,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, arrays = dtype_and_arrays
kw = {}
i = 0
for x_ in arrays:
kw[f"x{i}"] = x_
i += 1
test_flags.num_positional_args = len(arrays)
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
**kw,
copy=copy,
sparse=sparse,
indexing=indexing,
)
# mgrid
@handle_frontend_method(
class_tree="ivy.functional.frontends.numpy.mgrid",
init_tree="numpy.mgrid",
method_name="__getitem__",
range=_get_range_for_grid(),
)
def test_numpy_mgrid(
range,
class_,
method_name,
backend_fw,
frontend,
):
start, stop, step = range
if start and stop and step:
ret = mgrid[start:stop:step]
ret_np = np_mgrid[start:stop:step]
elif start and step:
ret = mgrid[start::step]
ret_np = np_mgrid[start::step]
elif stop and step:
ret = mgrid[:stop:step]
ret_np = np_mgrid[:stop:step]
elif start and stop:
ret = mgrid[start:stop]
ret_np = np_mgrid[start:stop]
elif start:
ret = mgrid[start:]
ret_np = np_mgrid[start:]
else:
ret = mgrid[:stop]
ret_np = np_mgrid[:stop]
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=frontend)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_np,
rtol=1e-03,
backend=backend_fw,
ground_truth_backend=frontend,
)
# ogrid
@handle_frontend_method(
class_tree="ivy.functional.frontends.numpy.ogrid",
init_tree="numpy.ogrid",
method_name="__getitem__",
range=_get_range_for_grid(),
)
def test_numpy_ogrid(range, class_, method_name, backend_fw, frontend):
start, stop, step = range
if start and stop and step:
ret = ogrid[start:stop:step]
ret_np = np_ogrid[start:stop:step]
elif start and step:
ret = ogrid[start::step]
ret_np = np_ogrid[start::step]
elif stop and step:
ret = ogrid[:stop:step]
ret_np = np_ogrid[:stop:step]
elif start and stop:
ret = ogrid[start:stop]
ret_np = np_ogrid[start:stop]
elif start:
ret = ogrid[start:]
ret_np = np_ogrid[start:]
else:
ret = ogrid[:stop]
ret_np = np_ogrid[:stop]
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=frontend)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_np,
rtol=1e-03,
backend=backend_fw,
ground_truth_backend=frontend,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_creation_routines/test_numerical_ranges.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_creation_routines/test_numerical_ranges.py",
"repo_id": "ivy",
"token_count": 4015
} | 61 |
# global
from hypothesis import assume, strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_elementwise import ( # noqa
_float_power_helper,
)
# add
@handle_frontend_test(
fn_tree="numpy.add",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="add"
),
)
def test_numpy_add(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where, input_dtype=input_dtypes, test_flags=test_flags
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# divide
@handle_frontend_test(
fn_tree="numpy.divide",
aliases=["numpy.true_divide"],
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="divide"
),
)
def test_numpy_divide(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
assume(not np.any(np.isclose(xs[1], 0.0)))
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-03,
rtol=1e-03,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# divmod
@handle_frontend_test(
fn_tree="numpy.divmod",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=6,
safety_factor_scale="linear",
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
test_with_out=st.just(False),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="divmod"
),
)
def test_numpy_divmod(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
assume(not np.any(np.isclose(xs[1], 0)))
if dtype:
assume(np.dtype(dtype) >= np.dtype(input_dtypes[0]))
assume(np.dtype(dtype) >= np.dtype(input_dtypes[1]))
assume(not np.any(np.isclose(xs[1].astype(dtype), 0)))
assume("uint" not in input_dtypes[0] and "uint" not in input_dtypes[1])
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
)
# float_power
@handle_frontend_test(
fn_tree="numpy.float_power",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[lambda: _float_power_helper()],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="float_power"
),
)
def test_numpy_float_power(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
xs = list(xs[0])
input_dtypes = list(input_dtypes[0])
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
# removing casting options as they raise errors for this function
assume(casting == "same_kind")
assume(dtype != "bool")
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# floor_divide
@handle_frontend_test(
fn_tree="numpy.floor_divide",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=4,
shared_dtype=True,
safety_factor_scale="linear",
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="floor_divide"
),
)
def test_numpy_floor_divide(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
assume(not np.any(np.isclose(x[1], 0, rtol=1e-1, atol=1e-1)))
assume(not np.any(np.isclose(x[0], 0, rtol=1e-1, atol=1e-1)))
if dtype:
assume(np.dtype(dtype) >= np.dtype(input_dtypes[0]))
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
atol=1e-2,
rtol=1e-2,
)
@handle_frontend_test(
fn_tree="numpy.fmod",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
large_abs_safety_factor=6,
small_abs_safety_factor=6,
safety_factor_scale="log",
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="fmod"
),
)
def test_numpy_fmod(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
assume(not np.any(np.isclose(xs[1], 0.0)))
assume(not np.any(np.isclose(xs[0], 0.0)))
if dtype:
assume(not np.any(np.isclose(xs[1].astype(dtype), 0.0)))
assume(not np.any(np.isclose(xs[0].astype(dtype), 0.0)))
assume("uint" not in input_dtypes[0] and "uint" not in input_dtypes[1])
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# mod
@handle_frontend_test(
fn_tree="numpy.mod",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
min_value=0,
exclude_min=True,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="mod"
),
)
def test_numpy_mod(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
rtol=1e-5,
atol=1e-5,
)
# modf
@handle_frontend_test(
fn_tree="numpy.modf",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_value=0,
exclude_min=True,
)
],
),
where=np_frontend_helpers.where(),
test_with_out=st.just(False),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="modf"
),
)
def test_numpy_modf(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# multiply
@handle_frontend_test(
fn_tree="numpy.multiply",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="multiply"
),
)
def test_numpy_multiply(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# negative
@handle_frontend_test(
fn_tree="numpy.negative",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="negative"
),
)
def test_numpy_negative(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# positive
@handle_frontend_test(
fn_tree="numpy.positive",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="positive"
),
)
def test_numpy_positive(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# power
@handle_frontend_test(
fn_tree="numpy.power",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
min_value=0,
max_value=7,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="power"
),
)
def test_numpy_power(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# reciprocal
@handle_frontend_test(
fn_tree="numpy.reciprocal",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
small_abs_safety_factor=4,
large_abs_safety_factor=4,
safety_factor_scale="log",
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="reciprocal"
),
)
def test_numpy_reciprocal(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
assume(not np.any(np.isclose(x[0], 0)))
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
x=x[0],
out=None,
where=where,
casting=casting,
rtol=1e-2,
atol=1e-2,
order="K",
dtype=dtype,
subok=True,
)
# remainder
@handle_frontend_test(
fn_tree="numpy.remainder",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="remainder"
),
)
def test_numpy_remainder(
dtypes_values_casting,
where,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
assume(not np.any(np.isclose(xs[1], 0.0)))
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# subtract
@handle_frontend_test(
fn_tree="numpy.subtract",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="subtract"
),
)
def test_numpy_subtract(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# vdot
@handle_frontend_test(
fn_tree="numpy.vdot",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2
),
test_with_out=st.just(False),
)
def test_numpy_vdot(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
a=xs[0],
b=xs[1],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_arithmetic_operations.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_arithmetic_operations.py",
"repo_id": "ivy",
"token_count": 11196
} | 62 |
import pytest
@pytest.fixture(scope="session")
def frontend():
return "onnx"
| ivy/ivy_tests/test_ivy/test_frontends/test_onnx/conftest.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_onnx/conftest.py",
"repo_id": "ivy",
"token_count": 32
} | 63 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
@handle_frontend_test(
fn_tree="tensorflow.nest.flatten",
dtype_and_x=helpers.dtype_and_values(
min_num_dims=2,
max_num_dims=5,
min_dim_size=3,
max_dim_size=5,
),
expand_composites=st.booleans(),
use_array=st.booleans(),
)
def test_tensorflow_flatten(
*,
dtype_and_x,
expand_composites,
use_array,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
structure=x[0] if use_array else x[0].tolist(),
expand_composites=expand_composites,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_nest.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_nest.py",
"repo_id": "ivy",
"token_count": 473
} | 64 |
# global
from hypothesis import settings, strategies as st
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers.testing_helpers import handle_frontend_test
import ivy.functional.frontends.torch as torch_frontend
# can_cast
@handle_frontend_test(
fn_tree="torch.can_cast",
from_=helpers.get_dtypes("valid", full=False),
to=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
number_positional_args=st.just(2),
)
# there are 100 combinations of dtypes, so run 200 examples to make sure all are tested
@settings(max_examples=200)
def test_torch_can_cast(
*,
from_,
to,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=[],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
from_=ivy.Dtype(from_[0]),
to=ivy.Dtype(to[0]),
)
# promote_types
@handle_frontend_test(
fn_tree="torch.promote_types",
type1=helpers.get_dtypes("valid", full=False),
type2=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
# there are 100 combinations of dtypes, so run 200 examples to make sure all are tested
@settings(max_examples=200)
def test_torch_promote_types(
*,
type1,
type2,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=[],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
type1=type1[0],
type2=type2[0],
)
assert ret == repr(frontend_ret[0]).split(".")[1]
# set_default_dtype
@handle_frontend_test(
fn_tree="torch.set_default_dtype",
dtype=helpers.get_dtypes("float", full=False),
)
def test_torch_set_default_dtype(
*,
dtype,
):
dtype = dtype[0]
torch_frontend.set_default_dtype(dtype)
assert torch_frontend.get_default_dtype() == dtype
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_dtype.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_dtype.py",
"repo_id": "ivy",
"token_count": 964
} | 65 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
import math
def calculate_same_padding(kernel_size, stride, shape):
padding = tuple(
max(
0,
math.ceil(((shape[i] - 1) * stride[i] + kernel_size[i] - shape[i]) / 2),
)
for i in range(len(kernel_size))
)
if all(kernel_size[i] / 2 >= padding[i] for i in range(len(kernel_size))):
if is_same_padding(padding, stride, kernel_size, shape):
return padding
return [0] * len(shape)
def is_same_padding(padding, stride, kernel_size, input_shape):
output_shape = tuple(
(input_shape[i] + 2 * padding[i] - kernel_size[i]) // stride[i] + 1
for i in range(len(padding))
)
return all(
output_shape[i] == math.ceil(input_shape[i] / stride[i])
for i in range(len(padding))
)
# adaptive_avg_pool1d
@handle_frontend_test(
fn_tree="torch.nn.functional.adaptive_avg_pool1d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=3,
min_dim_size=5,
max_value=100,
min_value=-100,
),
output_size=helpers.ints(min_value=1, max_value=10),
test_with_out=st.just(False),
)
def test_torch_adaptive_avg_pool1d(
*,
dtype_and_x,
output_size,
on_device,
frontend,
test_flags,
fn_tree,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
output_size=output_size,
atol=1e-2,
)
# adaptive_avg_pool2d
@handle_frontend_test(
fn_tree="torch.nn.functional.adaptive_avg_pool2d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=3,
max_num_dims=4,
min_dim_size=5,
max_value=100,
min_value=-100,
),
output_size=st.one_of(
st.tuples(
helpers.ints(min_value=1, max_value=10),
helpers.ints(min_value=1, max_value=10),
),
helpers.ints(min_value=1, max_value=10),
),
test_with_out=st.just(False),
)
def test_torch_adaptive_avg_pool2d(
*,
dtype_and_x,
output_size,
on_device,
frontend,
test_flags,
fn_tree,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
output_size=output_size,
atol=1e-2,
)
# adaptive_max_pool2d
@handle_frontend_test(
fn_tree="torch.nn.functional.adaptive_max_pool2d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=3,
max_num_dims=4,
min_dim_size=5,
# Setting max and min value because this operation in paddle is not
# numerically stable
max_value=100,
min_value=-100,
),
output_size=st.one_of(
st.tuples(
helpers.ints(min_value=1, max_value=10),
helpers.ints(min_value=1, max_value=10),
),
helpers.ints(min_value=1, max_value=10),
),
test_with_out=st.just(False),
)
def test_torch_adaptive_max_pool2d(
*,
dtype_and_x,
output_size,
on_device,
frontend,
test_flags,
fn_tree,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
output_size=output_size,
atol=1e-2,
)
@handle_frontend_test(
fn_tree="torch.nn.functional.adaptive_max_pool3d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=4,
max_num_dims=5,
min_dim_size=2,
max_value=100,
min_value=-100,
),
output_size=st.one_of(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=1, max_value=5),
),
helpers.ints(min_value=1, max_value=5),
),
test_with_out=st.just(False),
)
def test_torch_adaptive_max_pool3d(
*,
dtype_and_x,
output_size,
on_device,
frontend,
test_flags,
fn_tree,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
output_size=output_size,
)
# avg_pool1d
@handle_frontend_test(
fn_tree="torch.nn.functional.avg_pool1d",
dtype_x_k_s=helpers.arrays_for_pooling(
min_dims=3,
max_dims=3,
min_side=1,
max_side=3,
data_format="channel_first",
only_explicit_padding=True,
),
count_include_pad=st.booleans(),
ceil_mode=st.booleans(),
test_with_out=st.just(False),
)
def test_torch_avg_pool1d(
dtype_x_k_s,
count_include_pad,
ceil_mode,
*,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x, kernel_size, stride, padding = dtype_x_k_s
if not isinstance(padding, int):
padding = [pad[0] for pad in padding]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
kernel_size=kernel_size,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
# avg_pool2d
@handle_frontend_test(
fn_tree="torch.nn.functional.avg_pool2d",
dtype_x_k_s=helpers.arrays_for_pooling(
min_dims=4,
max_dims=4,
min_side=1,
max_side=4,
only_explicit_padding=True,
data_format="channel_first",
),
ceil_mode=st.booleans(),
count_include_pad=st.booleans(),
test_with_out=st.just(False),
)
def test_torch_avg_pool2d(
dtype_x_k_s,
count_include_pad,
ceil_mode,
*,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x, kernel_size, stride, padding = dtype_x_k_s
if not isinstance(padding, int):
padding = [pad[0] for pad in padding]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
kernel_size=kernel_size,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
divisor_override=None,
)
# avg_pool3d
@handle_frontend_test(
fn_tree="torch.nn.functional.avg_pool3d",
dtype_x_k_s=helpers.arrays_for_pooling(
min_dims=5,
max_dims=5,
min_side=2,
max_side=4,
only_explicit_padding=True,
data_format="channel_first",
),
count_include_pad=st.booleans(),
ceil_mode=st.booleans(),
divisor_override=st.just(None),
test_with_out=st.just(False),
)
def test_torch_avg_pool3d(
*,
dtype_x_k_s,
count_include_pad,
ceil_mode,
divisor_override,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x, kernel_size, stride, padding = dtype_x_k_s
if not isinstance(padding, int):
padding = [pad[0] for pad in padding]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
kernel_size=kernel_size,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
divisor_override=divisor_override,
)
# lp_pool1d
@handle_frontend_test(
fn_tree="torch.nn.functional.lp_pool1d",
dtype_x_k_s=helpers.arrays_for_pooling(
min_dims=3,
max_dims=3,
min_side=1,
max_side=3,
data_format="channel_first",
),
norm_type=helpers.ints(min_value=1, max_value=6),
test_with_out=st.just(False),
)
def test_torch_lp_pool1d(
dtype_x_k_s,
norm_type,
*,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x, kernel_size, stride, _ = dtype_x_k_s
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
norm_type=norm_type if norm_type > 0 else 1,
kernel_size=kernel_size[0],
stride=stride[0],
ceil_mode=False,
)
# lp_pool2d
@handle_frontend_test(
fn_tree="torch.nn.functional.lp_pool2d",
dtype_x_k_s=helpers.arrays_for_pooling(
min_dims=4,
max_dims=4,
min_side=1,
max_side=4,
data_format="channel_first",
),
norm_type=helpers.ints(min_value=1, max_value=6),
test_with_out=st.just(False),
)
def test_torch_lp_pool2d(
dtype_x_k_s,
norm_type,
*,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x, kernel_size, stride, _ = dtype_x_k_s
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
norm_type=norm_type if norm_type > 0 else 1,
kernel_size=kernel_size,
stride=stride[0],
ceil_mode=False,
)
# max_pool1d
@handle_frontend_test(
fn_tree="torch.nn.functional.max_pool1d",
dtype_x_k_s=helpers.arrays_for_pooling(
min_dims=3,
max_dims=3,
min_side=1,
max_side=3,
only_explicit_padding=True,
return_dilation=True,
data_format="channel_first",
),
test_with_out=st.just(False),
ceil_mode=st.booleans(),
)
def test_torch_max_pool1d(
dtype_x_k_s,
ceil_mode,
*,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, x, kernel, stride, padding, dilation = dtype_x_k_s
if not isinstance(padding, int):
padding = [pad[0] for pad in padding]
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
kernel_size=kernel,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
)
# max_pool2d
@handle_frontend_test(
fn_tree="torch.nn.functional.max_pool2d",
x_k_s_p=helpers.arrays_for_pooling(
min_dims=4,
max_dims=4,
min_side=1,
max_side=4,
only_explicit_padding=True,
return_dilation=True,
data_format="channel_first",
),
test_with_out=st.just(False),
ceil_mode=st.booleans(),
)
def test_torch_max_pool2d(
x_k_s_p,
ceil_mode,
*,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, x, kernel, stride, padding, dilation = x_k_s_p
if not isinstance(padding, int):
padding = [pad[0] for pad in padding]
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
kernel_size=kernel,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
)
# max_pool3d
@handle_frontend_test(
fn_tree="torch.nn.functional.max_pool3d",
x_k_s_p=helpers.arrays_for_pooling(
min_dims=5,
max_dims=5,
min_side=1,
max_side=5,
only_explicit_padding=True,
return_dilation=True,
data_format="channel_first",
),
test_with_out=st.just(False),
ceil_mode=st.booleans(),
)
def test_torch_max_pool3d(
x_k_s_p,
ceil_mode,
*,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, x, kernel, stride, padding, dilation = x_k_s_p
if not isinstance(padding, int):
padding = [pad[0] for pad in padding]
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
kernel_size=kernel,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_pooling_functions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_pooling_functions.py",
"repo_id": "ivy",
"token_count": 7049
} | 66 |
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_method
CLASS_TREE = "ivy.functional.frontends.xgboost.core.DMatrix"
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="xgboost.DMatrix",
method_name="num_col",
init_array=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(kind="valid"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
max_dim_size=5,
min_value=1,
max_value=10,
),
)
def test_xgboost_instance_num_col(
init_array,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
dtype, arr = init_array
helpers.test_frontend_method(
init_input_dtypes=dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": arr[0]},
method_input_dtypes=[],
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="xgboost.DMatrix",
method_name="num_row",
init_array=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(kind="valid"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
max_dim_size=5,
min_value=1,
max_value=10,
),
)
def test_xgboost_instance_num_row(
init_array,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
dtype, arr = init_array
helpers.test_frontend_method(
init_input_dtypes=dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": arr[0]},
method_input_dtypes=[],
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_xgboost/test_core.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_xgboost/test_core.py",
"repo_id": "ivy",
"token_count": 1026
} | 67 |
"""Collection of tests for sorting functions."""
# global
from hypothesis import strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
# --- Helpers --- #
# --------------- #
@st.composite
def _searchsorted_case1(draw):
# 1-D for x, N-D for v
dtype_x, x = draw(
helpers.dtype_and_values(
dtype=draw(helpers.get_dtypes("numeric", full=False, key="searchsorted")),
shape=(draw(st.integers(min_value=1, max_value=5)),),
)
)
dtype_v, v = draw(
helpers.dtype_and_values(
dtype=draw(helpers.get_dtypes("numeric", full=False, key="searchsorted")),
min_num_dims=1,
)
)
return dtype_x + dtype_v, x + v
@st.composite
def _searchsorted_case2(draw):
# N-D for x, N-D for v
arb_leading_dims = draw(
helpers.get_shape(
min_num_dims=1,
)
)
nx = draw(st.integers(min_value=1, max_value=5))
nv = draw(st.integers(min_value=1, max_value=5))
dtype_x, x = draw(
helpers.dtype_and_values(
dtype=draw(helpers.get_dtypes("numeric", full=False, key="searchsorted")),
shape=arb_leading_dims + (nx,),
)
)
dtype_v, v = draw(
helpers.dtype_and_values(
dtype=draw(helpers.get_dtypes("numeric", full=False, key="searchsorted")),
shape=arb_leading_dims + (nv,),
)
)
return dtype_x + dtype_v, x + v
# --- Main --- #
# ------------ #
# argsort
@handle_test(
fn_tree="functional.ivy.argsort",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
min_dim_size=1,
min_axis=-1,
max_axis=0,
),
descending=st.booleans(),
stable=st.booleans(),
test_gradients=st.just(False),
)
def test_argsort(
*, dtype_x_axis, descending, stable, test_flags, backend_fw, fn_name, on_device
):
dtype, x, axis = dtype_x_axis
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
axis=axis,
descending=descending,
stable=stable,
)
# msort
@handle_test(
fn_tree="functional.ivy.msort",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
min_value=-100,
max_value=100,
),
test_gradients=st.just(False),
test_with_copy=st.just(True),
)
def test_msort(dtype_and_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
)
@handle_test(
fn_tree="functional.ivy.searchsorted",
data=st.data(),
dtypes_and_xs=st.one_of(_searchsorted_case1(), _searchsorted_case2()),
side=st.sampled_from(["left", "right"]),
use_sorter=st.booleans(),
ret_dtype=helpers.get_dtypes("valid", full=False),
test_gradients=st.just(False),
)
def test_searchsorted(
*,
data,
dtypes_and_xs,
side,
use_sorter,
ret_dtype,
test_flags,
backend_fw,
fn_name,
on_device
):
dtypes, xs = dtypes_and_xs
if use_sorter:
sorter_dtype = data.draw(helpers.get_dtypes("signed_integer", full=False))
dtypes += sorter_dtype
sorter = np.argsort(xs[0]).astype(sorter_dtype[0])
else:
sorter = None
xs[0] = np.sort(xs[0])
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=xs[0],
v=xs[1],
side=side,
sorter=sorter,
ret_dtype=ret_dtype[0],
)
# sort
@handle_test(
fn_tree="functional.ivy.sort",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
min_dim_size=1,
min_axis=-1,
max_axis=0,
),
descending=st.booleans(),
stable=st.booleans(),
test_gradients=st.just(False),
test_with_copy=st.just(True),
)
def test_sort(
*, dtype_x_axis, descending, stable, test_flags, backend_fw, fn_name, on_device
):
dtype, x, axis = dtype_x_axis
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
axis=axis,
descending=descending,
stable=stable,
)
| ivy/ivy_tests/test_ivy/test_functional/test_core/test_sorting.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_sorting.py",
"repo_id": "ivy",
"token_count": 2444
} | 68 |
# global
from hypothesis import strategies as st
# local
import numpy as np
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
# --- Helpers --- #
# --------------- #
# unravel_index
@st.composite
def max_value_as_shape_prod(draw):
shape = draw(
helpers.get_shape(
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
)
)
dtype_and_x = draw(
helpers.dtype_values_axis(
available_dtypes=["int32", "int64"],
min_value=0,
max_value=np.prod(shape) - 1,
min_num_dims=1,
)
)
return dtype_and_x, shape
@handle_test(
fn_tree="functional.ivy.experimental.unravel_index",
dtype_x_shape=max_value_as_shape_prod(),
test_gradients=st.just(False),
)
def test_unravel_index(*, dtype_x_shape, test_flags, backend_fw, fn_name, on_device):
dtype_and_x, shape = dtype_x_shape
input_dtype, x = dtype_and_x[0], dtype_and_x[1]
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
indices=np.asarray(x[0], dtype=input_dtype[0]),
shape=shape,
)
| ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_searching.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_searching.py",
"repo_id": "ivy",
"token_count": 649
} | 69 |
# global
import pytest
from typing import List, Tuple, Dict, Optional, Union
# local
import ivy
# --- Helpers --- #
# --------------- #
def _fn0(xs: Optional[List[ivy.Array]] = None):
return xs
def _fn1(
a: Union[ivy.Array, ivy.NativeArray],
b: str = "hello",
c: Optional[int] = None,
d: ivy.NativeArray = None,
):
return a, b, c, d
def _fn2(
a: Tuple[Union[ivy.Array, ivy.NativeArray, ivy.Container]],
bs: Tuple[str] = ("a", "b", "c"),
cs: Optional[Dict[str, ivy.Array]] = None,
):
return a, bs, cs
# --- Main --- #
# ------------ #
@pytest.mark.parametrize(
"fn_n_spec",
[
(_fn0, [[(0, "xs"), "optional", int]]),
(_fn1, [[(0, "a")], [(3, "d"), "optional"]]),
(_fn2, [[(0, "a"), int], [(2, "cs"), "optional", str]]),
],
)
def test_fn_array_spec(fn_n_spec, backend_fw):
ivy.set_backend(backend_fw)
fn, spec = fn_n_spec
assert ivy.fn_array_spec(fn) == spec
ivy.previous_backend()
| ivy/ivy_tests/test_ivy/test_misc/test_inspection.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_inspection.py",
"repo_id": "ivy",
"token_count": 457
} | 70 |
import importlib
import os
import sys
import glob
def get_all_functions_from_directory(root_dir, startswith="test"):
if not os.path.exists(root_dir):
print("Invalid directory")
sys.exit(1)
functions_names = []
for filename in glob.iglob(f"{root_dir}/**/*.py", recursive=True):
if len(filename) >= 2 and filename[:2] == "./":
filename = filename[2:]
filename = filename.replace(".py", "")
filename = filename.replace("/", ".")
module = importlib.import_module(filename)
module_functions_names = [
obj for obj in dir(module) if obj.startswith(startswith)
]
functions_names.extend(module_functions_names)
return functions_names
def check_duplicate():
fn_test_core = get_all_functions_from_directory(
"ivy_tests/test_ivy/test_functional/test_core"
)
fn_test_nn = get_all_functions_from_directory(
"ivy_tests/test_ivy/test_functional/test_nn"
)
fn_test_experimental = get_all_functions_from_directory(
"ivy_tests/test_ivy/test_functional/test_experimental"
)
fn_ivy_test = set(fn_test_core).union(set(fn_test_nn))
common_list = fn_ivy_test.intersection(set(fn_test_experimental))
return common_list
if __name__ == "__main__":
common_set = check_duplicate()
if len(common_set) != 0:
print("This function already exists in the functional API.")
sys.exit(1)
| ivy/scripts/duplicate.py/0 | {
"file_path": "ivy/scripts/duplicate.py",
"repo_id": "ivy",
"token_count": 611
} | 71 |
import os
import random
import ast
BACKENDS = ["jax", "numpy", "tensorflow", "torch", "paddle"]
def is_test_function(node):
if isinstance(node, ast.FunctionDef):
return node.name.startswith("test_")
return False
def extract_tests_from_file(filename):
with open(filename, "r") as file:
try:
module = ast.parse(file.read())
except SyntaxError:
print(f"Syntax error in file: {filename}")
return []
return [
f"{filename}::{node.name}" for node in module.body if is_test_function(node)
]
def extract_tests_from_dir(directory):
test_files = []
for root, _, files in os.walk(directory):
for file in files:
if file.endswith(".py") and "helpers" not in root:
full_path = os.path.join(root, file)
test_files.extend(extract_tests_from_file(full_path))
return test_files
def get_all_tests():
test_names_without_backend = extract_tests_from_dir("ivy_tests/test_ivy")
test_names_without_backend = sorted(set(test_names_without_backend))
random.Random(4).shuffle(test_names_without_backend)
test_names = []
for test_name in test_names_without_backend:
for backend in BACKENDS:
test_backend = f"{test_name},{backend}"
test_names.append(test_backend)
return test_names
| ivy/scripts/setup_tests/get_all_tests.py/0 | {
"file_path": "ivy/scripts/setup_tests/get_all_tests.py",
"repo_id": "ivy",
"token_count": 604
} | 72 |
FROM arm64v8/debian:buster
# ensure local python is preferred over distribution python
ENV PATH /usr/local/bin:$PATH
# http://bugs.python.org/issue19846
# > At the moment, setting "LANG=C" on a Linux system *fundamentally breaks Python 3*, and that's not OK.
ENV LANG C.UTF-8
# runtime dependencies
RUN set -eux; \
apt-get update; \
apt-get install -y --no-install-recommends \
ca-certificates \
netbase \
tzdata \
; \
rm -rf /var/lib/apt/lists/*
ENV GPG_KEY E3FF2839C048B25C084DEBE9B26995E310250568
ENV PYTHON_VERSION 3.8.18
RUN set -eux; \
\
savedAptMark="$(apt-mark showmanual)"; \
apt-get update; \
apt-get install -y --no-install-recommends \
dpkg-dev \
gcc \
gnupg \
libbluetooth-dev \
libbz2-dev \
libc6-dev \
libdb-dev \
libexpat1-dev \
libffi-dev \
libgdbm-dev \
liblzma-dev \
libncursesw5-dev \
libreadline-dev \
libsqlite3-dev \
libssl-dev \
make \
tk-dev \
uuid-dev \
wget \
xz-utils \
zlib1g-dev \
; \
\
wget -O python.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz"; \
wget -O python.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc"; \
GNUPGHOME="$(mktemp -d)"; export GNUPGHOME; \
gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys "$GPG_KEY"; \
gpg --batch --verify python.tar.xz.asc python.tar.xz; \
gpgconf --kill all; \
rm -rf "$GNUPGHOME" python.tar.xz.asc; \
mkdir -p /usr/src/python; \
tar --extract --directory /usr/src/python --strip-components=1 --file python.tar.xz; \
rm python.tar.xz; \
\
cd /usr/src/python; \
gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; \
./configure \
--build="$gnuArch" \
--enable-loadable-sqlite-extensions \
--enable-optimizations \
--enable-option-checking=fatal \
--enable-shared \
--with-system-expat \
--without-ensurepip \
; \
nproc="$(nproc)"; \
EXTRA_CFLAGS="$(dpkg-buildflags --get CFLAGS)"; \
LDFLAGS="$(dpkg-buildflags --get LDFLAGS)"; \
LDFLAGS="${LDFLAGS:--Wl},--strip-all"; \
make -j "$nproc" \
"EXTRA_CFLAGS=${EXTRA_CFLAGS:-}" \
"LDFLAGS=${LDFLAGS:-}" \
"PROFILE_TASK=${PROFILE_TASK:-}" \
; \
# https://github.com/docker-library/python/issues/784
# prevent accidental usage of a system installed libpython of the same version
rm python; \
make -j "$nproc" \
"EXTRA_CFLAGS=${EXTRA_CFLAGS:-}" \
"LDFLAGS=${LDFLAGS:--Wl},-rpath='\$\$ORIGIN/../lib'" \
"PROFILE_TASK=${PROFILE_TASK:-}" \
python \
; \
make install; \
\
cd /; \
rm -rf /usr/src/python; \
\
find /usr/local -depth \
\( \
\( -type d -a \( -name test -o -name tests -o -name idle_test \) \) \
-o \( -type f -a \( -name '*.pyc' -o -name '*.pyo' -o -name 'libpython*.a' \) \) \
-o \( -type f -a -name 'wininst-*.exe' \) \
\) -exec rm -rf '{}' + \
; \
\
ldconfig; \
\
apt-mark auto '.*' > /dev/null; \
apt-mark manual $savedAptMark; \
find /usr/local -type f -executable -not \( -name '*tkinter*' \) -exec ldd '{}' ';' \
| awk '/=>/ { so = $(NF-1); if (index(so, "/usr/local/") == 1) { next }; gsub("^/(usr/)?", "", so); printf "*%s\n", so }' \
| sort -u \
| xargs -r dpkg-query --search \
| cut -d: -f1 \
| sort -u \
| xargs -r apt-mark manual \
; \
apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \
rm -rf /var/lib/apt/lists/*; \
\
python3 --version
# make some useful symlinks that are expected to exist ("/usr/local/bin/python" and friends)
RUN set -eux; \
for src in idle3 pydoc3 python3 python3-config; do \
dst="$(echo "$src" | tr -d 3)"; \
[ -s "/usr/local/bin/$src" ]; \
[ ! -e "/usr/local/bin/$dst" ]; \
ln -svT "$src" "/usr/local/bin/$dst"; \
done
# if this is called "PIP_VERSION", pip explodes with "ValueError: invalid truth value '<VERSION>'"
ENV PYTHON_PIP_VERSION 23.0.1
# https://github.com/docker-library/python/issues/365
ENV PYTHON_SETUPTOOLS_VERSION 57.5.0
# https://github.com/pypa/get-pip
ENV PYTHON_GET_PIP_URL https://github.com/pypa/get-pip/raw/9af82b715db434abb94a0a6f3569f43e72157346/public/get-pip.py
ENV PYTHON_GET_PIP_SHA256 45a2bb8bf2bb5eff16fdd00faef6f29731831c7c59bd9fc2bf1f3bed511ff1fe
RUN set -eux; \
\
savedAptMark="$(apt-mark showmanual)"; \
apt-get update; \
apt-get install -y --no-install-recommends wget; \
\
wget -O get-pip.py "$PYTHON_GET_PIP_URL"; \
echo "$PYTHON_GET_PIP_SHA256 *get-pip.py" | sha256sum -c -; \
\
apt-mark auto '.*' > /dev/null; \
[ -z "$savedAptMark" ] || apt-mark manual $savedAptMark > /dev/null; \
apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \
rm -rf /var/lib/apt/lists/*; \
\
export PYTHONDONTWRITEBYTECODE=1; \
\
python get-pip.py \
--disable-pip-version-check \
--no-cache-dir \
--no-compile \
"pip==$PYTHON_PIP_VERSION" \
"setuptools==$PYTHON_SETUPTOOLS_VERSION" \
; \
rm -f get-pip.py; \
\
pip --version
CMD ["python3"]
WORKDIR /ivy
ARG CLI
ENV PYTHONUNBUFFERED 1
# to fix protobuf conflicts
ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION python
# Install Python and its dependencies
RUN apt-get update && \
apt-get install sudo -y build-essential && \
apt-get install -y python3-pip python3-tk && \
apt-get install -y libsm6 libxext6 libxrender-dev libgl1-mesa-glx && \
apt-get install -y git && \
apt-get install -y rsync && \
apt-get install -y libusb-1.0-0 && \
apt-get install -y libglib2.0-0 && \
apt-get install -y jq && \
pip3 install --upgrade pip && \
pip3 install pip-autoremove &&\
pip3 install setuptools==58.5.3
# Install Ivy Upstream
RUN git clone --progress --recurse-submodules https://github.com/unifyai/ivy --depth 1 && \
cd ivy && \
cd ivy_tests/array_api_testing/test_array_api && \
pip3 install --no-cache-dir -r requirements.txt
# install optional requirements
COPY requirements/optional_apple_silicon_1.txt .
COPY requirements/optional_apple_silicon_2.txt .
COPY requirements/requirements.txt .
# setting torch path early on because torch-scatter needs it
ENV PYTHONPATH "/opt/fw/torch"
# torch and torch-scatter are installed separately because they cause issues
RUN pip3 install --no-cache-dir torch --target '/opt/fw/torch' --extra-index-url https://download.pytorch.org/whl/cpu
RUN export ver=$(pip show torch --path '/opt/fw/torch' | grep Version | cut -d ' ' -f2) && \
pip3 install --target '/opt/fw/torch' --no-cache-dir --upgrade torch-scatter -f https://data.pyg.org/whl/torch-$ver.html
# requirement mappings direct which dependencies are to be installed and where
COPY /docker/requirement_mappings_apple_silicon.json .
SHELL ["/bin/bash", "-c"]
# installing requirements based on mappings in location /opt/fw/$framework
RUN jq -r 'to_entries[] | select(.value != [""]) | .key as $dir | .value[] | "/opt/fw/\($dir) \(.)"' requirement_mappings_apple_silicon.json | \
while IFS= read -r line; do \
dir=$(echo "$line" | awk '{print $1}'); \
pkg=$(echo "$line" | awk '{print $2}'); \
printf "Installing $pkg\n"; \
pip install --ignore-installed --target "$dir" "$pkg"; \
done
# install requirements.txt and the optionals, with the mapped dependencies filtered out
RUN pip install --upgrade -r requirements.txt &&\
cat optional_apple_silicon_1.txt optional_apple_silicon_2.txt > tmp.txt &&\
jq -r 'to_entries[] | [.key] + .value | select(length > 0 or (. == "")) | .[]' requirement_mappings_apple_silicon.json | sort -u | xargs -I {} sed -i '/{}/d;/torch/d;/torch-scatter/d;/jax\[.*\]/d;/paddlepaddle/d' tmp.txt && pip install -r tmp.txt
# add all the directories to environment path so that python knows where to find them
ENV PYTHONPATH "/opt/fw/mxnet:/opt/fw/numpy:/opt/fw/jax:/opt/fw/torch:/opt/fw/tensorflow:/opt/fw/paddle"
COPY scripts/test_dependencies.py .
RUN python3 test_dependencies.py -fp requirements.txt && \
rm -rf requirements.txt && \
rm -rf optional_apple_silicon_1.txt && \
rm -rf optional_apple_silicon_2.txt && \
rm -rf tmp.txt && \
rm -rf test_dependencies.py && \
rm -rf requirement_mappings_apple_silicon.json
| ivy/docker/DockerfileAppleSilicon/0 | {
"file_path": "ivy/docker/DockerfileAppleSilicon",
"repo_id": "ivy",
"token_count": 3458
} | 0 |
{{ name | escape | underline }}
.. autofunction:: ivy.{{ name }}
.. autoskippablemethod:: ivy.Array.{{ name }}
.. autoskippablemethod:: ivy.Container.{{ name }}
| ivy/docs/_templates/functional_module.rst/0 | {
"file_path": "ivy/docs/_templates/functional_module.rst",
"repo_id": "ivy",
"token_count": 56
} | 1 |
Contributor Program
===================
The goal of the Contributor program is to support contributors in the community who would like to work more closely
with our team.
Embark on a rewarding journey with Unify by `signing up <https://forms.gle/Fs6WK3GtsmizZn9SA>`_ as a Contributor.
Let's innovate together!
We've created a promotion workflow to help you advance through the different tiers of contribution,
from Contributor to Core Contributor, Rising Contributor, and finally, Top Contributor.
Our promotion structure is based on our badge system.
Check out `Contributor Rewards <contributor_rewards.rst>`_ for more information about them!
Contributor
-----------
Start as a Contributor by earning any of the Initial Badges.
It's straightforward — complete one of the open tasks listed and you're in!
This first step opens the door for you to become an integral part of our community.
Core Contributor
----------------
Get elevated to Core Contributor status by earning a Bronze Badge.
As a Core Contributor, you're recognized not just for your skills but also for your growing commitment to our mission.
You'll have the opportunity to engage more deeply with our projects and play an important role in shaping the future of our initiatives.
This tier is a testament to your dedication and a stepping stone to even greater achievements within our community.
As a token of appreciation, you'll be eligible for a $25 welcome gift.
Rising Contributor
------------------
Ascend to the Rising Contributor level by acquiring a Silver Badge.
This tier will allow you to be more deeply involved with our core group.
You'll join our GitHub team, enabling you to directly commit changes to :code:`main`, manage PRs made by others in the community, etc.
You'll also be invited to all our internal meetings to be able to follow along with everything happening in the team.
As a Rising Contributor, you're encouraged to tackle complex tasks, pushing your skills to new heights.
Top Contributor
---------------
Achieve a Gold Badge to reach the peak of our program as a Top Contributor.
This tier is for those who demonstrate exceptional dedication and expertise.
You'll gain the privilege of engaging with bounty-holding internal tasks and join our Bounty Program,
where completing top-priority issues can earn you cash rewards.
Upon reaching this level, we will be delighted to acknowledge your contributions with a special token of appreciation.
| ivy/docs/overview/contributing/volunteer_program.rst/0 | {
"file_path": "ivy/docs/overview/contributing/volunteer_program.rst",
"repo_id": "ivy",
"token_count": 542
} | 2 |
Function Types
==============
.. _`_wrap_function`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/func_wrapper.py#L412
.. _`backend setting`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/backend_handler.py#L204
.. _`handle_nestable`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/func_wrapper.py#L370
.. _`at import time`: https://github.com/unifyai/ivy/blob/055dcb3b863b70c666890c580a1d6cb9677de854/ivy/__init__.py#L114
.. _`add_ivy_array_instance_methods`: https://github.com/unifyai/ivy/blob/055dcb3b863b70c666890c580a1d6cb9677de854/ivy/array/wrapping.py#L26
.. _`add_ivy_container_instance_methods`: https://github.com/unifyai/ivy/blob/055dcb3b863b70c666890c580a1d6cb9677de854/ivy/container/wrapping.py#L69
.. _`from being added`: https://github.com/unifyai/ivy/blob/055dcb3b863b70c666890c580a1d6cb9677de854/ivy/container/wrapping.py#L78
.. _`_function_w_arrays_n_out_handled`: https://github.com/unifyai/ivy/blob/ee0da7d142ba690a317a4fe00a4dd43cf8634642/ivy/func_wrapper.py#L166
.. _`NON_WRAPPED_FUNCTIONS`: https://github.com/unifyai/ivy/blob/fdaea62380c9892e679eba37f26c14a7333013fe/ivy/func_wrapper.py#L9
.. _`ivy.set_backend`: https://github.com/unifyai/ivy/blob/30b7ca4f8a50a52f51884738fe7323883ce891bd/ivy/backend_handler.py#L153
.. _`ivy.get_backend`: https://github.com/unifyai/ivy/blob/30b7ca4f8a50a52f51884738fe7323883ce891bd/ivy/backend_handler.py#L211
.. _`ivy.nested_map`: https://github.com/unifyai/ivy/blob/08ebc4d6d5e200dcbb8498b213538ffd550767f3/ivy/functional/ivy/nest.py#L618
.. _`ivy.index_nest`: https://github.com/unifyai/ivy/blob/08ebc4d6d5e200dcbb8498b213538ffd550767f3/ivy/functional/ivy/nest.py#L15
.. _`ivy.set_default_dtype`: https://github.com/unifyai/ivy/blob/8482eb3fcadd0721f339a1a55c3f3b9f5c86d8ba/ivy/functional/ivy/data_type.py#L1555
.. _`ivy.set_default_device`: https://github.com/unifyai/ivy/blob/30b7ca4f8a50a52f51884738fe7323883ce891bd/ivy/functional/ivy/device.py#L464
.. _`submodules`: https://github.com/unifyai/ivy/tree/30b7ca4f8a50a52f51884738fe7323883ce891bd/ivy/functional/ivy
.. _`nest.py`: https://github.com/unifyai/ivy/blob/08ebc4d6d5e200dcbb8498b213538ffd550767f3/ivy/functional/ivy/nest.py
.. _`ivy.default`: https://github.com/unifyai/ivy/blob/f18df2e19d6a5a56463fa1a15760c555a30cb2b2/ivy/functional/ivy/general.py#L622
.. _`ivy.cache_fn`: https://github.com/unifyai/ivy/blob/f18df2e19d6a5a56463fa1a15760c555a30cb2b2/ivy/functional/ivy/general.py#L747
.. _`ivy.stable_divide`: https://github.com/unifyai/ivy/blob/f18df2e19d6a5a56463fa1a15760c555a30cb2b2/ivy/functional/ivy/general.py#L928
.. _`ivy.can_cast`: https://github.com/unifyai/ivy/blob/8482eb3fcadd0721f339a1a55c3f3b9f5c86d8ba/ivy/functional/ivy/data_type.py#L246
.. _`ivy.dtype`: https://github.com/unifyai/ivy/blob/8482eb3fcadd0721f339a1a55c3f3b9f5c86d8ba/ivy/functional/ivy/data_type.py#L1096
.. _`ivy.dev`: https://github.com/unifyai/ivy/blob/08ebc4d6d5e200dcbb8498b213538ffd550767f3/ivy/functional/ivy/device.py#L325
.. _`ivy.default_dtype`: https://github.com/unifyai/ivy/blob/8482eb3fcadd0721f339a1a55c3f3b9f5c86d8ba/ivy/functional/ivy/data_type.py#L879
.. _`ivy.get_all_arrays_on_dev`: https://github.com/unifyai/ivy/blob/08ebc4d6d5e200dcbb8498b213538ffd550767f3/ivy/functional/ivy/device.py#L131
.. _`inside the _wrap_function`: https://github.com/unifyai/ivy/blob/1a00001017ceca11baf0a7b83adcc51234d43fce/ivy/func_wrapper.py#L1115
.. _`FN_DECORATORS`: https://github.com/unifyai/ivy/blob/1a00001017ceca11baf0a7b83adcc51234d43fce/ivy/func_wrapper.py#L15
.. _`handle_partial_mixed_function`: https://github.com/unifyai/ivy/blob/1a00001017ceca11baf0a7b83adcc51234d43fce/ivy/functional/ivy/layers.py#L77
.. _`partial_mixed_handler`: https://github.com/unifyai/ivy/blob/1a00001017ceca11baf0a7b83adcc51234d43fce/ivy/functional/backends/torch/layers.py#L29
.. _`handle`: https://github.com/unifyai/ivy/blob/0ef2888cbabeaa8f61ce8aaea4f1175071f7c396/ivy/func_wrapper.py#L1027-L1030
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`function types thread`: https://discord.com/channels/799879767196958751/1189905318650576896
Firstly, we explain the difference between *primary*, *compositional*, *mixed* and *standalone* functions.
These four function categorizations are all **mutually exclusive**, and combined they constitute the set of **all** functions in Ivy, as outlined in the simple Venn diagram below.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/deep_dive/function_types/four_function_types.png?raw=true
:align: center
:width: 50%
:class: dark-light
Primary Functions
-----------------
*Primary* functions are essentially the lowest level building blocks in Ivy.
Each primary function has a unique backend-specific implementation for each backend specified in :mod:`ivy/functional/backends/backend_name/category_name.py`.
These are generally implemented as light wrapping around an existing function in the backend framework, which serves a near-identical purpose.
Primary functions must both be specified in :mod:`ivy/functional/ivy/category_name.py` and also in each of the backend files :mod:`ivy/functional/backends/backend_name/category_name.py`.
The function in :mod:`ivy/functional/ivy/category_name.py` includes the type hints, docstring, and docstring examples (explained in more detail in the subsequent sections), but does not include an actual implementation.
Instead, in :mod:`ivy/functional/ivy/category_name.py`, primary functions simply defer to the backend-specific implementation.
For example, the code for :func:`ivy.tan` in :mod:`ivy/functional/ivy/elementwise.py` (with decorators and docstrings removed) is given below:
.. code-block:: python
def tan(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
return ivy.current_backend(x).tan(x, out=out)
The backend-specific implementation of :func:`ivy.tan` for PyTorch in :mod:`ivy/functional/backends/torch/elementwise.py` is given below:
.. code-block:: python
def tan(
x: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None
) -> torch.Tensor:
x = _cast_for_unary_op(x)
return torch.tan(x, out=out)
The reason that the Ivy implementation has type hint :code:`Union[ivy.Array, ivy.NativeArray]` but PyTorch implementation has :class:`torch.Tensor` is explained in the `Arrays <arrays.rst>`_ section.
Likewise, the reason that the :code:`out` argument in the Ivy implementation has array type hint :class:`ivy.Array` whereas :code:`x` has :code:`Union[ivy.Array, ivy.NativeArray]` is also explained in the `Arrays <arrays.rst>`_ section.
Compositional Functions
-----------------------
*Compositional* functions on the other hand **do not** have backend-specific implementations.
They are implemented as a *composition* of other Ivy functions, which themselves can be either compositional, primary, or mixed (explained below).
Therefore, compositional functions are only implemented in :mod:`ivy/functional/ivy/category_name.py`, and there are no implementations in any of the backend files :mod:`ivy/functional/backends/backend_name/category_name.py`.
For example, the implementation of :func:`ivy.cross_entropy` in :mod:`ivy/functional/ivy/losses.py` (with docstrings and decorators removed) is given below:
.. code-block:: python
def cross_entropy(
true: Union[ivy.Array, ivy.NativeArray],
pred: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: int = -1,
epsilon: float = 1e-7,
reduction: str = "mean",
out: Optional[ivy.Array] = None
) -> ivy.Array:
ivy.utils.assertions.check_elem_in_list(reduction, ["none", "sum", "mean"])
pred = ivy.clip(pred, epsilon, 1 - epsilon)
log_pred = ivy.log(pred)
return _reduce_loss(reduction, log_pred * true, axis, out)
Mixed Functions
---------------
Sometimes, a function may only be provided by some of the supported backends.
In this case, we have to take a mixed approach.
We should always have a backend-specific implementation if there is a similar function provided by a certain backend.
This maximises runtime efficiency, as the function in the backend will be implemented directly in C or C++.
Such functions have some backend-specific implementations in :mod:`ivy/functional/backends/backend_name/category_name.py`, but not for all backends.
To support backends that do not have a backend-specific implementation, a compositional implementation is also provided in :mod:`ivy/functional/ivy/category_name.py`.
Compositional functions should only be used when there is no similar function to wrap in the backend.
Because these functions include both a compositional implementation and also at least one backend-specific implementation, these functions are referred to as *mixed*.
When using ivy without a backend set explicitly (for example :func:`ivy.set_backend` has not been called), then the function called is always the one implemented in :mod:`ivy/functional/ivy/category_name.py`.
For *primary* functions, :code:`ivy.current_backend(array_arg).func_name(...)` will call the backend-specific implementation in :mod:`ivy/functional/backends/backend_name/category_name.py` directly.
However, as just explained, *mixed* functions implement a compositional approach in :mod:`ivy/functional/ivy/category_name.py`, without deferring to the backend.
Therefore, when no backend is explicitly set, then the compositional implementation is always used for *mixed* functions, even for backends that have a more efficient backend-specific implementation.
Typically the backend should always be set explicitly though (using :func:`ivy.set_backend` for example), and in this case the efficient backend-specific implementation will always be used if it exists.
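To make the structure concrete, the sketch below shows the rough shape a mixed function could take; the function name :code:`my_softplus` and both bodies are hypothetical illustrations rather than actual Ivy source code.
.. code-block:: python
    # ivy/functional/ivy/category_name.py: compositional fallback, used whenever the
    # selected backend has no backend-specific implementation
    def my_softplus(
        x: Union[ivy.Array, ivy.NativeArray],
        /,
        *,
        out: Optional[ivy.Array] = None,
    ) -> ivy.Array:
        return ivy.log1p(ivy.exp(x), out=out)
    # ivy/functional/backends/torch/category_name.py: backend-specific implementation,
    # used whenever the torch backend is set
    def my_softplus(x: torch.Tensor, /) -> torch.Tensor:
        return torch.nn.functional.softplus(x)
With the torch backend set, the backend-specific version above would be used; with a backend that provides no such implementation, the compositional fallback would be used instead.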
Partial Mixed Functions
-----------------------
There may be instances wherein the native backend function does not encompass the full range of possible cases that ivy wants to support.
One example of this is :code:`ivy.linear` for which the torch native function :code:`torch.nn.functional.linear` only supports the :code:`weight` argument
to be a 2-dimensional tensor, whereas ivy also allows the :code:`weight` argument to be 3-dimensional. While achieving the objective of having superset
behaviour across the backends, the native functionality of frameworks should be made use of as much as possible. Even if a framework-specific function
doesn't provide complete superset behaviour, we should still make use of the partial behaviour that it provides and then add more logic for the
remaining part. This is explained in detail in the :ref:`overview/deep_dive/superset_behaviour:Maximizing Usage of Native Functionality` section. Ivy allows this partial support with the help of the `partial_mixed_handler`_
attribute which should be added to the backend implementation with a boolean function that specifies some condition on the inputs to switch between the compositional
and primary implementations. For example, the :code:`torch` backend implementation of :code:`linear` looks like:
.. code-block:: python
def linear(
x: torch.Tensor,
weight: torch.Tensor,
/,
*,
bias: Optional[torch.Tensor] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.nn.functional.linear(x, weight, bias)
linear.partial_mixed_handler = lambda x, weight, **kwargs: weight.ndim == 2
And to the compositional implementation, we must add the `handle_partial_mixed_function`_ decorator. When the backend is set, the :code:`handle_partial_mixed_function`
decorator is added to the primary implementation `inside the _wrap_function`_ according to the order in the `FN_DECORATORS`_ list. When the function is executed,
the :code:`handle_partial_mixed_function` decorator first evaluates the boolean function using the given inputs, and the backend-specific implementation is used if the result
is ``True``, with the compositional implementation used otherwise.
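For illustration, a heavily simplified sketch of what the compositional counterpart could look like is shown below; the body is not the actual ivy implementation, it only shows where the decorator sits and how a :code:`weight` tensor of any rank might be handled purely with other ivy functions (the decorator is assumed to be importable from ivy's function wrapping module).
.. code-block:: python
    # ivy/functional/ivy/layers.py (simplified sketch, not the real implementation)
    @handle_partial_mixed_function
    def linear(x, weight, /, *, bias=None, out=None):
        # handle weight tensors of any rank by batching the matrix product
        res = ivy.matmul(x, ivy.swapaxes(weight, -1, -2))
        if bias is not None:
            res = res + bias
        return res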
For further information on decorators, please refer to the `Function Wrapping <function_wrapping.rst>`_ section.
For all mixed functions, we must add the :code:`mixed_backend_wrappers` attribute to the compositional implementation of mixed functions to specify which additional wrappers need to be applied to the primary implementation and which ones from the compositional implementation should be skipped.
We do this by creating a dictionary of two keys, :code:`to_add` and :code:`to_skip`, each containing the tuple of wrappers to be added or skipped respectively. In general, :code:`handle_out_argument`, :code:`inputs_to_native_arrays` and :code:`outputs_to_ivy_arrays`
should always be added to the primary implementation and :code:`inputs_to_ivy_arrays` should be skipped. For the :code:`linear` function, :code:`mixed_backend_wrappers` was added in the following manner.
.. code-block:: python
linear.mixed_backend_wrappers = {
"to_add": (
"handle_out_argument",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
),
"to_skip": ("inputs_to_ivy_arrays", "handle_partial_mixed_function"),
}
When the backend is set, we `handle`_ these wrappers for the primary implementation inside the :code:`_wrap_function`.
Standalone Functions
---------------------
*Standalone* functions are functions which do not reference any other *primary*, *compositional* or *mixed* functions whatsoever.
By definition, standalone functions can only reference themselves or other standalone functions.
Most commonly, these functions are *convenience* functions (see below).
As a first example, every function in the `nest.py`_ module is a standalone function.
All of these either: (a) reference no other function at all, (b) only reference themselves recursively, or (c) reference other standalone functions.
A few other examples outside of the :mod:`nest.py` module are: `ivy.default`_ which simply returns :code:`x` if it exists else the default value, `ivy.cache_fn`_ which wraps a function such that when :code:`cache=True` is passed, then a previously cached output is returned, and `ivy.stable_divide`_ which simply adds a small constant to the denominator of the division.
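A rough usage sketch of a couple of these is given below; the printed values in the comments are indicative only.
.. code-block:: python
    import ivy
    # returns x when it exists, otherwise the default value
    print(ivy.default(None, 5))   # -> 5
    print(ivy.default(7, 5))      # -> 7
    # adds a small constant to the denominator to avoid dividing by zero
    print(ivy.stable_divide(1., 0.))   # -> a large finite number rather than inf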
Nestable Functions
------------------
*Nestable* functions are functions which can accept :class:`ivy.Container` instances in place of **any** of the arguments.
Multiple containers can also be passed in for multiple arguments at the same time, provided that the containers share a common nested structure.
If an :class:`ivy.Container` is passed, then the function is applied to all of the leaves of the container, with the container leaf values passed into the function at the corresponding arguments.
In this case, the function will return an :class:`ivy.Container` in the output.
*Primary*, *compositional*, *mixed*, and *standalone* functions can all *also* be nestable.
This categorization is **not** mutually exclusive, as outlined by the Venn diagram below:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/deep_dive/function_types/nestable.png?raw=true
:align: center
:width: 50%
:class: dark-light
The *nestable* property makes it very easy to write a single piece of code that can deal either with individual arguments or arbitrary batches of nested arguments.
This is very useful in machine learning, where batches of different training data often need to be processed concurrently.
Another example is when the same operation must be performed on each weight in a network.
This *nestable* property of Ivy functions means that the same function can be used for any of these use cases without modification.
This added support for handling :class:`ivy.Container` instances is all handled automatically when `_wrap_function`_ is applied to every function in the :code:`ivy` module during `backend setting`_.
This will add the `handle_nestable`_ wrapping to the function if it has the :code:`@handle_nestable` decorator.
This function wrapping process is covered in a bit more detail in the `Function Wrapping <function_wrapping.rst>`_ section.
Nestable functions are explained in more detail in the `Containers <containers.rst>`_ section.
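As a short sketch of the nestable behaviour in practice (the printed values in the comments are indicative only):
.. code-block:: python
    import ivy
    ivy.set_backend("numpy")
    x = ivy.Container(a=ivy.array([1., 2., 3.]),
                      b=ivy.array([4., 5., 6.]))
    y = ivy.mean(x)  # applied independently at every leaf of the container
    print(y)
    # {
    #     a: ivy.array(2.),
    #     b: ivy.array(5.)
    # }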
Convenience Functions
---------------------
A final group of functions are the *convenience* functions (briefly mentioned above).
Convenience functions do not form part of the computation graph directly, and they do not directly modify arrays.
However, they can be used to organize and improve the code for other functions which do modify the arrays.
Convenience functions can be *primary*, *compositional*, *mixed*, or *standalone* functions.
Many are also *nestable*.
This is another categorization which is **not** mutually exclusive, as outlined by the Venn diagram below:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/deep_dive/function_types/convenience.png?raw=true
:align: center
:width: 50%
:class: dark-light
Primary convenience functions include: `ivy.can_cast`_ which determines if one data type can be cast to another data type according to type-promotion rules, `ivy.dtype`_ which gets the data type for the input array, and `ivy.dev`_ which gets the device for the input array.
Compositional convenience functions include: `ivy.set_default_dtype`_ which sets the global default data type, `ivy.default_dtype`_ which returns the correct data type to use, considering both the inputs and the globally set default data type, and `ivy.get_all_arrays_on_dev`_ which gets all arrays currently on the specified device.
Standalone convenience functions include: `ivy.get_backend`_ which returns a local Ivy module with the associated backend framework, `ivy.nested_map`_ which enables an arbitrary function to be mapped across the leaves of an arbitrary nest, and `ivy.index_nest`_ which enables an arbitrary nest to be recursively indexed.
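As a quick sketch of a few of the convenience functions mentioned above in action (outputs in the comments are indicative):
.. code-block:: python
    import ivy
    ivy.set_backend("numpy")
    x = ivy.array([1, 2, 3])
    print(ivy.dtype(x))                 # data type of the array, e.g. int32
    print(ivy.can_cast(x, 'float64'))   # True if the cast obeys type-promotion rules
    ivy.set_default_dtype('float32')
    print(ivy.default_dtype())          # -> float32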
There are many other examples.
The convenience functions are not grouped by file or folder.
Feel free to have a look through all of the `submodules`_, you should be able to spot quite a few!
**Round Up**
This should have hopefully given you a good feel for the different function types.
If you have any questions, please feel free to reach out on `discord`_ in the `function types thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/mWYhQRu1Vuk" class="video">
</iframe>
| ivy/docs/overview/deep_dive/function_types.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/function_types.rst",
"repo_id": "ivy",
"token_count": 6210
} | 3 |
Ivy Stateful API
================
Here we explain how Ivy’s stateful API builds on the functional API and the :class:`ivy.Container` class to provide other convenient classes in the form of optimizers, network layers, and custom trainable modules, which help get your ML projects up and running very quickly!
So, without further ado, let’s walk through what the stateful API has to offer!
Modules
-------
The most helpful stateful Ivy class is perhaps the :class:`ivy.Module`.
This can be used to create custom trainable layers or entire networks.
Manually defined trainable variables must be specified in the :meth:`_create_variables` method.
For example, we can create a linear layer by deriving from :class:`ivy.Module` like so:
.. code-block:: python
class Linear(ivy.Module):
def __init__(self, input_channels, output_channels,
with_bias=True, dev=None, v=None):
self._input_channels = input_channels
self._output_channels = output_channels
self._w_shape = (output_channels, input_channels)
self._b_shape = (output_channels,)
self._with_bias = with_bias
ivy.Module.__init__(self, dev, v)
def _create_variables(self, dev):
v = {'w': ivy.random_uniform(
shape=self._w_shape, dev=dev)}
if self._with_bias:
v = dict(**v, b=ivy.random_uniform(
shape=self._b_shape, dev=dev))
return v
def _forward(self, inputs):
return ivy.linear(
inputs, self.v.w,
self.v.b if self._with_bias else None)
For simplicity, this is slightly different from the built-in :class:`ivy.Linear` in a couple of ways, as we will explain in the Initializers section below.
All :class:`ivy.Module` instances have an attribute v (short for variables), which stores all of the trainable variables in the module in an :class:`ivy.Container`.
For our example above, the hierarchical structure of these variables is the same as that defined in the method :meth:`_create_variables`.
.. code-block:: python
linear = Linear(2, 4)
print(linear.v)
{
b: ivy.array([0., 0., 0., 0.]),
w: ivy.array([[-0.729, 0.396],
[-1., -0.764],
[-0.872, 0.211],
[0.439, -0.644]])
}
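Calling the layer then simply runs :meth:`_forward` on the provided input; a quick sketch is shown below (the exact output values will vary, since the weights are randomly initialized).
.. code-block:: python
    x = ivy.array([1., 2.])
    y = linear(x)
    print(y.shape)  # one value per output channel, i.e. 4 values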
This is all well and good for defining a single layer, but manually defining all variables in :code:`_create_variables` for very complex networks would be a total nightmare.
To overcome this issue, modules can be nested up to an arbitrary depth.
This means we can very easily create more complex networks as compositions of other sub-modules or layers.
For example, we can create a simple fully connected network with our linear layers.
.. code-block:: python
class FC(ivy.Module):
def __init__(self):
self.linear0 = Linear(3, 64)
self.linear1 = Linear(64, 1)
ivy.Module.__init__(self)
def _forward(self, x):
x = ivy.relu(self.linear0(x))
return ivy.sigmoid(self.linear1(x))
In this case, we don’t specify any variables manually using :code:`_create_variables`.
This is because all variables in the network reside in the linear layers.
These variables are all detected automatically.
.. code-block:: python
fc = FC()
print(fc.v)
{
linear0: {
b: (<class ivy.array.array.Array> shape=[64]),
w: (<class ivy.array.array.Array> shape=[64, 3])
},
linear1: {
b: ivy.array([0.]),
w: (<class ivy.array.array.Array> shape=[1, 64])
}
}
Not only are variables detected automatically for :class:`ivy.Module` instances which are direct attributes of the top-level class, as above, but also if they are contained within any nested structure which is itself an attribute of the top-level class, such as lists, tuples or dicts.
These all work up to an arbitrary nested depth.
Check out some of the different ways of defining network layers and how this impacts the variable structure below.
As a list:
.. code-block:: python
class FC(ivy.Module):
def __init__(self):
self.linear = [Linear(3, 64), Linear(64, 1)]
ivy.Module.__init__(self)
def _forward(self, x):
x = ivy.relu(self.linear[0](x))
return ivy.sigmoid(self.linear[1](x))
fc = FC()
print(fc.v)
{
linear: {
v0: {
b: (<class ivy.array.array.Array> shape=[64]),
w: (<class ivy.array.array.Array> shape=[64, 3])
},
v1: {
b: ivy.array([0.]),
w: (<class ivy.array.array.Array> shape=[1, 64])
}
}
}
As a tuple:
.. code-block:: python
class FC(ivy.Module):
def __init__(self):
self.linear = (Linear(3, 64), Linear(64, 1))
ivy.Module.__init__(self)
def _forward(self, x):
x = ivy.relu(self.linear[0](x))
return ivy.sigmoid(self.linear[1](x))
fc = FC()
print(fc.v)
{
linear: {
v0: {
b: (<class ivy.array.array.Array> shape=[64]),
w: (<class ivy.array.array.Array> shape=[64, 3])
},
v1: {
b: ivy.array([0.]),
w: (<class ivy.array.array.Array> shape=[1, 64])
}
}
}
As a dict:
.. code-block:: python
class FC(ivy.Module):
def __init__(self):
self.linear = {'key0': Linear(3, 64),
'key1': Linear(64, 1)}
ivy.Module.__init__(self)
def _forward(self, x):
x = ivy.relu(self.linear['key0'](x))
return ivy.sigmoid(self.linear['key1'](x))
fc = FC()
print(fc.v)
{
linear: {
key0: {
b: (<class ivy.array.array.Array> shape=[64]),
w: (<class ivy.array.array.Array> shape=[64, 3])
},
key1: {
b: ivy.array([0.]),
w: (<class ivy.array.array.Array> shape=[1, 64])
}
}
}
As a nested list:
.. code-block:: python
class FC(ivy.Module):
def __init__(self):
self.linear = [[Linear(3, 64), Linear(64, 64)],
Linear(64, 1)]
ivy.Module.__init__(self)
def _forward(self, x):
for linear in self.linear[0]:
x = ivy.relu(linear(x))
return ivy.sigmoid(self.linear[1](x))
fc = FC()
print(fc.v)
{
linear: {
v0: {
v0: {
b: (<class ivy.array.array.Array> shape=[64]),
w: (<class ivy.array.array.Array> shape=[64, 3])
},
v1: {
b: (<class ivy.array.array.Array> shape=[64]),
w: (<class ivy.array.array.Array> shape=[64, 64])
}
},
v1: {
b: ivy.array([0.]),
w: (<class ivy.array.array.Array> shape=[1, 64])
}
}
}
Duplicates are also handled correctly, if for example a layer is stored both as a direct attribute and also within a list:
.. code-block:: python
class FC(ivy.Module):
def __init__(self):
self.linear0 = Linear(3, 64)
self.linear1 = Linear(64, 64)
self.linear3 = Linear(64, 1)
self.linear = [self.linear0,
self.linear1,
Linear(64, 64)]
ivy.Module.__init__(self)
def _forward(self, x):
x = ivy.relu(self.linear[0](x))
x = ivy.relu(self.linear[1](x))
x = ivy.relu(self.linear[2](x))
return ivy.sigmoid(self.linear3(x))
fc = FC()
print(fc.v)
{
linear: {
v0: {
b: (<class ivy.array.array.Array> shape=[64]),
w: (<class ivy.array.array.Array> shape=[64, 3])
},
v1: {
b: (<class ivy.array.array.Array> shape=[64]),
w: (<class ivy.array.array.Array> shape=[64, 64])
},
v2: {
b: (<class ivy.array.array.Array> shape=[64]),
w: (<class ivy.array.array.Array> shape=[64, 64])
}
},
linear3: {
b: ivy.array([0.]),
w: (<class ivy.array.array.Array> shape=[1, 64])
}
}
While the examples above all use the functional API for calling the ReLU and Sigmoid activation functions, we can also call these using the stateful API like so:
.. code-block:: python
class FC(ivy.Module):
def __init__(self):
self.linear0 = Linear(3, 64)
self.linear1 = Linear(64, 1)
self.relu = ivy.ReLU()
self.sigmoid = ivy.Sigmoid()
ivy.Module.__init__(self)
def _forward(self, x):
x = self.relu(self.linear0(x))
return self.sigmoid(self.linear1(x))
It may seem counterintuitive to implement the activations as :class:`ivy.Module` instances, given that they contain no trainable weights.
However, doing so means that, for networks where modules are directly chained together, with all outputs from the preceding module fed as inputs to the subsequent module, we can use the :class:`ivy.Sequential` class.
This can simplify the construction of our small fully connected network even further.
.. code-block:: python
fc = ivy.Sequential(
Linear(3, 64),
ivy.ReLU(),
Linear(64, 1),
ivy.Sigmoid())
print(fc.v)
{
submodules: {
v0: {
b: (<class ivy.array.array.Array> shape=[64]),
w: (<class ivy.array.array.Array> shape=[64, 3])
},
v2: {
b: ivy.array([0.]),
w: (<class ivy.array.array.Array> shape=[1, 64])
}
}
}
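The sequential model can then be called directly on an input array in exactly the same way as the hand-written :code:`FC` module; a brief sketch (output values again depend on the random initialization):
.. code-block:: python
    x = ivy.array([1., 2., 3.])
    y = fc(x)
    print(y.shape)  # a single sigmoid output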
Given that the weights of our network are stored in an :class:`ivy.Container`, and the gradients returned from :func:`ivy.execute_with_gradients` are also stored in an :class:`ivy.Container`, all operations are applied recursively to every variable at all leaves.
Therefore, we can train the network in a few lines of code like so:
.. code-block:: python
x_in = ivy.array([1., 2., 3.])
target = ivy.array([0.])
lr = 0.001
def loss_fn(v):
out = model(x_in, v=v)
return ivy.reduce_mean((out - target)**2)[0]
for step in range(100):
loss, grads = ivy.execute_with_gradients(
loss_fn, model.v)
model.v = model.v - grads * lr
Initializers
------------
In the examples above, we defined how the trainable weights should be initialized directly in the :code:`_create_variables` method.
However, it would be better if we could decouple the initialization scheme from the layer implementation.
This is where the :class:`ivy.Initializer` class comes in.
The actual implementation for the :class:`ivy.Linear` layer exposed in the Ivy stateful API is as follows:
.. code-block:: python
# ivy/stateful/layers.py
class Linear(ivy.Module):
def __init__(self, input_channels, output_channels,
weight_initializer=GlorotUniform(),
bias_initializer=Zeros(), with_bias=True,
dev=None, v=None):
self._input_channels = input_channels
self._output_channels = output_channels
self._w_shape = (output_channels, input_channels)
self._b_shape = (output_channels,)
self._w_init = weight_initializer
self._b_init = bias_initializer
self._with_bias = with_bias
ivy.Module.__init__(self, dev, v)
def _create_variables(self, dev):
v = {'w': self._w_init.create_variables(
self._w_shape, dev, self._output_channels,
self._input_channels)}
if self._with_bias:
v = dict(**v, b=self._b_init.create_variables(
self._b_shape, dev, self._output_channels))
return v
def _forward(self, inputs):
return ivy.linear(
inputs, self.v.w,
self.v.b if self._with_bias else None)
The :class:`ivy.Initializer` class has a single abstract method, :code:`create_variables(var_shape, dev, fan_out=None, fan_in=None, *args, **kwargs)`.
Check out the `code <https://github.com/unifyai/ivy/blob/main/ivy/stateful/initializers.py>`_ or :ref:`docs <overview/design/ivy_as_a_framework/ivy_stateful_api:Initializers>` for more details.
The default initializer for the weights is :class:`ivy.GlorotUniform` and for the bias is :class:`ivy.Zeros`.
Let’s take a quick look at what these look like.
:class:`ivy.GlorotUniform` derives from a more general :class:`ivy.Uniform` initializer class, and is then simply implemented as follows:
.. code-block:: python
# ivy/stateful/initializers.py
class GlorotUniform(ivy.Uniform):
def __init__(self):
super().__init__(
numerator=6, fan_mode='fan_sum', power=0.5, gain=1)
:class:`ivy.Zeros` derives from a more general :class:`ivy.Constant` initializer class, and is then simply implemented as follows:
.. code-block:: python
# ivy/stateful/initializers.py
class Zeros(ivy.Constant):
def __init__(self):
super().__init__(constant=0.)
The initializers are not stateful, and so adding them to the “stateful API” is a slight misnomer.
However, the dedicated initializer class helps us to decouple initialization schemes from layer implementations, which are themselves stateful.
Given that their application is entirely specific to stateful :class:`ivy.Module` instances, they still belong in the stateful API.
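An initializer can also be used on its own to create variables of a given shape, following the abstract method signature above; the sketch below assumes that passing :code:`None` for the device falls back to the default device.
.. code-block:: python
    w_init = ivy.GlorotUniform()
    w = w_init.create_variables((64, 3), None, fan_out=64, fan_in=3)
    b_init = ivy.Zeros()
    b = b_init.create_variables((64,), None, 64)
    print(w.shape, b.shape)  # (64, 3) and (64,)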
Optimizers
----------
Recapping the example given above, we saw that :class:`ivy.Module` instances can be trained like so:
.. code-block:: python
x_in = ivy.array([1., 2., 3.])
target = ivy.array([0.])
lr = 0.001
def loss_fn(v):
out = model(x_in, v=v)
return ivy.reduce_mean((out - target)**2)[0]
for step in range(100):
loss, grads = ivy.execute_with_gradients(
loss_fn, model.v)
model.v = model.v - grads * lr
However, what if we want to do something more complex than vanilla gradient descent? What about ADAM or other stateful optimizers such as LARS and LAMB? This is where the :class:`ivy.Optimizer` class comes in.
Let’s take the class :class:`ivy.Adam` as an example.
The implementation is as follows:
.. code-block:: python
# ivy/stateful/optimizers.py
class Adam(ivy.Optimizer):
def __init__(self, lr=1e-4, beta1=0.9, beta2=0.999,
epsilon=1e-07, inplace=None,
stop_gradients=True, trace_on_next_step=False,
dev=None):
ivy.Optimizer.__init__(
self, lr, inplace, stop_gradients, True,
trace_on_next_step, dev)
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
self._mw = None
self._vw = None
self._first_pass = True
self._should_trace = False
# Custom Step
def _step(self, v, grads):
if self._first_pass:
self._mw = grads
self._vw = grads ** 2
self._first_pass = False
new_v, self._mw, self._vw = ivy.adam_update(
v, grads, self._lr if isinstance(self._lr, float)
else self._lr(), self._mw, self._vw, self._count,
self._beta1, self._beta2, self._epsilon, self._inplace,
self._stop_gradients)
return new_v
def set_state(self, state):
self._mw = state.mw
self._vw = state.vw
@property
def state(self):
return ivy.Container({'mw': self._mw, 'vw': self._vw})
By changing only a couple of lines, we can use this optimizer to train our network like so:
.. code-block:: python
x_in = ivy.array([1., 2., 3.])
target = ivy.array([0.])
optimizer = ivy.Adam(0.001)
def loss_fn(v):
out = model(x_in, v=v)
return ivy.reduce_mean((out - target)**2)[0]
for step in range(100):
loss, grads = ivy.execute_with_gradients(
loss_fn, model.v)
model.v = optimizer.step(model.v, grads)
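Since the optimizer's internal state is itself stored in an :class:`ivy.Container`, it can also be captured and restored via the :code:`state` property and :code:`set_state` method shown above; the snippet below is a minimal sketch of how checkpointing might look.
.. code-block:: python
    saved_state = optimizer.state   # ivy.Container with 'mw' and 'vw' entries
    # ... later, for example after re-creating the optimizer ...
    optimizer.set_state(saved_state)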
**Round Up**
That should hopefully be enough to get you started with Ivy’s stateful API 😊
Please reach out on `discord <https://discord.gg/sXyFF8tDtm>`_ if you have any questions!
| ivy/docs/overview/design/ivy_as_a_framework/ivy_stateful_api.rst/0 | {
"file_path": "ivy/docs/overview/design/ivy_as_a_framework/ivy_stateful_api.rst",
"repo_id": "ivy",
"token_count": 7835
} | 4 |
.. _`RWorks Compiler Infrastructure`:
Compiler Infrastructure
=======================
.. _`LLVM`: https://llvm.org/
.. _`Multi Level Intermediate Representation (MLIR)`: https://mlir.llvm.org/
.. _`MLIR`: https://mlir.llvm.org/
.. _`Onnx-mlir`: https://github.com/onnx/onnx-mlir
.. _`ONNX`: https://onnx.ai/
.. _`OneAPI`: https://www.oneapi.io/
.. _`Intel`: https://www.intel.com/
.. _`OneDNN`: https://github.com/oneapi-src/oneDNN
.. _`discord`: https://discord.gg/sXyFF8tDtm
Compiler infrastructure generally provides carefully thought through frameworks and principles to simplify the lives of compiler designers, maximizing the reusability of tools and interoperability when deploying to various different hardware targets.
This infrastructure doesn’t provide “full” solutions for compiling to hardware, but instead provides the general scaffolding to make the design of such compilers as principled and interoperable as possible, with maximal code sharing and interoperability being at the heart of their design.
LLVM
----
`LLVM`_ is a set of compiler and toolchain technologies that can be used to develop a front end for any programming language and a back end for any instruction set architecture.
LLVM is designed around a language-independent intermediate representation (IR) that serves as a portable, high-level assembly language that can be optimized with a variety of transformations over multiple passes.
It is designed for compile-time, link-time, run-time, and "idle-time" optimization.
It can provide the middle layers of a complete compiler system, taking intermediate representation (IR) code from a compiler and emitting an optimized IR.
This new IR can then be converted and linked into machine-dependent assembly language code for a target platform.
It can also accept the IR from the GNU Compiler Collection (GCC) toolchain, allowing it to be used with a wide array of existing compiler front-ends written for that project.
MLIR
----
The `Multi Level Intermediate Representation (MLIR)`_ is an important piece of compiler infrastructure designed to represent multiple levels of abstraction, with abstractions and domain-specific IR constructs being easy to add, and with location being a first-class construct.
It is part of the broader `LLVM`_ project.
It aims to address software fragmentation, improve compilation for heterogeneous hardware, significantly reduce the cost of building domain specific compilers, and aid in connecting existing compilers together.
Compared to other parts of the overall ML stack, MLIR is designed to operate at a lower level than the neural network exchange formats.
For example, the `Onnx-mlir`_ compiler uses the MLIR compiler infrastructure to implement a compiler which enables `ONNX`_ defined models to be compiled into native code.
OneAPI
------
`OneAPI`_ is an open standard for a unified Application Programming Interface (API) intended to be used across different compute accelerator (coprocessor) architectures, including GPUs, AI accelerators, and field-programmable gate arrays, although at present the main user is `Intel`_, with them being the authors of the standard.
The set of APIs spans several domains that benefit from acceleration, including libraries for linear algebra math, deep learning, machine learning, video processing, and others.
`OneDNN`_ is particularly relevant, focusing on neural network functions for deep learning training and inference.
Intel CPUs and GPUs have accelerators for Deep Learning software, and OneDNN provides a unified interface to utilize these accelerators, with much of the hardware-specific complexity abstracted away.
In a similar manner to `MLIR`_, OneAPI is also designed to operate at a lower level than the Neural Network :ref:`overview/related_work/what_does_ivy_add:Exchange Formats`.
The interface is lower level and more primitive than the neural network exchange formats, with a focus on the core low-level operations such as convolutions, matrix multiplications, batch normalization etc.
This makes OneDNN very much complementary to these formats, where OneDNN can sit below the exchange formats in the overall stack, enabling accelerators to be fully leveraged with minimal hardware-specific considerations, with this all helpfully being abstracted by the OneDNN API.
Indeed, OneAPI and MLIR can work together in tandem, and OneDNN is working to `integrate Tensor Processing Primitives in the MLIR compilers used underneath TensorFlow <https://www.oneapi.io/blog/tensorflow-and-onednn-in-partnership/>`_.
| ivy/docs/overview/related_work/compiler_infrastructure.rst/0 | {
"file_path": "ivy/docs/overview/related_work/compiler_infrastructure.rst",
"repo_id": "ivy",
"token_count": 1067
} | 5 |
# global
import abc
class _ArrayWithData_typeExperimental(abc.ABC):
pass
| ivy/ivy/data_classes/array/experimental/data_type.py/0 | {
"file_path": "ivy/ivy/data_classes/array/experimental/data_type.py",
"repo_id": "ivy",
"token_count": 28
} | 6 |
# global
from typing import Optional
import abc
# local
import ivy
class _ArrayWithUtilityExperimental(abc.ABC):
def optional_get_element(
self: Optional[ivy.Array] = None,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""If the input is a tensor or sequence type, it returns the input. If
the input is an optional type, it outputs the element in the input. It
is an error if the input is an empty optional-type (i.e. does not have
an element) and the behavior is undefined in this case.
Parameters
----------
self
Input array
out
Optional output array, for writing the result to.
Returns
-------
ret
Input array if it is not None
"""
return ivy.optional_get_element(self._data, out=out)
| ivy/ivy/data_classes/array/experimental/utility.py/0 | {
"file_path": "ivy/ivy/data_classes/array/experimental/utility.py",
"repo_id": "ivy",
"token_count": 369
} | 7 |
"""Base Container Object."""
# global
import colorama
try:
# noinspection PyPackageRequirements
import h5py
except ModuleNotFoundError:
h5py = None
# local
from .wrapping import add_ivy_container_instance_methods # noqa
from .container import ContainerBase, Container # noqa
colorama.init(strip=False)
| ivy/ivy/data_classes/container/__init__.py/0 | {
"file_path": "ivy/ivy/data_classes/container/__init__.py",
"repo_id": "ivy",
"token_count": 103
} | 8 |
# global
from typing import Optional, Union, List, Dict, Callable, Sequence
# local
from ivy.data_classes.container.base import ContainerBase
import ivy
class _ContainerWithGeneralExperimental(ContainerBase):
@staticmethod
def _static_reduce(
operand: Union[ivy.Container, ivy.Array, ivy.NativeArray],
init_value: Union[int, float, ivy.Container],
computation: Union[Callable, ivy.Container],
/,
*,
axes: Union[int, Sequence[int], ivy.Container] = 0,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.reduce. This method
simply wraps the function, and so the docstring for ivy.reduce also
applies to this method with minimal changes.
Parameters
----------
operand
The array to act on.
init_value
The value with which to start the reduction.
computation
The reduction function.
axes
The dimensions along which the reduction is performed.
keepdims
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
The reduced array.
Examples
--------
        >>> x = ivy.Container(
        ...     a=ivy.array([[1, 2, 3], [4, 5, 6]]),
        ...     b=ivy.native_array([[7, 8, 9], [10, 5, 1]])
        ... )
>>> y = ivy.Container.static_reduce(x, 0, ivy.add)
>>> print(y)
{
a: ivy.array([6, 15]),
b: ivy.array([24, 16])
}
"""
return ContainerBase.cont_multi_map_in_function(
"reduce",
operand,
init_value,
computation,
axes=axes,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def reduce(
self: ivy.Container,
init_value: Union[int, float, ivy.Container],
computation: Union[Callable, ivy.Container],
/,
*,
axes: Union[int, Sequence[int], ivy.Container] = 0,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.reduce. This method
simply wraps the function, and so the docstring for ivy.reduce also
applies to this method with minimal changes.
Parameters
----------
self
The array to act on.
init_value
The value with which to start the reduction.
computation
The reduction function.
axes
The dimensions along which the reduction is performed.
keepdims
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
The reduced array.
Examples
--------
>>> x = ivy.Container(
... a=ivy.array([[1, 2, 3], [4, 5, 6]]),
... b=ivy.native_array([[7, 8, 9], [10, 5, 1]]))
>>> y = x.reduce(0, ivy.add)
>>> print(y)
{
a: ivy.array([5, 7, 9]),
b: ivy.array([17, 13, 10])
}
"""
return self._static_reduce(
self,
init_value,
computation,
axes=axes,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
| ivy/ivy/data_classes/container/experimental/general.py/0 | {
"file_path": "ivy/ivy/data_classes/container/experimental/general.py",
"repo_id": "ivy",
"token_count": 2494
} | 9 |
# local
from ivy.data_classes.container.base import ContainerBase
# ToDo: implement all methods here as public instance methods
# noinspection PyMissingConstructor
class _ContainerWithImage(ContainerBase):
pass
| ivy/ivy/data_classes/container/image.py/0 | {
"file_path": "ivy/ivy/data_classes/container/image.py",
"repo_id": "ivy",
"token_count": 57
} | 10 |
# local
from .base import FactorizedTensor
import ivy
# global
from copy import deepcopy
class Parafac2Tensor(FactorizedTensor):
def __init__(self, parafac2_tensor):
super().__init__()
shape, rank = ivy.Parafac2Tensor.validate_parafac2_tensor(parafac2_tensor)
weights, factors, projections = parafac2_tensor
if weights is None:
weights = ivy.ones(rank, dtype=factors[0].dtype)
self.shape = shape
self.rank = rank
self.factors = factors
self.weights = weights
self.projections = projections
# Built-ins #
# ----------#
def __getitem__(self, index):
if index == 0:
return self.weights
elif index == 1:
return self.factors
elif index == 2:
return self.projections
else:
raise IndexError(
f"You tried to access index {index} of a PARAFAC2 tensor.\n"
"You can only access index 0, 1 and 2 of a PARAFAC2 tensor"
"(corresponding respectively to the weights, factors and projections)"
)
def __setitem__(self, index, value):
if index == 0:
self.weights = value
elif index == 1:
self.factors = value
elif index == 2:
self.projections = value
else:
raise IndexError(
f"You tried to set index {index} of a PARAFAC2 tensor.\n"
"You can only set index 0, 1 and 2 of a PARAFAC2 tensor"
"(corresponding respectively to the weights, factors and projections)"
)
def __iter__(self):
yield self.weights
yield self.factors
yield self.projections
def __len__(self):
return 3
def __repr__(self):
message = (
f"(weights, factors, projections) : rank-{self.rank} Parafac2Tensor of"
f" shape {self.shape} "
)
return message
# Public Methods #
# ---------------#
def to_tensor(self):
return ivy.Parafac2Tensor.parafac2_to_tensor(self)
def to_vec(self):
return ivy.Parafac2Tensor.parafac2_to_vec(self)
def to_unfolded(self, mode):
return ivy.Parafac2Tensor.parafac2_to_unfolded(self, mode)
# Properties #
# ---------------#
@property
def n_param(self):
factors_params = self.rank * ivy.sum(self.shape)
if self.weights:
return factors_params + self.rank
else:
return factors_params
@classmethod
def from_CPTensor(cls, cp_tensor, parafac2_tensor_ok=False):
"""Create a Parafac2Tensor from a CPTensor.
Parameters
----------
cp_tensor
CPTensor or Parafac2Tensor
If it is a Parafac2Tensor, then the argument
            ``parafac2_tensor_ok`` must be True.
        parafac2_tensor_ok
            Whether or not Parafac2Tensors can be used as input.
Returns
-------
Parafac2Tensor with factor matrices and weights extracted from a CPTensor
"""
if parafac2_tensor_ok and len(cp_tensor) == 3:
return Parafac2Tensor(cp_tensor)
elif len(cp_tensor) == 3:
raise TypeError(
"Input is not a CPTensor. If it is a Parafac2Tensor, then the argument"
" ``parafac2_tensor_ok`` must be True"
)
weights, (A, B, C) = cp_tensor
Q, R = ivy.qr(B)
projections = [Q for _ in range(ivy.shape(A)[0])]
B = R
return Parafac2Tensor((weights, (A, B, C), projections))
# Class Methods #
# ---------------#
@staticmethod
def validate_parafac2_tensor(parafac2_tensor):
"""Validate a parafac2_tensor in the form (weights, factors) Return the
rank and shape of the validated tensor.
Parameters
----------
parafac2_tensor
Parafac2Tensor or (weights, factors)
Returns
-------
(shape, rank)
size of the full tensor and rank of the CP tensor
"""
if isinstance(parafac2_tensor, ivy.Parafac2Tensor):
# it's already been validated at creation
return parafac2_tensor.shape, parafac2_tensor.rank
weights, factors, projections = parafac2_tensor
if len(factors) != 3:
raise ValueError(
"A PARAFAC2 tensor should be composed of exactly three factors."
f"However, {len(factors)} factors was given."
)
if len(projections) != factors[0].shape[0]:
raise ValueError(
"A PARAFAC2 tensor should have one projection matrix for each"
f" horisontal slice. However, {len(projections)} projection matrices"
f" was given and the first mode haslength {factors[0].shape[0]}"
)
rank = int(ivy.shape(factors[0])[1])
shape = []
for i, projection in enumerate(projections):
current_mode_size, current_rank = ivy.shape(projection)
if current_rank != rank:
raise ValueError(
"All the projection matrices of a PARAFAC2 tensor should have the"
f" same number of columns as the rank. However, rank={rank} but"
f" projections[{i}].shape[1]={ivy.shape(projection)[1]}"
)
inner_product = ivy.dot(ivy.permute_dims(projection, (1, 0)), projection)
if (
ivy.max(
ivy.abs(inner_product - ivy.eye(rank, dtype=inner_product[0].dtype))
)
> 1e-5
):
raise ValueError(
"All the projection matrices must be orthonormal, that is, P.T@P"
" = I. "
f"However, projection[{i}].T@projection[{i}] -"
" T.eye(rank)) = "
f"""{ivy.sqrt(ivy.sum(ivy.square(inner_product -
ivy.eye(rank,dtype=inner_product[0].dtype)),
axis=0))}"""
)
# Tuple unpacking to possibly support higher
# order PARAFAC2 tensors in the future
shape.append((current_mode_size, *[f.shape[0] for f in factors[2:]]))
# Skip first factor matrix since the rank is extracted from it.
for i, factor in enumerate(factors[1:]):
current_mode_size, current_rank = ivy.shape(factor)
if current_rank != rank:
raise ValueError(
"All the factors of a PARAFAC2 tensor should have the same number"
f" of columns.However, factors[0].shape[1]={rank} but"
f" factors[{i}].shape[1]={current_rank}."
)
if weights is not None and ivy.shape(weights)[0] != rank:
raise ValueError(
f"Given factors for a rank-{rank} PARAFAC2 tensor but"
f" len(weights)={ivy.shape(weights)[0]}."
)
return tuple(shape), rank
@staticmethod
def parafac2_normalise(parafac2_tensor):
"""Return parafac2_tensor with factors normalised to unit length.
Turns ``factors = [|U_1, ... U_n|]`` into ``[weights; |V_1, ... V_n|]``,
where the columns of each `V_k` are normalized to unit Euclidean length
from the columns of `U_k` with the normalizing constants absorbed into
`weights`. In the special case of a symmetric tensor, `weights` holds the
eigenvalues of the tensor.
Parameters
----------
parafac2_tensor
Parafac2Tensor = (weight, factors, projections)
factors is list of matrices, all with the same number of columns
i.e.::
for u in U:
u[i].shape == (s_i, R)
where `R` is fixed while `s_i` can vary with `i`
Returns
-------
Parafac2Tensor
normalisation_weights, normalised_factors, normalised_projections
"""
# allocate variables for weights, and normalized factors
_, rank = ivy.Parafac2Tensor.validate_parafac2_tensor(parafac2_tensor)
weights, factors, projections = parafac2_tensor
# if (not copy) and (weights is None):
# warnings.warn('Provided copy=False and weights=None: a new Parafac2Tensor'
# 'with new weights and factors normalised inplace will
# be returned.')
# weights = T.ones(rank, **T.context(factors[0]))
# The if test below was added to enable inplace edits
# however, TensorFlow does not support inplace edits
# so this is always set to True
if True:
factors = [deepcopy(f) for f in factors]
projections = [deepcopy(p) for p in projections]
if weights is not None:
factors[0] = factors[0] * weights
weights = ivy.ones(rank, dtype=factors[0].dtype)
for i, factor in enumerate(factors):
scales = ivy.sqrt(ivy.sum(ivy.square(factor), axis=0))
weights = weights * scales
scales_non_zero = ivy.where(
scales == 0, ivy.ones(ivy.shape(scales), dtype=factors[0].dtype), scales
)
factors[i] = factor / scales_non_zero
return Parafac2Tensor((weights, factors, projections))
@staticmethod
def apply_parafac2_projections(parafac2_tensor):
"""Apply the projection matrices to the evolving factor.
Parameters
----------
parafac2_tensor : Parafac2Tensor
Returns
-------
(weights, factors)
A tensor decomposition on the form A [B_i] C such that
            the :math:`X_{ijk}` is given by :math:`\\sum_r A_{ir} [B_i]_{jr} C_{kr}`.
This is also equivalent to a coupled matrix factorisation, where
each matrix, :math:`X_i = C diag([a_{i1}, ..., a_{ir}] B_i)`.
The first element of factors is the A matrix, the second element is
a list of B-matrices and the third element is the C matrix.
"""
ivy.Parafac2Tensor.validate_parafac2_tensor(parafac2_tensor)
weights, factors, projections = parafac2_tensor
evolving_factor = [
ivy.dot(projection, factors[1]) for projection in projections
]
return weights, (factors[0], evolving_factor, factors[2])
@staticmethod
def parafac2_to_slice(parafac2_tensor, slice_idx, validate=True):
"""Generate a single slice along the first mode from the PARAFAC2
tensor.
The decomposition is on the form :math:`(A [B_i] C)` such that the
i-th frontal slice, :math:`X_i`, of :math:`X` is given by
.. math::
X_i = B_i diag(a_i) C^T,
where :math:`diag(a_i)` is the diagonal matrix whose nonzero
        entries are equal to the :math:`i`-th row of the :math:`I \\times R`
        factor matrix :math:`A`, :math:`B_i` is a :math:`J_i \\times R` factor
        matrix such that the cross product matrix :math:`B_{i_1}^T B_{i_1}` is
        constant for all :math:`i`, and :math:`C` is a :math:`K \\times R`
factor matrix. To compute this decomposition, we reformulate
the expression for :math:`B_i` such that
.. math::
B_i = P_i B,
        where :math:`P_i` is a :math:`J_i \\times R` orthogonal matrix and :math:`B`
        is a :math:`R \\times R` matrix.
An alternative formulation of the PARAFAC2 decomposition is
that the tensor element :math:`X_{ijk}` is given by
.. math::
            X_{ijk} = \\sum_{r=1}^R A_{ir} B_{ijr} C_{kr},
with the same constraints hold for :math:`B_i` as above.
Parameters
----------
parafac2_tensor
weights
1D array of shape (rank, ) weights of the factors
factors
List of factors of the PARAFAC2 decomposition Contains the
matrices :math:`A`, :math:`B` and :math:`C` described above
projection_matrices
List of projection matrices used to create evolving factors.
Returns
-------
Full tensor of shape [P[slice_idx].shape[1], C.shape[1]], where
P is the projection matrices and C is the last factor matrix of
the Parafac2Tensor.
"""
if validate:
ivy.Parafac2Tensor.validate_parafac2_tensor(parafac2_tensor)
weights, (A, B, C), projections = parafac2_tensor
a = A[slice_idx]
if weights is not None:
a = a * weights
Ct = ivy.permute_dims(C, (1, 0))
B_i = ivy.dot(projections[slice_idx], B)
return ivy.dot(B_i * a, Ct)
@staticmethod
def parafac2_to_slices(parafac2_tensor, validate=True):
"""Generate all slices along the first mode from a PARAFAC2 tensor.
Generates a list of all slices from a PARAFAC2 tensor. A list is returned
since the tensor might have varying size along the second mode. To return
        a tensor, see the ``parafac2_to_tensor`` function instead.
The decomposition is on the form :math:`(A [B_i] C)` such that
the i-th frontal slice, :math:`X_i`, of :math:`X` is given by
.. math::
X_i = B_i diag(a_i) C^T,
where :math:`diag(a_i)` is the diagonal matrix whose nonzero entries are
        equal to the :math:`i`-th row of the :math:`I \\times R` factor matrix
        :math:`A`, :math:`B_i` is a :math:`J_i \\times R` factor matrix such
        that the cross product matrix :math:`B_{i_1}^T B_{i_1}` is constant
        for all :math:`i`, and :math:`C` is a :math:`K \\times R` factor matrix. To
compute this decomposition, we reformulate the expression for :math:`B_i`
such that
.. math::
B_i = P_i B,
        where :math:`P_i` is a :math:`J_i \\times R` orthogonal matrix and :math:`B`
        is a :math:`R \\times R` matrix.
An alternative formulation of the PARAFAC2 decomposition is that the
tensor element :math:`X_{ijk}` is given by
.. math::
            X_{ijk} = \\sum_{r=1}^R A_{ir} B_{ijr} C_{kr},
with the same constraints hold for :math:`B_i` as above.
Parameters
----------
parafac2_tensor : Parafac2Tensor - (weight, factors, projection_matrices)
* weights : 1D array of shape (rank, )
weights of the factors
* factors : List of factors of the PARAFAC2 decomposition
Contains the matrices :math:`A`, :math:`B` and :math:`C` described above
* projection_matrices : List of projection matrices used to create evolving
factors.
Returns
-------
A list of full tensors of shapes [P[i].shape[1], C.shape[1]], where
P is the projection matrices and C is the last factor matrix of the
Parafac2Tensor.
"""
if validate:
ivy.Parafac2Tensor.validate_parafac2_tensor(parafac2_tensor)
weights, (A, B, C), projections = parafac2_tensor
if weights is not None:
A = A * weights
weights = None
decomposition = weights, (A, B, C), projections
I, _ = A.shape # noqa: E741
return [
ivy.Parafac2Tensor.parafac2_to_slice(decomposition, i, validate=False)
for i in range(I)
]
@staticmethod
def parafac2_to_tensor(parafac2_tensor):
"""Construct a full tensor from a PARAFAC2 decomposition.
The decomposition is on the form :math:`(A [B_i] C)` such that the
i-th frontal slice, :math:`X_i`, of :math:`X` is given by
.. math::
X_i = B_i diag(a_i) C^T,
where :math:`diag(a_i)` is the diagonal matrix whose nonzero entries
        are equal to the :math:`i`-th row of the :math:`I \\times R` factor
        matrix :math:`A`, :math:`B_i` is a :math:`J_i \\times R` factor matrix
        such that the cross product matrix :math:`B_{i_1}^T B_{i_1}` is
        constant for all :math:`i`, and :math:`C` is a :math:`K \\times R`
factor matrix. To compute this decomposition, we reformulate
the expression for :math:`B_i` such that
.. math::
B_i = P_i B,
        where :math:`P_i` is a :math:`J_i \\times R` orthogonal matrix and :math:`B`
        is a :math:`R \\times R` matrix.
An alternative formulation of the PARAFAC2 decomposition is
that the tensor element :math:`X_{ijk}` is given by
.. math::
            X_{ijk} = \\sum_{r=1}^R A_{ir} B_{ijr} C_{kr},
with the same constraints hold for :math:`B_i` as above.
Parameters
----------
parafac2_tensor : Parafac2Tensor - (weight, factors, projection_matrices)
* weights : 1D array of shape (rank, )
weights of the factors
* factors : List of factors of the PARAFAC2 decomposition
Contains the matrices :math:`A`, :math:`B` and :math:`C` described above
* projection_matrices : List of projection matrices used to create evolving
factors.
Returns
-------
ndarray
Full constructed tensor. Uneven slices are padded with zeros.
"""
_, (A, _, C), projections = parafac2_tensor
slices = ivy.Parafac2Tensor.parafac2_to_slices(parafac2_tensor)
lengths = [projection.shape[0] for projection in projections]
tensor = ivy.zeros(
(A.shape[0], max(lengths), C.shape[0]), dtype=slices[0].dtype
)
for i, (slice_, length) in enumerate(zip(slices, lengths)):
tensor[i, :length] = slice_
return tensor
@staticmethod
def parafac2_to_unfolded(parafac2_tensor, mode):
"""Construct an unfolded tensor from a PARAFAC2 decomposition. Uneven
slices are padded by zeros.
The decomposition is on the form :math:`(A [B_i] C)` such that the
i-th frontal slice, :math:`X_i`, of :math:`X` is given by
.. math::
X_i = B_i diag(a_i) C^T,
where :math:`diag(a_i)` is the diagonal matrix whose nonzero entries
        are equal to the :math:`i`-th row of the :math:`I \\times R` factor
        matrix :math:`A`, :math:`B_i` is a :math:`J_i \\times R` factor
        matrix such that the cross product matrix :math:`B_{i_1}^T B_{i_1}`
        is constant for all :math:`i`, and :math:`C` is a :math:`K \\times R`
factor matrix. To compute this decomposition, we reformulate the
expression for :math:`B_i` such that
.. math::
B_i = P_i B,
        where :math:`P_i` is a :math:`J_i \\times R` orthogonal matrix and :math:`B` is a
        :math:`R \\times R` matrix.
An alternative formulation of the PARAFAC2 decomposition is that the
tensor element :math:`X_{ijk}` is given by
.. math::
            X_{ijk} = \\sum_{r=1}^R A_{ir} B_{ijr} C_{kr},
with the same constraints hold for :math:`B_i` as above.
Parameters
----------
parafac2_tensor : Parafac2Tensor - (weight, factors, projection_matrices)
weights
weights of the factors
factors
Contains the matrices :math:`A`, :math:`B` and :math:`C` described above
projection_matrices
factors
Returns
-------
Full constructed tensor. Uneven slices are padded with zeros.
"""
return ivy.unfold(ivy.Parafac2Tensor.parafac2_to_tensor(parafac2_tensor), mode)
@staticmethod
def parafac2_to_vec(parafac2_tensor):
"""Construct a vectorized tensor from a PARAFAC2 decomposition. Uneven
slices are padded by zeros.
The decomposition is on the form :math:`(A [B_i] C)` such that
the i-th frontal slice, :math:`X_i`, of :math:`X` is given by
.. math::
X_i = B_i diag(a_i) C^T,
where :math:`diag(a_i)` is the diagonal matrix whose nonzero
        entries are equal to the :math:`i`-th row of the :math:`I \\times R`
        factor matrix :math:`A`, :math:`B_i` is a :math:`J_i \\times R` factor
        matrix such that the cross product matrix :math:`B_{i_1}^T B_{i_1}` is
        constant for all :math:`i`, and :math:`C` is a :math:`K \\times R`
        factor matrix. To compute this
decomposition, we reformulate the expression for :math:`B_i`
such that
.. math::
B_i = P_i B,
        where :math:`P_i` is a :math:`J_i \\times R` orthogonal matrix and :math:`B` is a
        :math:`R \\times R` matrix.
An alternative formulation of the PARAFAC2 decomposition is that
the tensor element :math:`X_{ijk}` is given by
.. math::
            X_{ijk} = \\sum_{r=1}^R A_{ir} B_{ijr} C_{kr},
with the same constraints hold for :math:`B_i` as above.
Parameters
----------
parafac2_tensor : Parafac2Tensor - (weight, factors, projection_matrices)
* weights
1D array of shape (rank, ) weights of the factors
* factors
            List of factors of the PARAFAC2 decomposition. Contains the matrices
            :math:`A`, :math:`B` and :math:`C` described above.
* projection_matrices
List of projection matrices used to create evolving factors.
Returns
-------
        Full constructed tensor. Uneven slices are padded with zeros.
"""
return ivy.reshape(ivy.Parafac2Tensor.parafac2_to_tensor(parafac2_tensor), (-1))
| ivy/ivy/data_classes/factorized_tensor/parafac2_tensor.py/0 | {
"file_path": "ivy/ivy/data_classes/factorized_tensor/parafac2_tensor.py",
"repo_id": "ivy",
"token_count": 10249
} | 11 |
mod literal;
mod pjrt_buffer;
mod pjrt_client;
mod pjrt_device;
mod pjrt_loaded_executable;
mod shape;
mod xla_builder;
mod xla_op;
use crate::c_lib;
use crate::error::{Error, Result};
use num_derive::FromPrimitive;
use num_traits::FromPrimitive;
pub use literal::Literal;
pub use pjrt_buffer::PjRtBuffer;
pub use pjrt_client::PjRtClient;
pub use pjrt_device::PjRtDevice;
pub use pjrt_loaded_executable::PjRtLoadedExecutable;
pub use shape::{ArrayShape, Shape};
pub use xla_builder::XlaBuilder;
pub use xla_op::XlaOp;
use pyo3::prelude::*;
pub(self) unsafe fn c_ptr_to_string(ptr: *const std::ffi::c_char) -> String {
let str = std::ffi::CStr::from_ptr(ptr).to_string_lossy().into_owned();
libc::free(ptr as *mut libc::c_void);
str
}
/// The primitive types supported by XLA. `S8` is a signed 1 byte integer,
/// `U32` is an unsigned 4 bytes integer, etc.
#[derive(Clone, Copy, PartialEq, Eq, Debug, FromPrimitive)]
#[pyclass(unsendable)]
pub enum PrimitiveType {
Invalid = 0,
Pred = 1,
S8 = 2,
S16 = 3,
S32 = 4,
S64 = 5,
U8 = 6,
U16 = 7,
U32 = 8,
U64 = 9,
F16 = 10,
F32 = 11,
Bf16 = 16,
F64 = 12,
C64 = 15,
C128 = 18,
Tuple = 13,
OpaqueType = 14,
Token = 17,
}
impl PrimitiveType {
pub fn element_type(self) -> Result<ElementType> {
match self {
Self::Pred => Ok(ElementType::Pred),
Self::S8 => Ok(ElementType::S8),
Self::S16 => Ok(ElementType::S16),
Self::S32 => Ok(ElementType::S32),
Self::S64 => Ok(ElementType::S64),
Self::U8 => Ok(ElementType::U8),
Self::U16 => Ok(ElementType::U16),
Self::U32 => Ok(ElementType::U32),
Self::U64 => Ok(ElementType::U64),
Self::F16 => Ok(ElementType::F16),
Self::F32 => Ok(ElementType::F32),
Self::Bf16 => Ok(ElementType::Bf16),
Self::F64 => Ok(ElementType::F64),
Self::C64 => Ok(ElementType::C64),
Self::C128 => Ok(ElementType::C128),
Self::Invalid | Self::Tuple | Self::OpaqueType | Self::Token => {
Err(Error::NotAnElementType { got: self })
}
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[pyclass(unsendable)]
pub enum ElementType {
Pred,
S8,
S16,
S32,
S64,
U8,
U16,
U32,
U64,
F16,
F32,
Bf16,
F64,
C64,
C128,
}
impl ElementType {
/// The size for this element type in bytes.
pub fn element_size_in_bytes(&self) -> usize {
match self {
Self::Pred => 1,
Self::S8 => 1,
Self::S16 => 2,
Self::S32 => 4,
Self::S64 => 8,
Self::U8 => 1,
Self::U16 => 2,
Self::U32 => 4,
Self::U64 => 8,
Self::F16 => 2,
Self::F32 => 4,
Self::Bf16 => 2,
Self::F64 => 8,
Self::C64 => 8,
Self::C128 => 16,
}
}
pub fn primitive_type(&self) -> PrimitiveType {
match self {
Self::Pred => PrimitiveType::Pred,
Self::S8 => PrimitiveType::S8,
Self::S16 => PrimitiveType::S16,
Self::S32 => PrimitiveType::S32,
Self::S64 => PrimitiveType::S64,
Self::U8 => PrimitiveType::U8,
Self::U16 => PrimitiveType::U16,
Self::U32 => PrimitiveType::U32,
Self::U64 => PrimitiveType::U64,
Self::F16 => PrimitiveType::F16,
Self::F32 => PrimitiveType::F32,
Self::Bf16 => PrimitiveType::Bf16,
Self::F64 => PrimitiveType::F64,
Self::C64 => PrimitiveType::C64,
Self::C128 => PrimitiveType::C128,
}
}
}
pub trait ArrayElement: Copy {
const TY: ElementType;
const ELEMENT_SIZE_IN_BYTES: usize;
const ZERO: Self;
}
#[allow(clippy::missing_safety_doc)]
/// A type implementing the `NativeType` trait can be directly converted to constant ops or
/// literals.
pub trait NativeType: Copy {
unsafe fn constant_r0(b: c_lib::xla_builder, v: Self) -> c_lib::xla_op;
unsafe fn constant_r1(b: c_lib::xla_builder, v: *const Self, l: usize) -> c_lib::xla_op;
unsafe fn constant_r1c(b: c_lib::xla_builder, v: Self, l: usize) -> c_lib::xla_op;
unsafe fn create_r0(v: Self) -> c_lib::literal;
unsafe fn create_r1(v: *const Self, l: usize) -> c_lib::literal;
unsafe fn literal_get_first_element(l: c_lib::literal) -> Self;
}
macro_rules! native_type {
($ty:ty, $cst0:ident, $cst1:ident, $cst1c:ident, $cre0:ident, $cre1:ident, $gf:ident) => {
impl NativeType for $ty {
unsafe fn constant_r0(b: c_lib::xla_builder, v: Self) -> c_lib::xla_op {
c_lib::$cst0(b, v)
}
unsafe fn constant_r1(
b: c_lib::xla_builder,
v: *const Self,
l: usize,
) -> c_lib::xla_op {
c_lib::$cst1(b, v, l)
}
unsafe fn constant_r1c(b: c_lib::xla_builder, v: Self, l: usize) -> c_lib::xla_op {
c_lib::$cst1c(b, v, l)
}
unsafe fn create_r0(v: Self) -> c_lib::literal {
c_lib::$cre0(v)
}
unsafe fn create_r1(v: *const Self, l: usize) -> c_lib::literal {
c_lib::$cre1(v, l)
}
unsafe fn literal_get_first_element(l: c_lib::literal) -> Self {
c_lib::$gf(l)
}
}
};
}
native_type!(
bool,
constant_r0_bool,
constant_r1_bool,
constant_r1c_bool,
create_r0_bool,
create_r1_bool,
literal_get_first_element_bool
);
native_type!(
i8,
constant_r0_int8_t,
constant_r1_int8_t,
constant_r1c_int8_t,
create_r0_int8_t,
create_r1_int8_t,
literal_get_first_element_int8_t
);
native_type!(
i16,
constant_r0_int16_t,
constant_r1_int16_t,
constant_r1c_int16_t,
create_r0_int16_t,
create_r1_int16_t,
literal_get_first_element_int16_t
);
native_type!(
i32,
constant_r0_int32_t,
constant_r1_int32_t,
constant_r1c_int32_t,
create_r0_int32_t,
create_r1_int32_t,
literal_get_first_element_int32_t
);
native_type!(
i64,
constant_r0_int64_t,
constant_r1_int64_t,
constant_r1c_int64_t,
create_r0_int64_t,
create_r1_int64_t,
literal_get_first_element_int64_t
);
native_type!(
u8,
constant_r0_uint8_t,
constant_r1_uint8_t,
constant_r1c_uint8_t,
create_r0_uint8_t,
create_r1_uint8_t,
literal_get_first_element_uint8_t
);
native_type!(
u16,
constant_r0_uint16_t,
constant_r1_uint16_t,
constant_r1c_uint16_t,
create_r0_uint16_t,
create_r1_uint16_t,
literal_get_first_element_uint16_t
);
native_type!(
u32,
constant_r0_uint32_t,
constant_r1_uint32_t,
constant_r1c_uint32_t,
create_r0_uint32_t,
create_r1_uint32_t,
literal_get_first_element_uint32_t
);
native_type!(
u64,
constant_r0_uint64_t,
constant_r1_uint64_t,
constant_r1c_uint64_t,
create_r0_uint64_t,
create_r1_uint64_t,
literal_get_first_element_uint64_t
);
native_type!(
f32,
constant_r0_float,
constant_r1_float,
constant_r1c_float,
create_r0_float,
create_r1_float,
literal_get_first_element_float
);
native_type!(
f64,
constant_r0_double,
constant_r1_double,
constant_r1c_double,
create_r0_double,
create_r1_double,
literal_get_first_element_double
);
macro_rules! element_type {
($ty:ty, $v:ident, $sz:tt, $zero:expr) => {
impl ArrayElement for $ty {
const TY: ElementType = ElementType::$v;
const ELEMENT_SIZE_IN_BYTES: usize = $sz;
const ZERO: Self = $zero;
}
};
}
// Dummy F16 type.
#[derive(Copy, Clone, Debug)]
pub struct F16;
impl ArrayElement for F16 {
const TY: ElementType = ElementType::F16;
const ELEMENT_SIZE_IN_BYTES: usize = 2;
const ZERO: Self = Self;
}
// Dummy BF16 type.
#[derive(Copy, Clone, Debug)]
pub struct Bf16;
impl ArrayElement for Bf16 {
const TY: ElementType = ElementType::Bf16;
const ELEMENT_SIZE_IN_BYTES: usize = 2;
const ZERO: Self = Self;
}
element_type!(bool, Pred, 1, false);
element_type!(u8, U8, 1, 0);
element_type!(u16, U16, 2, 0);
element_type!(u32, U32, 4, 0);
element_type!(u64, U64, 8, 0);
element_type!(i8, S8, 1, 0);
element_type!(i16, S16, 2, 0);
element_type!(i32, S32, 4, 0);
element_type!(i64, S64, 8, 0);
element_type!(f32, F32, 4, 0.0f32);
element_type!(f64, F64, 8, 0.0f64);
/// A computation is built from a root [`XlaOp`]. Computations are device independent and can be
/// specialized to a given device through a compilation step.
#[derive(Clone)]
#[pyclass(unsendable)]
pub struct XlaComputation(c_lib::xla_computation);
pub(self) fn handle_status(status: c_lib::status) -> Result<()> {
if status.is_null() {
Ok(())
} else {
let msg = unsafe {
let error_message_ptr = c_lib::status_error_message(status);
let error_message = c_ptr_to_string(error_message_ptr);
c_lib::status_free(status);
error_message
};
let backtrace = std::backtrace::Backtrace::capture().to_string();
Err(Error::XlaError { msg, backtrace })
}
}
impl XlaComputation {
pub fn from_proto(proto: &HloModuleProto) -> Self {
let ptr = unsafe { c_lib::xla_computation_from_hlo_module_proto(proto.0) };
Self(ptr)
}
/// The computation name.
pub fn name(&self) -> String {
unsafe {
let ptr = c_lib::xla_computation_name(self.0);
c_ptr_to_string(ptr)
}
}
/// Compile this computation for the specified client.
pub fn compile(&self, client: &PjRtClient) -> Result<PjRtLoadedExecutable> {
client.compile(self)
}
/// Get the HloModuleProto for the computation.
pub fn proto(&self) -> HloModuleProto {
let ptr = unsafe { c_lib::xla_computation_proto(self.0) };
HloModuleProto(ptr)
}
}
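// Illustrative usage of the wrappers above (a sketch only, not part of the library;
// `client` is assumed to be a `PjRtClient` obtained elsewhere in this crate):
//
//     let proto = HloModuleProto::from_text_file("module.hlo")?;
//     let computation = XlaComputation::from_proto(&proto);
//     let executable = computation.compile(&client)?;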
impl Drop for XlaComputation {
fn drop(&mut self) {
unsafe { c_lib::xla_computation_free(self.0) }
}
}
#[pyclass(unsendable)]
pub struct HloModuleProto(c_lib::hlo_module_proto);
impl HloModuleProto {
/// Read a HLO module from a text file.
pub fn from_text_file<P: AsRef<std::path::Path>>(path: P) -> Result<Self> {
use std::io::Read;
let mut file = std::fs::File::open(path.as_ref())?;
let mut content = Vec::new();
file.read_to_end(&mut content)?;
Self::parse_and_return_unverified_module(&content)
}
/// Read a HLO module from a proto file, either in binary or pbtxt format.
pub fn from_proto_file<P: AsRef<std::path::Path>>(path: P, binary: bool) -> Result<Self> {
use std::io::Read;
let mut file = std::fs::File::open(path.as_ref())?;
let mut content = Vec::new();
file.read_to_end(&mut content)?;
Self::parse_proto(&content, binary)
}
pub fn parse_and_return_unverified_module(data: &[u8]) -> Result<Self> {
let mut ptr: c_lib::hlo_module_proto = std::ptr::null_mut();
let status = unsafe {
c_lib::hlo_module_proto_parse_and_return_unverified_module(
data.as_ptr() as *const libc::c_char,
data.len(),
&mut ptr,
)
};
handle_status(status)?;
Ok(Self(ptr))
}
pub fn parse_proto(data: &[u8], binary: bool) -> Result<Self> {
let mut ptr: c_lib::hlo_module_proto = std::ptr::null_mut();
let status = unsafe {
c_lib::hlo_module_proto_parse_proto(
data.as_ptr() as *const libc::c_char,
data.len(),
binary,
&mut ptr,
)
};
handle_status(status)?;
Ok(Self(ptr))
}
}
impl Drop for HloModuleProto {
fn drop(&mut self) {
unsafe { c_lib::hlo_module_proto_free(self.0) }
}
}
#[pyclass(unsendable)]
pub struct HloModule(c_lib::hlo_module);
impl HloModule {
pub fn from_proto(proto: &HloModuleProto) -> Result<Self> {
let mut ptr = std::ptr::null_mut();
        let status = unsafe { c_lib::hlo_module_from_proto(proto.0, &mut ptr) };
handle_status(status)?;
Ok(Self(ptr))
}
pub fn to_string(&self) -> Result<String> {
let str_ptr = unsafe {
c_lib::hlo_module_to_string(self.0)
};
        let module_str = unsafe {
            let c_str = std::ffi::CStr::from_ptr(str_ptr);
            let result = c_str.to_str()?.to_string();
            libc::free(str_ptr as *mut _);
            result
        };
        Ok(module_str)
}
pub fn get_entry_computation(&self) -> Result<HloComputation> {
let entry_comp = unsafe {
c_lib::hlo_module_entry_computation(self.0)
};
Ok(HloComputation(entry_comp))
}
pub fn computation_count(&self) -> Result<i64> {
let comp_count = unsafe {
c_lib::hlo_module_computation_count(self.0)
};
Ok(comp_count)
}
pub fn instruction_count(&self) -> Result<i64> {
let instruct_count = unsafe {
c_lib::hlo_module_instruction_count(self.0)
};
Ok(instruct_count)
}
}
#[pyclass(unsendable)]
pub struct HloComputation(c_lib::hlo_computation);
| ivy/ivy/engines/XLA/rust_api/src/wrappers/mod.rs/0 | {
"file_path": "ivy/ivy/engines/XLA/rust_api/src/wrappers/mod.rs",
"repo_id": "ivy",
"token_count": 6864
} | 12 |
"""Collection of Jax activation functions, wrapped to fit Ivy syntax and
signature."""
# global
import jax
import jax.numpy as jnp
from typing import Optional, Union, Literal
# local
from ivy.functional.backends.jax import JaxArray
def gelu(
x: JaxArray,
/,
*,
approximate: bool = False,
complex_mode="jax",
out: Optional[JaxArray] = None,
) -> JaxArray:
return jax.nn.gelu(x, approximate)
def leaky_relu(
x: JaxArray,
/,
*,
alpha: float = 0.2,
complex_mode="jax",
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.asarray(jnp.where(x > 0, x, jnp.multiply(x, alpha)), x.dtype)
def relu(
x: JaxArray, /, *, complex_mode="jax", out: Optional[JaxArray] = None
) -> JaxArray:
return jax.nn.relu(x)
def sigmoid(
x: JaxArray, /, *, complex_mode="jax", out: Optional[JaxArray] = None
) -> JaxArray:
return 1 / (1 + jnp.exp(-x))
def softmax(
x: JaxArray, /, *, axis: Optional[int] = None, out: Optional[JaxArray] = None
) -> JaxArray:
if axis is None:
axis = -1
return jax.nn.softmax(x, axis)
def softplus(
x: JaxArray,
/,
*,
beta: Optional[Union[int, float]] = None,
threshold: Optional[Union[int, float]] = None,
complex_mode="jax",
out: Optional[JaxArray] = None,
) -> JaxArray:
if beta is not None and beta != 1:
x_beta = x * beta
res = (
jnp.add(
jnp.log1p(jnp.exp(-jnp.abs(x_beta))),
jnp.maximum(x_beta, 0).astype(x.dtype),
)
) / beta
else:
x_beta = x
res = jnp.add(
jnp.log1p(jnp.exp(-jnp.abs(x_beta))),
jnp.maximum(x_beta, 0).astype(x.dtype),
)
if threshold is not None:
return jnp.where(x_beta > threshold, x, res).astype(x.dtype)
return res.astype(x.dtype)
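# Both branches above rely on the numerically stable identity
# softplus(x) = log(1 + exp(x)) = log1p(exp(-|x|)) + max(x, 0),
# which avoids overflow of exp() for large positive inputs.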
# Softsign
def softsign(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:
return jax.nn.soft_sign(x)
def log_softmax(
x: JaxArray,
/,
*,
axis: Optional[int] = -1,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[JaxArray] = None,
):
return jax.nn.log_softmax(x, axis)
def mish(
x: JaxArray,
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[JaxArray] = None,
) -> JaxArray:
return x * jnp.tanh(jax.nn.softplus(x))
def hardswish(
x: JaxArray,
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[JaxArray] = None,
) -> JaxArray:
return jax.nn.hard_swish(x)
| ivy/ivy/functional/backends/jax/activations.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/activations.py",
"repo_id": "ivy",
"token_count": 1243
} | 13 |
import math
from typing import Optional, Tuple, Sequence, Union
import jax.numpy as jnp
import jax.scipy.linalg as jla
from collections import namedtuple
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.backends.jax import JaxArray
import ivy
from ivy.functional.ivy.experimental.linear_algebra import _check_valid_dimension_size
from . import backend_version
def diagflat(
x: JaxArray,
/,
*,
offset: int = 0,
padding_value: float = 0,
align: str = "RIGHT_LEFT",
num_rows: int = -1,
num_cols: int = -1,
out: Optional[JaxArray] = None,
):
if len(x.shape) > 1:
x = jnp.ravel(x)
# Trying to avoid segfaults
x = jnp.copy(x)
if math.prod(x.shape) == 1 and offset == 0 and num_rows <= 1 and num_cols <= 1:
return x
# This is used as part of Tensorflow's shape calculation
# See their source code to see what they're doing with it
lower_diag_index = offset
upper_diag_index = lower_diag_index
x_shape = x.shape
x_rank = len(x_shape)
num_diags = upper_diag_index - lower_diag_index + 1
max_diag_len = x_shape[x_rank - 1]
min_num_rows = max_diag_len - min(upper_diag_index, 0)
min_num_cols = max_diag_len + max(lower_diag_index, 0)
if num_rows == -1 and num_cols == -1:
num_rows = max(min_num_rows, min_num_cols)
num_cols = num_rows
elif num_rows == -1:
num_rows = min_num_rows
elif num_cols == -1:
num_cols = min_num_cols
output_shape = list(x_shape)
if num_diags == 1:
output_shape[x_rank - 1] = num_rows
output_shape.append(num_cols)
else:
output_shape[x_rank - 2] = num_rows
output_shape[x_rank - 1] = num_cols
output_array = jnp.full(output_shape, padding_value)
diag_len = max(min(num_rows, num_cols) - abs(offset) + 1, 1)
if len(x) < diag_len:
x = jnp.array(list(x) + [padding_value] * max((diag_len - len(x), 0)))
temp = x - jnp.full(x.shape, padding_value)
diagonal_to_add = jnp.diag(temp, k=offset)
diagonal_to_add = diagonal_to_add[tuple(slice(0, n) for n in output_array.shape)]
output_array += jnp.pad(
diagonal_to_add,
[
(0, max([output_array.shape[0] - diagonal_to_add.shape[0], 0])),
(0, max([output_array.shape[1] - diagonal_to_add.shape[1], 0])),
],
mode="constant",
)
ret = output_array.astype(x.dtype)
if ivy.exists(out):
ivy.inplace_update(out, ret)
return ret
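# Illustration of diagflat above: a 1-D input is placed on the offset-th diagonal of
# a 2-D output, e.g. x = [1, 2] with offset=0 gives [[1, 0], [0, 2]], with
# `padding_value` filling the remaining entries.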
def kron(
a: JaxArray,
b: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.kron(a, b)
def matrix_exp(
x: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jla.expm(x)
def eig(
x: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> Tuple[JaxArray]:
return jnp.linalg.eig(x)
@with_supported_dtypes(
{"0.4.14 and below": ("complex", "float32", "float64")}, backend_version
)
def eigvals(x: JaxArray, /) -> JaxArray:
return jnp.linalg.eigvals(x)
def adjoint(
x: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
_check_valid_dimension_size(x)
axes = list(range(len(x.shape)))
axes[-1], axes[-2] = axes[-2], axes[-1]
return jnp.conjugate(jnp.transpose(x, axes=axes))
def solve_triangular(
x1: JaxArray,
x2: JaxArray,
/,
*,
upper: bool = True,
adjoint: bool = False,
unit_diagonal: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jla.solve_triangular(
x1,
x2,
lower=not upper,
trans="C" if adjoint else "N",
unit_diagonal=unit_diagonal,
)
def multi_dot(
x: Sequence[JaxArray],
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.linalg.multi_dot(x)
def cond(
x: JaxArray,
/,
*,
p: Optional[Union[int, str, None]] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.linalg.cond(x, p=p)
def lu_factor(
x: JaxArray,
/,
*,
pivot: Optional[bool] = True,
out: Optional[JaxArray] = None,
) -> Tuple[JaxArray, JaxArray]:
ret = jla.lu(x)
ret_tuple = namedtuple("lu_factor", ["LU", "p"])
ret_1 = ret[1]
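    # jax.scipy.linalg.lu returns (P, L, U); pack the strictly-lower part of L
    # together with U into a single LU matrix and return P as the pivot output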
return ret_tuple((ret_1 - jnp.eye(*ret_1.shape)) + ret[2], ret[0])
def lu_solve(
lu: Tuple[JaxArray, JaxArray],
p: JaxArray,
b: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jla.lu_solve((lu, p), b)
def dot(
a: JaxArray,
b: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.dot(a, b)
dot.support_native_out = True
| ivy/ivy/functional/backends/jax/experimental/linear_algebra.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/experimental/linear_algebra.py",
"repo_id": "ivy",
"token_count": 2247
} | 14 |
from typing import Optional, Union
import mxnet as mx
from ivy.utils.exceptions import IvyNotImplementedException
def logit(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
eps: Optional[float] = None,
out: Optional[None] = None,
) -> None:
raise IvyNotImplementedException()
def thresholded_relu(
x: None, /, *, threshold: Union[(int, float)] = 0, out: Optional[None] = None
) -> None:
raise IvyNotImplementedException()
def relu6(x: None, /, *, out: Optional[None] = None) -> None:
raise IvyNotImplementedException()
def logsigmoid(input: None) -> None:
raise IvyNotImplementedException()
def selu(x: None, /, *, out: Optional[None] = None) -> None:
raise IvyNotImplementedException()
def silu(x: None, /, *, out: Optional[None] = None) -> None:
raise IvyNotImplementedException()
def celu(
x: None, /, *, alpha: float = 0.2, complex_mode="jax", out: Optional[None] = None
) -> None:
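    # CELU(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)); expm1 keeps the
    # negative branch numerically accurate for small x / alpha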
return mx.nd.maximum(0, x) + alpha * mx.nd.expm1(mx.nd.minimum(0, x) / alpha)
| ivy/ivy/functional/backends/mxnet/experimental/activations.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/experimental/activations.py",
"repo_id": "ivy",
"token_count": 401
} | 15 |
from typing import Union, Optional, Tuple, Sequence
import mxnet as mx
from ivy.utils.exceptions import IvyNotImplementedException
def histogram(
a: None,
/,
*,
bins: Optional[Union[(int, None, str)]] = None,
axis: Optional[None] = None,
extend_lower_interval: Optional[bool] = False,
extend_upper_interval: Optional[bool] = False,
dtype: Optional[None] = None,
range: Optional[Tuple[float]] = None,
weights: Optional[None] = None,
density: Optional[bool] = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Tuple[None]:
raise IvyNotImplementedException()
def median(
input: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[(Tuple[int], int)]] = None,
keepdims: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def nanmean(
a: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[(int, Tuple[int])]] = None,
keepdims: bool = False,
dtype: Optional[None] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def quantile(
a: Union[(None, mx.ndarray.NDArray)],
q: Union[(None, float)],
/,
*,
axis: Optional[Union[(int, Sequence[int])]] = None,
interpolation: str = "linear",
keepdims: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def corrcoef(
x: None,
/,
*,
y: None,
rowvar: bool = True,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> None:
raise IvyNotImplementedException()
def nanmedian(
input: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[(Tuple[int], int)]] = None,
keepdims: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def bincount(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
weights: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
minlength: int = 0,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def cov(
x1: None,
x2: None = None,
/,
*,
rowVar: bool = True,
bias: bool = False,
ddof: Optional[int] = None,
fweights: Optional[None] = None,
aweights: Optional[None] = None,
dtype: Optional[type] = None,
) -> None:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/experimental/statistical.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/experimental/statistical.py",
"repo_id": "ivy",
"token_count": 1144
} | 16 |
"""Collection of Numpy activation functions, wrapped to fit Ivy syntax and
signature."""
# global
from typing import Optional, Union, Literal
import numpy as np
# local
import ivy
from ivy.functional.backends.numpy.helpers import _scalar_output_to_0d_array
from ivy.func_wrapper import with_supported_dtypes
from . import backend_version
@with_supported_dtypes(
{
"1.26.3 and below": (
"float",
"int",
"complex",
)
},
backend_version,
)
@_scalar_output_to_0d_array
def relu(
x: np.ndarray, /, *, complex_mode="jax", out: Optional[np.ndarray] = None
) -> np.ndarray:
return np.maximum(x, 0, out=out, dtype=x.dtype)
relu.support_native_out = True
def leaky_relu(
x: np.ndarray,
/,
*,
alpha: float = 0.2,
complex_mode="jax",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.asarray(np.where(x > 0, x, np.multiply(x, alpha)), x.dtype)
@_scalar_output_to_0d_array
def gelu(
x: np.ndarray,
/,
*,
approximate: bool = False,
complex_mode="jax",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if approximate:
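        # tanh approximation of GELU; 0.7978845608 ~= sqrt(2 / pi)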
ret = 0.5 * x * (1 + np.tanh(0.7978845608 * (x + 0.044715 * x * x * x)))
else:
ret = 0.5 * x * (1 + ivy.erf(x / np.sqrt(2)))
return ivy.astype(ret, x.dtype, copy=False)
def sigmoid(
x: np.ndarray, /, *, complex_mode="jax", out: Optional[np.ndarray] = None
) -> np.ndarray:
if not ivy.is_array(x):
return np.asarray(1 / (1 + np.exp(-x)))
return np.asarray(1 / (1 + np.exp(-x))).astype(x.dtype)
def softmax(
x: np.ndarray, /, *, axis: Optional[int] = None, out: Optional[np.ndarray] = None
) -> np.ndarray:
axis = -1 if axis is None else axis
exp_x = np.exp(x - np.max(x, axis=axis, keepdims=True))
return np.divide(exp_x, np.sum(exp_x, axis=axis, keepdims=True), out=out)
softmax.support_native_out = True
@_scalar_output_to_0d_array
def softplus(
x: np.ndarray,
/,
*,
beta: Optional[Union[int, float]] = None,
threshold: Optional[Union[int, float]] = None,
complex_mode="jax",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if beta is not None and beta != 1:
x_beta = x * beta
res = (
np.add(
np.log1p(np.exp(-np.abs(x_beta))),
np.maximum(x_beta, 0, dtype=x.dtype),
out=out,
)
) / beta
else:
x_beta = x
res = np.add(
np.log1p(np.exp(-np.abs(x_beta))),
np.maximum(x_beta, 0, dtype=x.dtype),
out=out,
)
if threshold is not None:
return np.where(x_beta > threshold, x, res).astype(x.dtype)
return res.astype(x.dtype)
softplus.support_native_out = True
# Softsign
@_scalar_output_to_0d_array
def softsign(x: np.ndarray, /, out: Optional[np.ndarray] = None) -> np.ndarray:
return np.divide(x, 1 + np.abs(x), out=out).astype(x.dtype)
softsign.support_native_out = True
@_scalar_output_to_0d_array
def log_softmax(
x: np.ndarray,
/,
*,
axis: Optional[int] = -1,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
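    # shift by the per-axis maximum so that np.exp cannot overflow; the shift
    # cancels out of the final log-softmax value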
x_max = np.max(x, axis=axis, keepdims=True)
sub_tmp = np.subtract(x, x_max)
ret = np.sum(np.exp(sub_tmp), axis=axis, keepdims=True)
ret = np.log(ret)
ret = np.subtract(sub_tmp, ret)
return ret
log_softmax.support_native_out = True
@_scalar_output_to_0d_array
def mish(
x: np.ndarray,
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return x * np.tanh(np.log1p(np.exp(x)))
mish.support_native_out = True
@_scalar_output_to_0d_array
def hardswish(
x: np.ndarray,
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
max_x_3 = np.maximum(x + 3, 0, dtype=x.dtype)
return (x * np.minimum(max_x_3, 6, out=out, dtype=x.dtype) / 6).astype(x.dtype)
hardswish.support_native_out = True
| ivy/ivy/functional/backends/numpy/activations.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/activations.py",
"repo_id": "ivy",
"token_count": 1985
} | 17 |
import math
from typing import Optional, Tuple, Sequence, Union, Any
import numpy as np
import ivy
from ivy.func_wrapper import with_supported_dtypes, with_unsupported_dtypes
from ivy.utils.exceptions import IvyNotImplementedException
from .. import backend_version
from ivy.functional.ivy.experimental.linear_algebra import _check_valid_dimension_size
def diagflat(
x: np.ndarray,
/,
*,
offset: int = 0,
padding_value: float = 0,
align: str = "RIGHT_LEFT",
num_rows: int = -1,
num_cols: int = -1,
out: Optional[np.ndarray] = None,
):
out_dtype = x.dtype if out is None else out.dtype
if len(x.shape) > 1:
x = np.ravel(x)
if math.prod(x.shape) == 1 and offset == 0 and num_rows <= 1 and num_cols <= 1:
return x
# This is used as part of Tensorflow's shape calculation
# See their source code to see what they're doing with it
lower_diag_index = offset
upper_diag_index = lower_diag_index
x_shape = x.shape
x_rank = len(x_shape)
num_diags = upper_diag_index - lower_diag_index + 1
max_diag_len = x_shape[x_rank - 1]
min_num_rows = max_diag_len - min(upper_diag_index, 0)
min_num_cols = max_diag_len + max(lower_diag_index, 0)
if num_rows == -1 and num_cols == -1:
num_rows = max(min_num_rows, min_num_cols)
num_cols = num_rows
elif num_rows == -1:
num_rows = min_num_rows
elif num_cols == -1:
num_cols = min_num_cols
output_shape = list(x_shape)
if num_diags == 1:
output_shape[x_rank - 1] = num_rows
output_shape.append(num_cols)
else:
output_shape[x_rank - 2] = num_rows
output_shape[x_rank - 1] = num_cols
output_array = np.full(output_shape, padding_value)
diag_len = max(min(num_rows, num_cols) - abs(offset) + 1, 1)
if len(x) < diag_len:
x = np.array(list(x) + [padding_value] * max((diag_len - len(x), 0)))
diagonal_to_add = np.diag(x - np.full_like(x, padding_value), k=offset)
diagonal_to_add = diagonal_to_add[tuple(slice(0, n) for n in output_array.shape)]
ret = diagonal_to_add.astype(out_dtype)
if ivy.exists(out):
ivy.inplace_update(out, ret)
return ret
def kron(
a: np.ndarray,
b: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.kron(a, b)
kron.support_native_out = False
@with_supported_dtypes(
{"1.26.3 and below": ("float32", "float64", "complex64", "complex128")},
backend_version,
)
def matrix_exp(
x: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
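    # matrix exponential via eigendecomposition: if x = V diag(w) V^-1 then
    # exp(x) = V diag(exp(w)) V^-1 (assumes x is diagonalizable)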
eig_vals, eig_vecs = np.linalg.eig(x)
exp_diag = np.exp(eig_vals)
exp_diag_mat = np.diag(exp_diag)
exp_mat = eig_vecs @ exp_diag_mat @ np.linalg.inv(eig_vecs)
return exp_mat.astype(x.dtype)
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def eig(
x: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, np.ndarray]:
e, v = np.linalg.eig(x)
return e.astype(complex), v.astype(complex)
eig.support_native_out = False
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def eigvals(x: np.ndarray, /) -> np.ndarray:
e = np.linalg.eigvals(x)
return e.astype(complex)
eigvals.support_native_out = False
def adjoint(
x: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
_check_valid_dimension_size(x)
axes = list(range(len(x.shape)))
axes[-1], axes[-2] = axes[-2], axes[-1]
return np.conjugate(np.transpose(x, axes=axes))
_adjoint = adjoint
def solve_triangular(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
upper: bool = True,
adjoint: bool = False,
unit_diagonal: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
# NumPy does not expose an API for `trsm`, so we have to implement substitution
# in Python. There is no need to support gradients for this backend.
# Pre: `x1` is square, `x1` and `x2` have the same number `n` of rows.
n = x1.shape[-2]
ret = x2.copy()
if adjoint:
x1 = _adjoint(x1)
upper = not upper
if unit_diagonal:
for i in range(n):
x1[..., i, i] = 1
if upper:
for i in reversed(range(n)):
ret[..., i, :] /= x1[..., i, np.newaxis, i]
ret[..., :i, :] -= x1[..., :i, np.newaxis, i] * ret[..., np.newaxis, i, :]
else:
for i in range(n):
ret[..., i, :] /= x1[..., i, np.newaxis, i]
ret[..., i + 1 :, :] -= (
x1[..., i + 1 :, np.newaxis, i] * ret[..., np.newaxis, i, :]
)
return ret
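# Worked example of the substitution above (a sketch, values checked by hand):
#     x1 = np.array([[2.0, 1.0], [0.0, 1.0]])   # upper triangular
#     x2 = np.array([[3.0], [1.0]])
#     solve_triangular(x1, x2, upper=True)       # -> [[1.0], [1.0]]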
def multi_dot(
x: Sequence[np.ndarray],
/,
*,
out: Optional[np.array] = None,
) -> np.ndarray:
return np.linalg.multi_dot(x, out=out)
multi_dot.support_native_out = True
def cond(
x: np.ndarray,
/,
*,
p: Optional[Union[None, int, str]] = None,
out: Optional[np.ndarray] = None,
) -> Any:
return np.linalg.cond(x, p=p)
cond.support_native_out = False
def lu_factor(
x: np.ndarray,
/,
*,
pivot: Optional[bool] = True,
out: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray]:
raise IvyNotImplementedException()
def lu_solve(
lu: Tuple[np.ndarray],
p: np.ndarray,
b: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
raise IvyNotImplementedException()
def dot(
a: np.ndarray,
b: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.dot(a, b, out=out)
dot.support_native_out = True
| ivy/ivy/functional/backends/numpy/experimental/linear_algebra.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/experimental/linear_algebra.py",
"repo_id": "ivy",
"token_count": 2662
} | 18 |
# global
import math
from numbers import Number
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
def _flat_array_to_1_dim_array(x):
return x.reshape((1,)) if x.shape == () else x
# Array API Standard #
# -------------------#
def concat(
xs: Union[Tuple[np.ndarray, ...], List[np.ndarray]],
/,
*,
axis: int = 0,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
is_tuple = type(xs) is tuple
if axis is None:
if is_tuple:
xs = list(xs)
for i in range(len(xs)):
if xs[i].shape == ():
xs[i] = np.ravel(xs[i])
if is_tuple:
xs = tuple(xs)
ret = np.concatenate(xs, axis, out=out)
highest_dtype = xs[0].dtype
for i in xs:
highest_dtype = ivy.as_native_dtype(ivy.promote_types(highest_dtype, i.dtype))
return ivy.astype(ret, highest_dtype, copy=False)
concat.support_native_out = True
def expand_dims(
x: np.ndarray,
/,
*,
copy: Optional[bool] = None,
axis: Union[int, Sequence[int]] = 0,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.expand_dims(x, axis)
def flip(
x: np.ndarray,
/,
*,
copy: Optional[bool] = None,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if copy:
x = x.copy()
num_dims = len(x.shape)
if not num_dims:
return x
if axis is None:
axis = list(range(num_dims))
if isinstance(axis, int):
axis = [axis]
axis = [item + num_dims if item < 0 else item for item in axis]
return np.flip(x, axis)
def permute_dims(
x: np.ndarray,
/,
axes: Tuple[int, ...],
*,
copy: Optional[bool] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.transpose(x, axes)
def reshape(
x: np.ndarray,
/,
shape: Union[ivy.NativeShape, Sequence[int]],
*,
copy: Optional[bool] = None,
order: str = "C",
allowzero: bool = True,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
ivy.utils.assertions.check_elem_in_list(order, ["C", "F"])
if not allowzero:
shape = [
new_s if con else old_s
for new_s, con, old_s in zip(shape, np.array(shape) != 0, x.shape)
]
return np.reshape(x, shape, order=order)
def roll(
x: np.ndarray,
/,
shift: Union[int, Sequence[int]],
*,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.roll(x, shift, axis)
def squeeze(
x: np.ndarray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
copy: Optional[bool] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if isinstance(axis, list):
axis = tuple(axis)
if x.shape == ():
if axis is None or axis == 0 or axis == -1:
return x
raise ivy.utils.exceptions.IvyException(
f"tried to squeeze a zero-dimensional input by axis {axis}"
)
return np.squeeze(x, axis=axis)
def stack(
arrays: Union[Tuple[np.ndarray], List[np.ndarray]],
/,
*,
axis: int = 0,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.stack(arrays, axis, out=out)
stack.support_native_out = True
# Extra #
# ------#
def split(
x: np.ndarray,
/,
*,
copy: Optional[bool] = None,
num_or_size_splits: Optional[Union[int, Sequence[int], np.ndarray]] = None,
axis: int = 0,
with_remainder: bool = False,
) -> List[np.ndarray]:
if x.shape == ():
if num_or_size_splits is not None and num_or_size_splits != 1:
raise ivy.utils.exceptions.IvyException(
"input array had no shape, but num_sections specified was"
f" {num_or_size_splits}"
)
return [x]
if num_or_size_splits is None:
num_or_size_splits = x.shape[axis]
elif isinstance(num_or_size_splits, int) and with_remainder:
num_chunks = x.shape[axis] / num_or_size_splits
num_chunks_int = math.floor(num_chunks)
remainder = num_chunks - num_chunks_int
if remainder != 0:
num_or_size_splits = [num_or_size_splits] * num_chunks_int + [
int(remainder * num_or_size_splits)
]
elif isinstance(num_or_size_splits, np.ndarray):
num_or_size_splits = num_or_size_splits.tolist()
if isinstance(num_or_size_splits, (list, tuple)):
num_or_size_splits = np.cumsum(num_or_size_splits[:-1])
if copy:
newarr = x.copy()
return np.split(newarr, num_or_size_splits, axis)
return np.split(x, num_or_size_splits, axis)
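# Example of the `with_remainder` handling above: a length-5 axis split with
# num_or_size_splits=2 and with_remainder=True yields chunks of sizes [2, 2, 1].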
@with_unsupported_dtypes({"1.26.3 and below": ("uint64",)}, backend_version)
def repeat(
x: np.ndarray,
/,
repeats: Union[int, List[int]],
*,
axis: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.repeat(x, repeats, axis)
def tile(
x: np.ndarray, /, repeats: Sequence[int], *, out: Optional[np.ndarray] = None
) -> np.ndarray:
return np.tile(x, repeats)
def constant_pad(
x: np.ndarray,
/,
pad_width: List[List[int]],
*,
value: Number = 0.0,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.pad(_flat_array_to_1_dim_array(x), pad_width, constant_values=value)
def zero_pad(
x: np.ndarray, /, pad_width: List[List[int]], *, out: Optional[np.ndarray] = None
):
return np.pad(_flat_array_to_1_dim_array(x), pad_width)
def swapaxes(
x: np.ndarray,
axis0: int,
axis1: int,
/,
*,
copy: Optional[bool] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if copy:
x = x.copy()
return np.swapaxes(x, axis0, axis1)
def unstack(
x: np.ndarray,
/,
*,
copy: Optional[bool] = None,
axis: int = 0,
keepdims: bool = False,
) -> List[np.ndarray]:
if x.shape == ():
return [x]
x_split = None
if copy:
newarr = x.copy()
x_split = np.split(newarr, newarr.shape[axis], axis)
else:
x_split = np.split(x, x.shape[axis], axis)
if keepdims:
return x_split
return [np.squeeze(item, axis) for item in x_split]
def clip(
x: np.ndarray,
/,
x_min: Optional[Union[Number, np.ndarray]] = None,
x_max: Optional[Union[Number, np.ndarray]] = None,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
promoted_type = x.dtype
if x_min is not None:
if not hasattr(x_min, "dtype"):
x_min = ivy.array(x_min).data
promoted_type = ivy.as_native_dtype(ivy.promote_types(x.dtype, x_min.dtype))
if x_max is not None:
if not hasattr(x_max, "dtype"):
x_max = ivy.array(x_max).data
promoted_type = ivy.as_native_dtype(
ivy.promote_types(promoted_type, x_max.dtype)
)
return np.clip(x.astype(promoted_type), x_min, x_max, out=out)
clip.support_native_out = True
def as_strided(
x: np.ndarray,
shape: Union[ivy.NativeShape, Sequence[int]],
strides: Sequence[int],
/,
) -> np.ndarray:
return np.lib.stride_tricks.as_strided(
x,
shape=shape,
strides=strides,
)
| ivy/ivy/functional/backends/numpy/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/manipulation.py",
"repo_id": "ivy",
"token_count": 3499
} | 19 |
# global
import paddle as paddle
backend_version = {"version": paddle.version.full_version}
from .activations import *
from .converters import *
from .creation import *
from .data_type import *
from .device import *
from .elementwise import *
from .general import *
from .gradients import *
from .layers import *
from .losses import *
from .linear_algebra import *
from .manipulation import *
from .norms import *
from .random import *
from .searching import *
from .set import *
from .sorting import *
from .sparse_array import *
from .statistical import *
from .utility import *
| ivy/ivy/functional/backends/paddle/experimental/__init__.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/experimental/__init__.py",
"repo_id": "ivy",
"token_count": 169
} | 20 |
# global
torch_scatter = None
from typing import Union, Optional, Sequence
import paddle
import ivy
from ivy.func_wrapper import (
with_supported_dtypes,
with_supported_device_and_dtypes,
)
import ivy.functional.backends.paddle as paddle_backend
from ivy.utils.einsum_parser import legalise_einsum_expr
# local
from . import backend_version
# Array API Standard #
# -------------------#
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64", "int32", "int64")},
backend_version,
)
def min(
x: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[paddle.Tensor] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
ret_dtype = x.dtype
if paddle.is_complex(x):
real = paddle.amin(x.real(), axis=axis, keepdim=keepdims)
imag = paddle.amin(x.imag(), axis=axis, keepdim=keepdims)
ret = paddle.complex(real, imag)
else:
if where is not None:
max_val = (
ivy.iinfo(x.dtype).max
if ivy.is_int_dtype(x.dtype)
else ivy.finfo(x.dtype).max
)
max_val = max_val / 10
            # multiplying paddle.ones_like(x) by the full dtype maximum was observed
            # to overflow to a negative value, so the sentinel is reduced first
val = paddle.ones_like(x) * max_val
val = val.astype(ret_dtype)
x = paddle.where(where, x, val)
ret = paddle.amin(x, axis=axis, keepdim=keepdims)
# The following code is to simulate other frameworks
# output shapes behaviour since min output dim is 1 in paddle
if isinstance(axis, Sequence):
if len(axis) == x.ndim:
axis = None
if (x.ndim == 1 or axis is None) and not keepdims:
ret = ret.squeeze()
if initial is not None:
initial = paddle.to_tensor(initial, dtype=ret_dtype)
ret = paddle.minimum(ret, initial)
return ret.astype(ret_dtype)
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64", "int32", "int64")},
backend_version,
)
def max(
x: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
ret_dtype = x.dtype
if paddle.is_complex(x):
const = paddle.to_tensor(1j, dtype=x.dtype)
real_max = paddle.max(x.real(), axis=axis, keepdim=keepdims)
imag = paddle.where(
x.real() == real_max, x.imag(), paddle.full_like(x.imag(), -1e10)
)
        # among elements attaining the maximal real part, keep the largest imaginary part
img_max = paddle.max(imag, axis=axis, keepdim=keepdims)
img_max = paddle.cast(img_max, x.dtype)
return paddle.add(
paddle.cast(real_max, x.dtype), paddle.multiply(img_max, const)
)
else:
ret = paddle.amax(x, axis=axis, keepdim=keepdims)
# The following code is to simulate other frameworks
# output shapes behaviour since min output dim is 1 in paddle
if isinstance(axis, Sequence):
if len(axis) == x.ndim:
axis = None
if (x.ndim == 1 or axis is None) and not keepdims:
ret = ret.squeeze()
return ret.astype(ret_dtype)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "complex", "float32", "float64")}, backend_version
)
def mean(
x: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
ret_dtype = x.dtype
if paddle.is_complex(x):
ret = paddle.complex(
paddle.mean(x.real(), axis=axis, keepdim=keepdims),
paddle.mean(x.imag(), axis=axis, keepdim=keepdims),
)
else:
ret = paddle.mean(x, axis=axis, keepdim=keepdims)
# The following code is to simulate other frameworks
# output shapes behaviour since min output dim is 1 in paddle
if isinstance(axis, Sequence):
if len(axis) == x.ndim:
axis = None
if (x.ndim == 1 or axis is None) and not keepdims:
ret = ret.squeeze()
return ret.astype(ret_dtype)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, backend_version
)
def prod(
x: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
dtype: Optional[paddle.dtype] = None,
keepdims: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
ret = paddle.prod(x, axis=axis, keepdim=keepdims, dtype=dtype)
    if dtype is not None and ret.dtype != dtype:
ret = ret.cast(dtype)
return ret
def _std(x, axis, correction, keepdim):
u = paddle_backend.mean(x, axis=axis, keepdims=True)
out = paddle_backend.sum(
paddle_backend.pow(paddle_backend.subtract(x, u), 2),
axis=axis,
keepdims=keepdim,
)
num_elm_in = paddle.prod(paddle.to_tensor(x.shape)).item()
num_elm_out = paddle.prod(paddle.to_tensor(out.shape)).item()
n = num_elm_out / num_elm_in
out = paddle_backend.sqrt(paddle_backend.multiply(out, n))
if correction:
n = paddle_backend.sqrt(
paddle_backend.divide(num_elm_in, (num_elm_in - correction * num_elm_out))
)
out = paddle_backend.multiply(out, n)
return out
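# Net effect of the helper above: std = sqrt(sum((x - mean)**2) / (N - correction)),
# where N is the number of input elements reduced into each output element.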
def std(
x: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
correction: Union[int, float] = 0,
keepdims: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return _std(x, axis, correction, keepdims).cast(x.dtype)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "float16", "float32", "float64", "int32", "int64")},
backend_version,
)
def sum(
x: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
dtype: Optional[paddle.dtype] = None,
keepdims: Optional[bool] = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
dtype = x.dtype if dtype is None else dtype
dtype = ivy.as_ivy_dtype(dtype)
ret = paddle.sum(x, axis=axis, dtype=dtype, keepdim=keepdims)
# The following code is to simulate other frameworks
# output shapes behaviour since min output dim is 1 in paddle
if isinstance(axis, Sequence):
if len(axis) == x.ndim:
axis = None
if (x.ndim == 1 or axis is None) and not keepdims:
ret = paddle_backend.squeeze(ret, axis=-1)
return ret
def var(
x: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
correction: Union[int, float] = 0,
keepdims: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
ret = paddle_backend.pow(_std(x, axis, correction, keepdims), 2).cast(x.dtype)
return ret
# Extra #
# ----- #
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64", "int32", "int64")},
backend_version,
)
def cumprod(
x: paddle.Tensor,
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[paddle.dtype] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
dtype = dtype if dtype is not None else x.dtype
x = paddle.cast(x, dtype)
if not (exclusive or reverse):
return paddle.cumprod(x, dim=axis).cast(dtype)
elif exclusive and reverse:
x = paddle.cumprod(paddle_backend.flip(x, axis=(axis,)), dim=axis)
x = paddle_backend.swapaxes(x, axis, -1)
x = paddle_backend.concat(
[
paddle.ones_like(
paddle_backend.get_item(x, (..., slice(-1, None, None)))
),
paddle_backend.get_item(x, (..., slice(None, -1, None))),
],
axis=-1,
)
x = paddle_backend.swapaxes(x, axis, -1)
return paddle_backend.flip(x, axis=(axis,)).cast(dtype)
elif exclusive:
x = paddle_backend.swapaxes(x, axis, -1)
x = paddle_backend.concat(
[
paddle.ones_like(
paddle_backend.get_item(x, (..., slice(-1, None, None)))
),
paddle_backend.get_item(x, (..., slice(None, -1, None))),
],
axis=-1,
)
x = paddle.cumprod(x, -1)
return paddle_backend.swapaxes(x, axis, -1).cast(dtype)
else:
x = paddle.cumprod(paddle_backend.flip(x, axis=(axis,)), dim=axis)
return paddle_backend.flip(x, axis=axis).cast(dtype)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, backend_version
)
def cumsum(
x: paddle.Tensor,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
*,
dtype: Optional[paddle.dtype] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
dtype = dtype if dtype is not None else x.dtype
x = paddle.cast(x, dtype)
if not (exclusive or reverse):
return paddle.cumsum(x, axis=axis).cast(dtype)
elif exclusive and reverse:
x = paddle.cumsum(paddle_backend.flip(x, axis=(axis,)), axis=axis)
x = paddle_backend.swapaxes(x, axis, -1)
x = paddle_backend.concat(
[
paddle.zeros_like(
paddle_backend.get_item(x, (..., slice(-1, None, None)))
),
paddle_backend.get_item(x, (..., slice(None, -1, None))),
],
axis=-1,
)
x = paddle_backend.swapaxes(x, axis, -1)
return paddle_backend.flip(x, axis=(axis,)).cast(dtype)
elif exclusive:
x = paddle_backend.swapaxes(x, axis, -1)
x = paddle_backend.concat(
[
paddle.zeros_like(
paddle_backend.get_item(x, (..., slice(-1, None, None)))
),
paddle_backend.get_item(x, (..., slice(None, -1, None))),
],
axis=-1,
)
x = paddle.cumsum(x, -1)
return paddle_backend.swapaxes(x, axis, -1).cast(dtype)
else:
x = paddle.cumsum(paddle_backend.flip(x, axis=(axis,)), axis=axis)
return paddle_backend.flip(x, axis=axis).cast(dtype)
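# Flag semantics above, e.g. for x = [1, 2, 3] along axis 0:
#   cumsum -> [1, 3, 6]; exclusive -> [0, 1, 3];
#   reverse -> [6, 5, 3]; exclusive + reverse -> [5, 3, 0]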
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("float32", "float64", "complex64", "complex128"),
"gpu": (
"bfloat16",
"float16",
"float32",
"float64",
"complex64",
"complex128",
),
},
"2.4.2 and below": {
"cpu": ("float32", "float64", "complex64", "complex128"),
"gpu": ("float16", "float32", "float64", "complex64", "complex128"),
},
},
backend_version,
)
def einsum(
equation: str,
*operands: paddle.Tensor,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
equation = legalise_einsum_expr(*[equation, *operands])
dtype_list = set(map(lambda x: x.dtype, operands))
dtype = dtype_list.pop()
if len(dtype_list) > 0:
for d in dtype_list:
dtype = ivy.promote_types(dtype, d)
operands = list(
map(lambda x: x.cast(dtype) if x.dtype != dtype else x, operands)
)
return paddle.einsum(equation, *operands)
| ivy/ivy/functional/backends/paddle/statistical.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/statistical.py",
"repo_id": "ivy",
"token_count": 5412
} | 21 |
"""Collection of TensorFlow network layers, wrapped to fit Ivy syntax and
signature."""
# global
from typing import Optional, Tuple, Union, Sequence
import tensorflow as tf
from tensorflow.python.types.core import Tensor
# local
import ivy
from ivy.func_wrapper import with_supported_dtypes, with_unsupported_dtypes
from . import backend_version
from ivy.functional.ivy.layers import (
_deconv_length,
_get_x_data_format,
)
@with_supported_dtypes(
{"2.15.0 and below": ("float", "int32", "int64", "complex")},
backend_version,
)
def linear(
x: Union[tf.Tensor, tf.Variable],
weight: Union[tf.Tensor, tf.Variable],
/,
*,
bias: Optional[Union[tf.Tensor, tf.Variable]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
# TODO: try to generalize this for >=2 dimensions
result = (
tf.matmul(x, weight, transpose_b=True)
if len(x.shape) == len(weight.shape) == 2 and x.shape[-1] == weight.shape[-1]
else tf.einsum("...i,...ji->...j", x, weight)
)
if bias is not None:
return tf.add(result, bias)
return result
def _x_dil_before_conv(x, dims, x_dilations, data_format):
# adding dilation in input
x_dilations = [x_dilations] * dims if isinstance(x_dilations, int) else x_dilations
x_dilations_idxs = [i for i, x_dil in enumerate(x_dilations) if x_dil > 1]
if x_dilations_idxs:
if data_format[-1] == "C":
offset = 1
else:
offset = 2
for i in x_dilations_idxs:
h = x.shape[offset + i]
new_height = h + (h - 1) * (x_dilations[i] - 1)
h = tf.eye(new_height, dtype=x.dtype)[:: x_dilations[i]]
x = tf.experimental.numpy.swapaxes(x, offset + i, -1)
x = tf.matmul(x, h)
x = tf.experimental.numpy.swapaxes(x, -1, offset + i)
return x
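# The helper above inserts zeros between input samples by right-multiplying with a
# strided identity matrix, e.g. a length-3 axis with x_dilation=2 becomes length 5:
# [a, b, c] -> [a, 0, b, 0, c].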
def _pad_before_conv(x, padding, dims, data_format):
if isinstance(padding, str):
return x, padding
elif isinstance(padding, int):
pad_list = [(padding, padding)] * dims
else:
pad_list = padding
if data_format[-1] == "C":
pad_list = [(0, 0), *pad_list, (0, 0)]
else:
pad_list = [(0, 0), (0, 0), *pad_list]
return tf.pad(x, pad_list, "CONSTANT"), "VALID"
def _extend_2d_padding(padding, data_format):
if isinstance(padding, str):
return padding
if isinstance(padding, int):
padding = [(padding, padding)] * 2
if data_format[-1] == "C":
padding = [(0, 0)] + padding + [(0, 0)]
else:
padding = [(0, 0), (0, 0)] + padding
return padding
def _transpose_out_pad(
x_shape, filter_shape, strides, padding, dims, dilations, data_format
):
if data_format[-1] == "C":
offset = 1
else:
offset = 2
dilations = [dilations] * dims if isinstance(dilations, int) else dilations
strides = [strides] * dims if isinstance(strides, int) else strides
if isinstance(padding, str):
out_shape = [
_deconv_length(
x_shape[offset + i], strides[i], filter_shape[i], padding, dilations[i]
)
for i in range(dims)
]
else:
if isinstance(padding, int):
padding = [[padding, padding]] * dims
out_shape = [
(x_shape[offset + i] - 1) * strides[i]
- padding[i][0]
- padding[i][1]
+ dilations[i] * (filter_shape[i] - 1)
+ 1
for i in range(dims)
]
if data_format[-1] == "C":
padding = [[0, 0], *padding, [0, 0]]
else:
padding = [[0, 0], [0, 0], *padding]
if data_format[-1] == "C":
out_shape = [x_shape[0], *out_shape, filter_shape[-2]]
else:
out_shape = [x_shape[0], filter_shape[-2], *out_shape]
return out_shape, padding
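# For explicit padding the transposed-conv output length per spatial dim follows
# (in - 1) * stride - pad_lo - pad_hi + dilation * (k - 1) + 1,
# e.g. in=4, stride=2, k=3, dilation=1 and no padding gives 9.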
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "complex")}, backend_version)
def conv1d(
x: Union[tf.Tensor, tf.Variable],
filters: Union[tf.Tensor, tf.Variable],
strides: Union[int, Tuple[int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int]] = 1,
dilations: Union[int, Tuple[int]] = 1,
bias: Optional[Union[tf.Tensor, tf.Variable]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
permuted_x = False
if data_format == "NCW" and ivy.dev(x) == "cpu":
x = tf.transpose(x, (0, 2, 1))
data_format = "NWC"
permuted_x = True
if filter_format == "channel_first":
filters = tf.transpose(filters, (2, 1, 0))
x = _x_dil_before_conv(x, 1, x_dilations, data_format)
x, padding = _pad_before_conv(x, padding, 1, data_format)
res = tf.nn.conv1d(x, filters, strides, padding, data_format, dilations)
if bias is not None:
if data_format[1] == "C":
bias = tf.reshape(bias, [1, -1, 1])
res = tf.math.add(res, bias)
if permuted_x:
res = tf.transpose(res, (0, 2, 1))
return res
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "complex")}, backend_version)
def conv1d_transpose(
x: Union[tf.Tensor, tf.Variable],
filters: Union[tf.Tensor, tf.Variable],
strides: Union[int, Tuple[int]],
padding: str,
/,
*,
output_shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
filter_format: str = "channel_last",
data_format: str = "NWC",
dilations: Union[int, Tuple[int]] = 1,
bias: Optional[Union[tf.Tensor, tf.Variable]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
):
if ivy.dev(x) == "cpu" and (
(dilations > 1) if isinstance(dilations, int) else any(d > 1 for d in dilations)
):
raise ivy.utils.exceptions.IvyException(
"Tensorflow does not support dilations greater than 1 when device is cpu"
)
permuted_x = False
if data_format == "NCW" and ivy.dev(x) == "cpu":
x = tf.transpose(x, (0, 2, 1))
data_format = "NWC"
permuted_x = True
if filter_format == "channel_first":
filters = tf.transpose(filters, (2, 1, 0))
output_shape, padding = _transpose_out_pad(
x.shape, filters.shape, strides, padding, 1, dilations, data_format
)
res = tf.nn.conv1d_transpose(
x, filters, output_shape, strides, padding, data_format, dilations
)
if bias is not None:
if data_format[1] == "C":
bias = tf.reshape(bias, [1, -1, 1])
res = tf.math.add(res, bias)
if permuted_x:
res = tf.transpose(res, (0, 2, 1))
return res
def _extend_3d_strides_dilations(strides, dilations, data_format):
if data_format[-1] == "C":
strides = [1, *([strides] * 3 if isinstance(strides, int) else strides), 1]
dilations = [
1,
*([dilations] * 3 if isinstance(dilations, int) else dilations),
1,
]
else:
strides = [1, 1, *([strides] * 3 if isinstance(strides, int) else strides)]
dilations = [
1,
1,
*([dilations] * 3 if isinstance(dilations, int) else dilations),
]
return strides, dilations
@with_supported_dtypes({"2.14.0 and below": ("float", "int32")}, backend_version)
def conv2d(
x: Union[tf.Tensor, tf.Variable],
filters: Union[tf.Tensor, tf.Variable],
strides: Union[int, Tuple[int, int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
dilations: Union[int, Tuple[int, int]] = 1,
bias: Optional[Union[tf.Tensor, tf.Variable]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
permuted_x = False
if data_format == "NCHW" and ivy.dev(x) == "cpu":
x = tf.transpose(x, (0, 2, 3, 1))
data_format = "NHWC"
permuted_x = True
if filter_format == "channel_first":
filters = tf.transpose(filters, (2, 3, 1, 0))
x = _x_dil_before_conv(x, 2, x_dilations, data_format)
padding = _extend_2d_padding(padding, data_format)
res = tf.nn.conv2d(x, filters, strides, padding, data_format, dilations)
if bias is not None:
if data_format[1] == "C":
bias = tf.reshape(bias, [1, -1, 1, 1])
res = tf.math.add(res, bias)
if permuted_x:
return tf.transpose(res, (0, 3, 1, 2))
return res
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "complex")}, backend_version)
def conv2d_transpose(
x: Union[tf.Tensor, tf.Variable],
filters: Union[tf.Tensor, tf.Variable],
strides: Union[int, Tuple[int, int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
output_shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
filter_format: str = "channel_last",
data_format: str = "NHWC",
dilations: Union[int, Tuple[int, int]] = 1,
bias: Optional[Union[tf.Tensor, tf.Variable]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
):
if ivy.dev(x) == "cpu" and (
(dilations > 1) if isinstance(dilations, int) else any(d > 1 for d in dilations)
):
raise ivy.utils.exceptions.IvyException(
"Tensorflow does not support dilations greater than 1 when device is cpu"
)
permuted_x = False
if data_format == "NCHW" and ivy.dev(x) == "cpu":
x = tf.transpose(x, (0, 2, 3, 1))
data_format = "NHWC"
permuted_x = True
if filter_format == "channel_first":
filters = tf.transpose(filters, (2, 3, 1, 0))
output_shape, padding = _transpose_out_pad(
x.shape,
filters.shape,
strides,
padding,
2,
dilations,
data_format,
)
res = tf.nn.conv2d_transpose(
x, filters, output_shape, strides, padding, data_format, dilations
)
if bias is not None:
if data_format[1] == "C":
bias = tf.reshape(bias, [1, -1, 1, 1])
res = tf.math.add(res, bias)
if permuted_x:
return tf.transpose(res, (0, 3, 1, 2))
return res
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "complex")}, backend_version)
def depthwise_conv2d(
x: Union[tf.Tensor, tf.Variable],
filters: Union[tf.Tensor, tf.Variable],
strides: Union[int, Tuple[int, int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
dilations: Union[int, Tuple[int, int]] = 1,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
strides = [strides] * 2 if isinstance(strides, int) else strides
dilations = [dilations] * 2 if isinstance(dilations, int) else dilations
permuted_x = False
if data_format == "NCHW" and ivy.dev(x) == "cpu":
x = tf.transpose(x, (0, 2, 3, 1))
data_format = "NHWC"
permuted_x = True
if tf.rank(filters) == 3:
filters = tf.expand_dims(filters, -1)
padding = _extend_2d_padding(padding, data_format)
strides = [1, strides[0], strides[1], 1]
res = tf.nn.depthwise_conv2d(x, filters, strides, padding, data_format, dilations)
if permuted_x:
res = tf.transpose(res, (0, 3, 1, 2))
return res
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "complex")}, backend_version)
def conv3d(
x: Union[tf.Tensor, tf.Variable],
filters: Union[tf.Tensor, tf.Variable],
strides: Union[int, Tuple[int, int, int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int, int, int]] = 1,
dilations: Union[int, Tuple[int, int, int]] = 1,
bias: Optional[Union[tf.Tensor, tf.Variable]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
):
permuted_x = False
if data_format == "NCDHW" and ivy.dev(x) == "cpu":
x = tf.transpose(x, (0, 2, 3, 4, 1))
data_format = "NDHWC"
permuted_x = True
if filter_format == "channel_first":
filters = tf.transpose(filters, (2, 3, 4, 1, 0))
x = _x_dil_before_conv(x, 3, x_dilations, data_format)
x, padding = _pad_before_conv(x, padding, 3, data_format)
strides, dilations = _extend_3d_strides_dilations(strides, dilations, data_format)
res = tf.nn.conv3d(x, filters, strides, padding, data_format, dilations)
if bias is not None:
if data_format[1] == "C":
bias = tf.reshape(bias, [1, -1, 1, 1, 1])
res = tf.math.add(res, bias)
if permuted_x:
return tf.transpose(res, (0, 4, 1, 2, 3))
return res
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "complex")}, backend_version)
def conv3d_transpose(
x: Tensor,
filters: Tensor,
strides: Union[int, Tuple[int, int, int]],
padding: str,
/,
*,
output_shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
filter_format: str = "channel_last",
data_format: str = "NDHWC",
dilations: Union[int, Tuple[int, int, int]] = 1,
bias: Optional[Union[tf.Tensor, tf.Variable]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Tensor:
if ivy.dev(x) == "cpu" and (
(dilations > 1) if isinstance(dilations, int) else any(d > 1 for d in dilations)
):
raise ivy.utils.exceptions.IvyException(
"Tensorflow does not support dilations greater than 1 when device is cpu"
)
permuted_x = False
if data_format == "NCDHW" and ivy.dev(x) == "cpu":
x = tf.transpose(x, (0, 2, 3, 4, 1))
data_format = "NDHWC"
permuted_x = True
if filter_format == "channel_first":
filters = tf.transpose(filters, (2, 3, 4, 1, 0))
output_shape, padding = _transpose_out_pad(
x.shape, filters.shape, strides, padding, 3, dilations, data_format
)
strides, dilations = _extend_3d_strides_dilations(strides, dilations, data_format)
res = tf.nn.conv3d_transpose(
x, filters, output_shape, strides, padding, data_format, dilations
)
if bias is not None:
if data_format[1] == "C":
bias = tf.reshape(bias, [1, -1, 1, 1, 1])
res = tf.math.add(res, bias)
if permuted_x:
return tf.transpose(res, (0, 4, 1, 2, 3))
return res
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "complex")}, backend_version)
def conv_general_dilated(
x: Union[tf.Tensor, tf.Variable],
filters: Union[tf.Tensor, tf.Variable],
strides: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
dims: int = 2,
data_format: str = "channel_last",
filter_format: str = "channel_last",
feature_group_count: int = 1,
x_dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
bias: Optional[Union[tf.Tensor, tf.Variable]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if filter_format == "channel_first":
filters = tf.transpose(filters, (*range(2, dims + 2), 1, 0))
num_channels = x.shape[1] if data_format == "channel_first" else x.shape[-1]
if filters.shape[-2] != (num_channels // feature_group_count):
raise ivy.utils.exceptions.IvyError(
f"given feature_group_count {feature_group_count} expected input channel of"
f" the filter to be {num_channels // feature_group_count} but got"
f" {filters.shape[-2]}"
)
if num_channels % feature_group_count != 0:
raise ivy.utils.exceptions.IvyError(
"input channel should be divisible by feature group count"
f" {feature_group_count} but got input channel {num_channels}"
)
permuted_x = False
if data_format == "channel_first" and (
ivy.dev(x) == "cpu" or feature_group_count != 1
):
x = tf.transpose(x, (0, *range(2, dims + 2), 1))
data_format = "channel_last"
permuted_x = True
data_format = _get_x_data_format(dims, data_format)
x = _x_dil_before_conv(x, dims, x_dilations, data_format)
if dims == 2:
padding = _extend_2d_padding(padding, data_format)
if feature_group_count == 1:
res = tf.nn.conv2d(
x,
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
)
else:
with ivy.ArrayMode(False):
if not isinstance(padding, str):
padding = padding[1:-1]
res = depthwise_conv2d(
x,
tf.transpose(filters, (0, 1, 3, 2)),
strides,
padding,
data_format=data_format,
dilations=dilations,
)
else:
x, padding = _pad_before_conv(x, padding, dims, data_format)
if dims == 1:
if feature_group_count == 1:
res = tf.nn.conv1d(
x,
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
)
else:
res = tf.concat(
[
tf.nn.conv1d(
x[..., i : i + filters.shape[-2]],
filters[
..., j : j + filters.shape[-1] // feature_group_count
],
strides,
padding,
data_format,
dilations,
)
for i, j in zip(
range(0, x.shape[-1], filters.shape[-2]),
range(
0,
filters.shape[-1],
filters.shape[-1] // feature_group_count,
),
)
],
axis=-1,
)
else:
strides, dilations = _extend_3d_strides_dilations(
strides, dilations, data_format
)
if feature_group_count == 1:
res = tf.nn.conv3d(
x,
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
)
else:
res = tf.concat(
[
tf.nn.conv3d(
x[..., i : i + filters.shape[-2]],
filters[
..., j : j + filters.shape[-1] // feature_group_count
],
strides,
padding,
data_format,
dilations,
)
for i, j in zip(
range(0, x.shape[-1], filters.shape[-2]),
range(
0,
filters.shape[-1],
filters.shape[-1] // feature_group_count,
),
)
],
axis=-1,
)
if bias is not None:
if data_format[1] == "C":
bias = tf.reshape(bias, [1, -1, *([1] * dims)])
res = tf.math.add(res, bias)
if permuted_x:
return tf.transpose(res, (0, dims + 1, *range(1, dims + 1)))
return res
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "complex")}, backend_version)
def conv_general_transpose(
x: Union[tf.Tensor, tf.Variable],
filters: Union[tf.Tensor, tf.Variable],
strides: Union[int, Tuple[int, int]],
padding: str,
/,
*,
dims: int = 2,
filter_format: str = "channel_last",
data_format: str = "channel_last",
output_shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
feature_group_count: int = 1,
bias: Optional[Union[tf.Tensor, tf.Variable]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if feature_group_count == 1:
if dims == 1:
return conv1d_transpose(
x,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format="NWC" if data_format == "channel_last" else "NCW",
dilations=dilations,
bias=bias,
)
elif dims == 2:
return conv2d_transpose(
x,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format="NHWC" if data_format == "channel_last" else "NCHW",
dilations=dilations,
bias=bias,
)
else:
return conv3d_transpose(
x,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format="NDHWC" if data_format == "channel_last" else "NCDHW",
dilations=dilations,
bias=bias,
)
else:
if filter_format == "channel_first":
filters = tf.transpose(filters, (*range(2, dims + 2), 1, 0))
permuted_x = False
if data_format == "channel_first" and ivy.dev(x) == "cpu":
x = tf.transpose(x, (0, *range(2, dims + 2), 1))
data_format = "channel_last"
permuted_x = True
data_format = _get_x_data_format(dims, data_format)
output_shape, padding = _transpose_out_pad(
x.shape, filters.shape, strides, padding, dims, dilations, data_format
)
if dims == 1:
res = tf.concat(
[
tf.nn.conv1d_transpose(
x[..., j : j + filters.shape[-2] // feature_group_count],
filters[
..., j : j + filters.shape[-2] // feature_group_count, :
],
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
)
for j in range(
0, filters.shape[-2], filters.shape[-2] // feature_group_count
)
],
axis=-1,
)
elif dims == 2:
res = tf.concat(
[
tf.nn.conv2d_transpose(
x[..., j : j + filters.shape[-2] // feature_group_count],
filters[
..., j : j + filters.shape[-2] // feature_group_count, :
],
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
)
for j in range(
0, filters.shape[-2], filters.shape[-2] // feature_group_count
)
],
axis=-1,
)
else:
strides, dilations = _extend_3d_strides_dilations(
strides, dilations, data_format
)
res = tf.concat(
[
tf.nn.conv3d_transpose(
x[..., j : j + filters.shape[-2] // feature_group_count],
filters[
..., j : j + filters.shape[-2] // feature_group_count, :
],
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
)
for j in range(
0, filters.shape[-2], filters.shape[-2] // feature_group_count
)
],
axis=-1,
)
res = tf.math.add(res, bias) if bias is not None else res
if permuted_x:
return tf.transpose(res, (0, dims + 1, *range(1, dims + 1)))
return res
def _cpu_lstm(
x, init_h, init_c, kernel, recurrent_kernel, bias, recurrent_bias, time_major
):
def step(cell_inputs, cell_states):
h_tm1 = cell_states[0] # previous memory state
c_tm1 = cell_states[1] # previous carry state
z = tf.keras.backend.dot(cell_inputs, kernel)
if bias is not None:
z += bias
z += tf.keras.backend.dot(h_tm1, recurrent_kernel)
if recurrent_bias is not None:
z += recurrent_bias
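        # split the joint projection into the input (i), forget (f), cell candidate
        # and output (o) gate pre-activations, following Keras' i, f, c, o ordering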
z0, z1, z2, z3 = tf.split(z, 4, axis=-1)
i = tf.sigmoid(z0)
f = tf.sigmoid(z1)
c = f * c_tm1 + i * tf.tanh(z2)
o = tf.sigmoid(z3)
h = o * tf.tanh(c)
return h, [h, c]
_, outputs, new_states = tf.keras.backend.rnn(
step,
x,
[init_h, init_c],
time_major=time_major,
)
return outputs, new_states
def _format_weights_for_gpu(weights, biases, shape):
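    # flatten each (transposed) weight matrix and each bias to 1-D and stack them
    # into the single flat parameter buffer expected by tf.raw_ops.CudnnRNN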
weights = [tf.reshape(tf.transpose(x), shape) for x in weights]
biases = [tf.reshape(x, shape) for x in biases]
return tf.concat(weights + biases, axis=0)
def _gpu_lstm(
x, init_h, init_c, kernel, recurrent_kernel, bias, recurrent_bias, time_major
):
if not time_major:
x = tf.transpose(x, perm=(1, 0, 2))
init_h = tf.expand_dims(init_h, axis=0)
init_c = tf.expand_dims(init_c, axis=0)
weights = tf.split(kernel, 4, axis=1)
weights += tf.split(recurrent_kernel, 4, axis=1)
full_bias = tf.concat((recurrent_bias, bias), axis=0)
params = _format_weights_for_gpu(
weights=weights,
biases=tf.split(full_bias, 8),
shape=tf.constant([-1]),
)
outputs, h, c, _ = tf.raw_ops.CudnnRNN(
input=x,
input_h=init_h,
input_c=init_c,
params=params,
rnn_mode="lstm",
)
return outputs, (h, c)
def lstm_update(
x: Union[tf.Tensor, tf.Variable],
init_h: Union[tf.Tensor, tf.Variable],
init_c: Union[tf.Tensor, tf.Variable],
kernel: Union[tf.Tensor, tf.Variable],
recurrent_kernel: Union[tf.Tensor, tf.Variable],
/,
*,
bias: Optional[Union[tf.Tensor, tf.Variable]] = None,
recurrent_bias: Optional[Union[tf.Tensor, tf.Variable]] = None,
time_major: bool = False,
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
dev = x.device
x = x.data
init_h = init_h.data
init_c = init_c.data
kernel = kernel.data
recurrent_kernel = recurrent_kernel.data
bias = bias.data if bias is not None else bias
recurrent_bias = (
recurrent_bias.data if recurrent_bias is not None else recurrent_bias
)
if "cpu" in dev:
outputs, new_states = _cpu_lstm(
x,
init_h,
init_c,
kernel,
recurrent_kernel,
bias,
recurrent_bias,
time_major,
)
else:
outputs, new_states = _gpu_lstm(
x,
init_h,
init_c,
kernel,
recurrent_kernel,
bias,
recurrent_bias,
time_major,
)
return outputs, new_states
def nms(
boxes,
scores=None,
iou_threshold=0.5,
max_output_size=None,
score_threshold=float("-inf"),
):
if scores is None:
scores = tf.ones(boxes.shape[0])
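    # tf.image.non_max_suppression expects boxes as [y1, x1, y2, x2], so swap the
    # coordinate pairs of boxes given in the [x1, y1, x2, y2] layout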
boxes = tf.gather(boxes, [1, 0, 3, 2], axis=1)
ret = tf.image.non_max_suppression(
boxes, scores, max_output_size or len(boxes), iou_threshold, score_threshold
)
return tf.cast(ret, dtype=tf.int64)
| ivy/ivy/functional/backends/tensorflow/layers.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/layers.py",
"repo_id": "ivy",
"token_count": 14955
} | 22 |
"""PyTorch activation functions.
Collection of PyTorch activation functions, wrapped to fit Ivy syntax
and signature.
"""
from typing import Optional, Union, Literal
# global
import numpy as np
import torch
import torch.nn
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
import ivy.functional.backends.torch as torch_backend
@with_unsupported_dtypes(
{
"2.1.2 and below": (
"float16",
"bool",
)
},
backend_version,
)
def relu(
x: torch.Tensor, /, *, complex_mode="jax", out: Optional[torch.Tensor] = None
) -> torch.Tensor:
return torch.relu(x)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def leaky_relu(
x: torch.Tensor,
/,
*,
alpha: float = 0.2,
complex_mode="jax",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.nn.functional.leaky_relu(x, alpha)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def gelu(
x: torch.Tensor,
/,
*,
approximate: bool = False,
complex_mode="jax",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if approximate:
return 0.5 * x * (1 + torch.tanh(((2 / np.pi) ** 0.5) * (x + 0.044715 * x**3)))
return torch.nn.functional.gelu(x)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def sigmoid(
x: torch.Tensor, /, *, complex_mode="jax", out: Optional[torch.Tensor] = None
) -> torch.Tensor:
if not ivy.is_array(x):
x = torch.tensor(x)
return torch.sigmoid(x, out=out)
sigmoid.support_native_out = True
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def softmax(
x: torch.Tensor,
/,
*,
axis: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if axis is None:
axis = -1
if torch.is_complex(x):
amax = torch_backend.max(x, axis=axis, keepdims=True)
exp_x = torch.exp(torch.subtract(x, amax))
return torch.divide(exp_x, torch.sum(exp_x, dim=axis, keepdim=True))
return torch.nn.functional.softmax(x, axis)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version)
def softplus(
x: torch.Tensor,
/,
*,
beta: Optional[Union[int, float]] = None,
threshold: Optional[Union[int, float]] = None,
complex_mode="jax",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
kwargs = {
k: v for k, v in {"beta": beta, "threshold": threshold}.items() if v is not None
}
return torch.nn.functional.softplus(x, **kwargs)
# Softsign
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version)
def softsign(x: torch.Tensor, /, out: Optional[torch.Tensor] = None) -> torch.Tensor:
# return x / (1 + torch.abs(x))
return torch.nn.functional.softsign(x)
softsign.support_native_out = True
@with_unsupported_dtypes(
{"2.2 and below": ("float16",)},
backend_version,
)
def log_softmax(
x: torch.Tensor,
/,
*,
axis: Optional[int] = -1,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[torch.Tensor] = None,
):
if torch.is_complex(x):
x_max = torch_backend.max(x, axis=axis, keepdims=True)
sub_temp = torch.sub(x, x_max)
ret = torch.sum(sub_temp.exp(), dim=axis, keepdim=True)
ret = torch.log(ret)
return torch.sub(sub_temp, ret)
return torch.nn.functional.log_softmax(x, axis)
@with_unsupported_dtypes(
{"2.2 and below": ("float16",)},
backend_version,
)
def mish(
x: torch.Tensor,
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if torch.is_complex(x):
x_norm = torch.log1p(x.exp())
return torch.multiply(x, x_norm.tanh())
return torch.nn.functional.mish(x)
@with_unsupported_dtypes(
{
"2.2 and below": (
"complex",
"float16",
)
},
backend_version,
)
def hardswish(
x: torch.Tensor,
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.nn.functional.hardswish(x)
| ivy/ivy/functional/backends/torch/activations.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/activations.py",
"repo_id": "ivy",
"token_count": 1905
} | 23 |
# global
import math
from collections import namedtuple
import torch
from typing import Optional, Tuple, Sequence, Union
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from .. import backend_version
from ivy.functional.ivy.experimental.linear_algebra import _check_valid_dimension_size
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def diagflat(
x: torch.Tensor,
/,
*,
offset: int = 0,
padding_value: float = 0,
align: str = "RIGHT_LEFT",
num_rows: int = -1,
num_cols: int = -1,
out: Optional[torch.Tensor] = None,
):
if len(x.shape) > 1:
x = torch.flatten(x)
# if len(x.shape) == 1 and offset == 0 and num_rows <= 1 and num_cols <= 1:
if math.prod(x.shape) == 1 and offset == 0 and num_rows <= 1 and num_cols <= 1:
return x
# This is used as part of Tensorflow's shape calculation
# See their source code to see what they're doing with it
lower_diag_index = offset
upper_diag_index = lower_diag_index
x_shape = x.shape
x_rank = len(x_shape)
num_diags = upper_diag_index - lower_diag_index + 1
max_diag_len = x_shape[x_rank - 1]
min_num_rows = max_diag_len - min(upper_diag_index, 0)
min_num_cols = max_diag_len + max(lower_diag_index, 0)
if num_rows == -1 and num_cols == -1:
num_rows = max(min_num_rows, min_num_cols)
num_cols = num_rows
elif num_rows == -1:
num_rows = min_num_rows
elif num_cols == -1:
num_cols = min_num_cols
output_shape = list(x_shape)
if num_diags == 1:
output_shape[x_rank - 1] = num_rows
output_shape.append(num_cols)
else:
output_shape[x_rank - 2] = num_rows
output_shape[x_rank - 1] = num_cols
output_array = torch.full(tuple(output_shape), padding_value, dtype=x.dtype)
output_array = output_array.to(x.dtype)
diag_len = max(min(num_rows, num_cols) - abs(offset) + 1, 1)
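    # if fewer values than the diagonal length were given, pad with padding_value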
if len(x) < diag_len:
x = torch.tensor(
list(x) + [padding_value] * max((diag_len - len(x), 0)), dtype=x.dtype
)
temp = x - torch.full(x.shape, padding_value).type(x.dtype)
diagonal_to_add = torch.diag(temp, diagonal=offset).type(
x.dtype
) # diag does not support float16
diagonal_to_add = diagonal_to_add[tuple(slice(0, n) for n in output_array.shape)]
diagonal_to_add = diagonal_to_add.to(x.dtype)
output_array += torch.nn.functional.pad(
diagonal_to_add,
(
0,
max([output_array.shape[1] - diagonal_to_add.shape[1], 0]),
0,
max([output_array.shape[0] - diagonal_to_add.shape[0], 0]),
),
"constant",
0,
).type(x.dtype)
ret = output_array.type(x.dtype)
if ivy.exists(out):
ivy.inplace_update(out, ret)
return ret
diagflat.support_native_out = False
def kron(
a: torch.Tensor,
b: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.tensor:
return torch.kron(a, b, out=out)
kron.support_native_out = True
def matrix_exp(
x: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.linalg.matrix_exp(x)
matrix_exp.support_native_out = True
def eig(
x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor]:
if not torch.is_complex(x):
x = x.to(torch.complex128)
return torch.linalg.eig(x)
eig.support_native_out = False
def eigvals(x: torch.Tensor, /) -> torch.Tensor:
if not torch.is_complex(x):
x = x.to(torch.complex128)
return torch.linalg.eigvals(x)
eigvals.support_native_out = False
def adjoint(
x: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
_check_valid_dimension_size(x)
return torch.adjoint(x).resolve_conj()
def solve_triangular(
x1: torch.Tensor,
x2: torch.Tensor,
/,
*,
upper: bool = True,
adjoint: bool = False,
unit_diagonal: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
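    # torch.linalg.solve_triangular has no adjoint flag, so take the adjoint
    # explicitly; the adjoint of an upper-triangular matrix is lower-triangular
    # (and vice versa), so the `upper` flag flips as well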
if adjoint:
x1 = torch.adjoint(x1)
upper = not upper
return torch.linalg.solve_triangular(
x1, x2, upper=upper, unitriangular=unit_diagonal, out=out
)
solve_triangular.support_native_out = True
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def multi_dot(
x: Sequence[torch.Tensor],
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.linalg.multi_dot(x, out=out)
multi_dot.support_native_out = True
@with_unsupported_dtypes({"2.0.0 and below": ("float16", "bfloat16")}, backend_version)
def cond(
x: torch.Tensor,
/,
*,
p: Optional[Union[None, int, str]] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.linalg.cond(x, p=p, out=out)
cond.support_native_out = False
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def lu_factor(
x: torch.Tensor,
/,
*,
pivot: Optional[bool] = True,
out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
ret = torch.linalg.lu_factor(x, pivot=pivot, out=out)
ret_tuple = namedtuple("lu_factor", ["LU", "p"])
return ret_tuple(ret.LU, ret.pivots)
def lu_solve(
lu: Tuple[torch.Tensor, torch.Tensor],
p: torch.Tensor,
b: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.linalg.lu_solve(lu, p, b, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def dot(
a: torch.Tensor,
b: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
a, b = ivy.promote_types_of_inputs(a, b)
if a.dim() == 0 or b.dim() == 0:
return torch.mul(a, b, out=out)
if a.dim() in [1, 2] and b.dim() in [1, 2] or (a.dim() >= 1 and b.dim() == 1):
return torch.matmul(a, b, out=out)
return torch.tensordot(a, b, dims=[[-1], [-2]], out=out)
dot.support_native_out = True
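# Illustrative dispatch (a sketch; shapes are assumptions, not from this repo):
#   dot(2.0, 3.0)            -> torch.mul        (0-dim operands)
#   dot(x[m, k], y[k, n])    -> torch.matmul     (1-D / 2-D operands)
#   dot(x[p, m, k], y[k, n]) -> torch.tensordot  (contracts x's last axis with
#                                                 y's second-to-last axis)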
| ivy/ivy/functional/backends/torch/experimental/linear_algebra.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/experimental/linear_algebra.py",
"repo_id": "ivy",
"token_count": 2794
} | 24 |
import ivy.functional.frontends.jax as jax_frontend
# Dummy Array class to help with compilation, don't add methods here
class ArrayImpl(jax_frontend.Array):
pass
| ivy/ivy/functional/frontends/jax/_src/array.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/_src/array.py",
"repo_id": "ivy",
"token_count": 53
} | 25 |
import ivy
from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_supported_dtypes
# --- Helpers --- #
# --------------- #
def _batch_promotion(*args, default_dtype="float64"):
# Promote all types
promote_types = set()
for arg in args:
        if arg is None:
continue
if isinstance(arg, (float, int)):
continue
promote_types.add(ivy.dtype(arg))
if "float64" in promote_types:
return "float64"
if "float32" in promote_types:
return "float32"
if "float16" in promote_types:
return "float32" if "bfloat16" in promote_types else "float16"
if "bfloat16" in promote_types:
return "bfloat16"
if "int64" in promote_types or "uint64" in promote_types:
return "float64"
ints = ["int8", "int16", "int32"]
if "uint32" in promote_types and any(d in promote_types for d in ints):
return "float64"
return default_dtype
def _canonicalize_axis(axis, ndim):
if not -ndim <= axis < ndim:
raise ivy.utils.exceptions.IvyException(
f"axis {axis} is out of bounds for array of dimension {ndim}"
)
if axis < 0:
axis = axis + ndim
return axis
def _len(x):
shape = ivy.shape(x)
return 0 if len(shape) == 0 else shape[0]
def _mean(x, axis=None, keepdims=False, where=None):
# Mean with support for where
if where is None:
return ivy.mean(x, axis=axis, keepdims=keepdims)
filtered_x = ivy.where(where, ivy.array(x), ivy.zeros_like(x))
counter_x = ivy.where(where, ivy.ones_like(x), ivy.zeros_like(x))
sums = ivy.sum(filtered_x, axis=axis, keepdims=keepdims)
counts = ivy.sum(counter_x, axis=axis, keepdims=keepdims)
return ivy.divide(sums, counts)
def _reduction_dims(a, axis):
ndims = len(ivy.shape(a))
if axis is None:
return (tuple(range(ndims)),) * 2
if not isinstance(axis, (tuple, list)):
axis = (axis,)
canon_axis = tuple(_canonicalize_axis(ax, ndims) for ax in axis)
ivy.utils.assertions.check_equal(
len(canon_axis),
len(set(canon_axis)),
message=f"duplicate value in 'axis': {axis}",
as_array=False,
)
# TODO: deal with named axis
canon_pos_axis = tuple(x for x in canon_axis if isinstance(x, int))
if len(canon_pos_axis) != len(canon_axis):
return canon_pos_axis, canon_axis
else:
return canon_axis, canon_axis
def _type_conversion(x):
    # Type conversion: float dtypes stay float, complex stays complex,
    # other 64-bit dtypes map to float64, everything else to float32
x = ivy.asarray(x)
dtype = ivy.as_ivy_dtype(x.dtype)
if not ("float" in dtype or "complex" in dtype):
dtype = "float64" if "64" in dtype[-2:] else "float32"
return ivy.astype(x, dtype)
def _type_conversion_64(x):
    # Type conversion: float dtypes stay float, complex stays complex,
    # everything else maps to float64
x = ivy.asarray(x)
dtype = ivy.as_ivy_dtype(x.dtype)
if not ("float" in dtype or "complex" in dtype):
dtype = "float64"
return ivy.astype(x, dtype)
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
def celu(x, alpha=1.0):
return ivy.celu(x, alpha=alpha)
@to_ivy_arrays_and_back
def elu(x, alpha=1.0):
ret = ivy.where(x > 0, x, alpha * ivy.expm1(x))
dtype = _batch_promotion(x, alpha, default_dtype="float64")
return ivy.asarray(ret, dtype=dtype)
@to_ivy_arrays_and_back
def gelu(x, approximate=True):
return ivy.gelu(x, approximate=approximate, complex_mode="jax")
@to_ivy_arrays_and_back
def glu(x, axis=-1):
size = x.shape[axis]
ivy.utils.assertions.check_equal(
size % 2, 0, message="axis size must be divisible by 2", as_array=False
)
x1, x2 = ivy.split(x, num_or_size_splits=2, axis=axis)
return ivy.multiply(x1, ivy.sigmoid(x2))
@to_ivy_arrays_and_back
def hard_sigmoid(x):
dtype = _batch_promotion(x, default_dtype="float64")
return ivy.divide(ivy.minimum(ivy.maximum(ivy.add(x, 3), 0), 6), 6).astype(dtype)
@to_ivy_arrays_and_back
def hard_silu(x):
dtype = _batch_promotion(x, default_dtype="float64")
sig = ivy.divide(ivy.minimum(ivy.maximum(ivy.add(x, 3), 0), 6), 6)
return ivy.multiply(x, sig).astype(dtype)
@to_ivy_arrays_and_back
def hard_swish(x):
res = (x * ivy.minimum(ivy.maximum(x + 3, 0.0), 6.0)) / 6
return ivy.asarray(res, dtype=x.dtype)
@to_ivy_arrays_and_back
def hard_tanh(x):
n1 = -1
if "uint" in str(x.dtype):
dtype = x.dtype
# tensorflow can't use -1 for uint
n1 = ivy.asarray((1 << ivy.dtype_bits(dtype)) - 1, dtype=dtype)
return ivy.where(x > 1, 1, ivy.where(x < n1, n1, x)).astype(x.dtype)
@to_ivy_arrays_and_back
def leaky_relu(x, negative_slope=0.01):
x = _type_conversion_64(x)
return ivy.leaky_relu(x, alpha=negative_slope, complex_mode="jax")
@to_ivy_arrays_and_back
def log_sigmoid(x):
x = _type_conversion(x)
return ivy.logsigmoid(x, complex_mode="jax").astype(x.dtype)
@to_ivy_arrays_and_back
def log_softmax(x, axis=-1):
return ivy.log_softmax(x, axis=axis)
@to_ivy_arrays_and_back
def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
a = ivy.asarray(a)
if b is not None:
dtype = _batch_promotion(a, b, default_dtype="float32")
a = ivy.astype(a, dtype)
b = ivy.asarray(b, dtype=dtype)
a = ivy.where(b != 0, a, -ivy.inf)
a = ivy.astype(a, dtype)
out_dtype = _batch_promotion(a, b, default_dtype="float32")
pos_dims, dims = _reduction_dims(a, axis)
amax = ivy.max(a, axis=pos_dims, keepdims=keepdims)
notinf = ivy.asarray(not ivy.isinf(amax))
amax = ivy.stop_gradient(ivy.where(notinf, amax, ivy.zeros_like(amax)))
amax_with_dims = amax if keepdims else ivy.expand_dims(amax, axis=pos_dims)
# fast path for non-negative result
if b is None:
out = ivy.add(
ivy.log(
ivy.sum(
ivy.exp(ivy.subtract(a, amax_with_dims)),
axis=dims,
keepdims=keepdims,
)
),
amax,
)
sign = ivy.where(ivy.isnan(out), out, 1.0)
sign = ivy.where(ivy.isinf(-out), 0.0, sign).astype(out.dtype)
else:
expsub = ivy.exp(ivy.subtract(a, amax_with_dims))
if b is not None:
expsub = ivy.multiply(expsub, b)
sumexp = ivy.sum(expsub, axis=dims, keepdims=keepdims)
sign = ivy.stop_gradient(ivy.sign(sumexp))
out = ivy.add(ivy.log(ivy.abs(sumexp)), amax)
if return_sign:
return out, sign
if b is not None:
out = ivy.where(sign < 0, ivy.array(ivy.nan, dtype=out.dtype), out)
return out.astype(out_dtype)
@to_ivy_arrays_and_back
def normalize(x, axis=-1, mean=None, variance=None, epsilon=1e-5, where=None):
default = "float64" if mean is not None and variance is not None else "float32"
x_typed = _type_conversion(x)
if mean is None:
mean = _mean(x_typed, axis=axis, keepdims=True, where=where)
if variance is None:
variance = _mean(
ivy.square(x).astype(x_typed.dtype), axis=axis, keepdims=True, where=where
) - ivy.square(mean)
res = (x - mean) / ivy.sqrt(variance + ivy.asarray(epsilon, dtype=x_typed.dtype))
out_type = _batch_promotion(x, mean, variance, default_dtype=default)
return ivy.asarray(res, dtype=out_type)
@to_ivy_arrays_and_back
def one_hot(x, num_classes, *, dtype=None, axis=-1):
dtype = ivy.float64 if dtype is None else ivy.as_ivy_dtype(dtype)
return ivy.one_hot(x, num_classes, axis=axis, dtype=dtype)
@to_ivy_arrays_and_back
def relu(x):
return ivy.relu(x, complex_mode="jax")
@to_ivy_arrays_and_back
def relu6(x):
res = ivy.relu6(x, complex_mode="jax")
return _type_conversion_64(res)
@to_ivy_arrays_and_back
def selu(x):
x = _type_conversion_64(x)
return ivy.selu(x)
@to_ivy_arrays_and_back
def sigmoid(x):
x = _type_conversion(x)
ret = ivy.sigmoid(x, complex_mode="jax")
return ivy.astype(ret, x.dtype)
@with_supported_dtypes(
{"0.4.24 and below": ("complex", "float")},
"jax",
)
@to_ivy_arrays_and_back
def silu(x):
x = _type_conversion(x)
return ivy.multiply(x, ivy.sigmoid(x))
@to_ivy_arrays_and_back
def soft_sign(x):
dtype = _type_conversion(x).dtype
ret = x / (ivy.abs(x) + 1)
return ret.astype(dtype)
@to_ivy_arrays_and_back
def softmax(x, axis=-1, where=None, initial=None):
return ivy.softmax(x, axis=axis)
@to_ivy_arrays_and_back
def softplus(x):
x = _type_conversion(x)
return ivy.softplus(x, complex_mode="jax").astype(x.dtype)
@to_ivy_arrays_and_back
def swish(x):
ret = x / (1 + ivy.exp(-x))
return ivy.asarray(ret, dtype=x.dtype)
| ivy/ivy/functional/frontends/jax/nn/non_linear_activations.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/nn/non_linear_activations.py",
"repo_id": "ivy",
"token_count": 4225
} | 26 |
from ._op import *
| ivy/ivy/functional/frontends/mxnet/numpy_extension/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/mxnet/numpy_extension/__init__.py",
"repo_id": "ivy",
"token_count": 6
} | 27 |
# global
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.frontends.numpy import promote_types_of_numpy_inputs
from ivy.functional.frontends.numpy.linalg.norms_and_other_numbers import matrix_rank
# inv
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
def inv(a):
return ivy.inv(a)
# TODO: replace this with a function from the API,
# as the composition below provides unstable results
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, "numpy")
def lstsq(a, b, rcond="warn"):
solution = ivy.matmul(
ivy.pinv(a, rtol=1e-15).astype(ivy.float64), b.astype(ivy.float64)
)
svd = ivy.svd(a, compute_uv=False)
rank = matrix_rank(a).astype(ivy.int32)
residuals = ivy.sum((b - ivy.matmul(a, solution)) ** 2).astype(ivy.float64)
return (solution, residuals, rank, svd[0])
# pinv
# TODO: add hermitian functionality
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
def pinv(a, rcond=1e-15, hermitian=False):
return ivy.pinv(a, rtol=rcond)
# solve
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
def solve(a, b):
a, b = promote_types_of_numpy_inputs(a, b)
return ivy.solve(a, b)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"1.26.3 and below": ("float16", "bfloat16")}, "numpy")
def tensorinv(a, ind=2):
old_shape = ivy.shape(a)
prod = 1
if ind > 0:
invshape = old_shape[ind:] + old_shape[:ind]
for k in old_shape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = ivy.reshape(a, shape=(prod, -1))
ia = ivy.inv(a)
new_shape = (*invshape,)
return ivy.reshape(ia, shape=new_shape)
| ivy/ivy/functional/frontends/numpy/linalg/solving_equations_and_inverting_matrices.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/linalg/solving_equations_and_inverting_matrices.py",
"repo_id": "ivy",
"token_count": 846
} | 28 |
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
inputs_to_ivy_arrays,
_assert_no_array,
_assert_array,
)
@inputs_to_ivy_arrays
def copyto(dst, src, /, *, casting="same_kind", where=True):
# Handle casting
# Numpy copyto doesn't cast the inputs
# It just checks casting rules
ivy.utils.assertions.check_elem_in_list(
casting,
["no", "equiv", "safe", "same_kind", "unsafe"],
message="casting must be one of [no, equiv, safe, same_kind, unsafe]",
)
args = [dst, src]
args_idxs = ivy.nested_argwhere(args, ivy.is_array)
args_to_check = ivy.multi_index_nest(args, args_idxs)
dtype = args_to_check[0].dtype
if casting in ["no", "equiv"]:
_assert_no_array(
args_to_check,
dtype,
)
elif casting in ["same_kind", "safe"]:
_assert_array(
args_to_check,
dtype,
casting=casting,
)
ivy.where(where, src, dst, out=dst)
@inputs_to_ivy_arrays
def shape(array, /):
return ivy.shape(array)
| ivy/ivy/functional/frontends/numpy/manipulation_routines/basic_operations.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/manipulation_routines/basic_operations.py",
"repo_id": "ivy",
"token_count": 518
} | 29 |
# global
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_casting,
handle_numpy_dtype,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
# --- Helpers --- #
# --------------- #
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _arccosh(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.acosh(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
# arcsinh
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _arcsinh(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.asinh(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _arctanh(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.atanh(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _cosh(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.cosh(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _sinh(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.sinh(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _tanh(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.tanh(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
| ivy/ivy/functional/frontends/numpy/mathematical_functions/hyperbolic_functions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/mathematical_functions/hyperbolic_functions.py",
"repo_id": "ivy",
"token_count": 1344
} | 30 |
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
from_zero_dim_arrays_to_scalar,
)
from ivy import with_supported_dtypes
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def beta(a, b, size=None):
return ivy.beta(a, b, shape=size)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def binomial(n, p, size=None):
    if p < 0 or p > 1:
        raise ValueError("p must be in the interval [0, 1]")
    if n < 0:
        raise ValueError("n must be non-negative")
    if size is None:
        size = 1
    if isinstance(size, int):
        size = (size,)
    # note: this draws from a Poisson(n * p) approximation of the binomial
    lambda_ = ivy.multiply(n, p)
    return ivy.poisson(lambda_, shape=size)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def chisquare(df, size=None):
df = ivy.array(df) # scalar ints and floats are also array_like
if ivy.any(df <= 0):
raise ValueError("df <= 0")
    # ivy.gamma() throws an error if alpha is an array and a shape is also passed,
    # so broadcast df into the shape of `size` first.
if size is not None:
df = df * ivy.ones(size)
return ivy.gamma(df / 2, 2, dtype="float64")
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def choice(a, size=None, replace=True, p=None):
sc_size = 1
if isinstance(size, int):
sc_size = size
elif size is not None:
# If the given shape is, e.g., (m, n, k)
# then m * n * k samples are drawn. As per numpy docs
sc_size = 1
for s in size:
if s is not None:
sc_size *= s
if isinstance(a, int):
a = ivy.arange(a)
index = ivy.multinomial(len(a), sc_size, replace=replace, probs=p)
return a[index]
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def dirichlet(alpha, size=None):
return ivy.dirichlet(alpha, size=size)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def exponential(scale=1.0, size=None, dtype="float64"):
    if scale > 0:
        # inverse-CDF sampling: X = -scale * log(1 - U) with U ~ Uniform(0, 1)
        u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype=dtype)
        return -scale * ivy.log(1 - u)
    return 0  # if scale parameter is less than or equal to 0
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def f(dfn, dfd, size=None):
    # Generate two independent chi-square-distributed samples via the gamma distribution
x1 = ivy.gamma(ivy.to_scalar(ivy.divide(dfn, 2)), 2.0, shape=size, dtype="float64")
x2 = ivy.gamma(ivy.to_scalar(ivy.divide(dfd, 2)), 2.0, shape=size, dtype="float64")
# Calculate the F-distributed samples
samples = ivy.divide(ivy.divide(x1, ivy.array(dfn)), ivy.divide(x2, ivy.array(dfd)))
return samples
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def gamma(shape, scale=1.0, size=None):
return ivy.gamma(shape, scale, shape=size, dtype="float64")
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def geometric(p, size=None):
if p < 0 or p > 1:
raise ValueError("p must be in the interval [0, 1]")
oneMinusP = ivy.subtract(1, p)
sizeMinusOne = ivy.subtract(size, 1)
return ivy.multiply(ivy.pow(oneMinusP, sizeMinusOne), p)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def gumbel(loc=0.0, scale=1.0, size=None):
u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
x = loc - scale * ivy.log(-ivy.log(u))
return x
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def laplace(loc=0.0, scale=1.0, size=None):
    u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
u = loc - scale * ivy.sign(u - 0.5) * ivy.log(1 - 2 * ivy.abs(u - 0.5))
return u
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def logistic(loc=0.0, scale=1.0, size=None):
    u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
x = loc + scale * ivy.log(u / (1 - u))
return x
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def lognormal(mean=0.0, sigma=1.0, size=None):
ret = ivy.exp(ivy.random_normal(mean=mean, std=sigma, shape=size, dtype="float64"))
return ret
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def logseries(p=0, size=None):
if p < 0 or p >= 1:
raise ValueError("p value must be in the open interval (0, 1)")
r = ivy.log(1 - p)
u = ivy.random_uniform(low=0.0, high=1.0, shape=size)
v = ivy.random_uniform(low=0.0, high=1.0, shape=size)
q = 1 - ivy.exp(r * u)
ret = 1 + ivy.log(v) / ivy.log(q)
return ret
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def multinomial(n, pvals, size=None):
assert not ivy.exists(size) or (len(size) > 0 and len(size) < 3)
batch_size = 1
if ivy.exists(size):
if len(size) == 2:
batch_size = size[0]
num_samples = size[1]
else:
num_samples = size[0]
else:
num_samples = len(pvals)
return ivy.multinomial(n, num_samples, batch_size=batch_size, probs=pvals)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def negative_binomial(n, p, size=None):
if p <= 0 or p >= 1:
raise ValueError("p must be in the interval (0, 1)")
if n <= 0:
raise ValueError("n must be strictly positive")
# numpy implementation uses scale = (1 - p) / p
scale = (1 - p) / p
# poisson requires shape to be a tuple
if isinstance(size, int):
size = (size,)
lambda_ = ivy.gamma(n, scale, shape=size)
return ivy.poisson(lam=lambda_, shape=size)
@with_supported_dtypes(
{"1.25.2 and below": ("float16", "float32")},
"numpy",
)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def noncentral_chisquare(df, nonc, size=None):
if ivy.any(df <= 0):
raise ValueError("Degree of freedom must be greater than 0")
if ivy.has_nans(nonc):
return ivy.nan
if ivy.any(nonc == 0):
return chisquare(df, size=size)
if ivy.any(df < 1):
n = standard_normal() + ivy.sqrt(nonc)
return chisquare(df - 1, size=size) + n * n
else:
i = poisson(nonc / 2.0, size=size)
return chisquare(df + 2 * i, size=size)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def normal(loc=0.0, scale=1.0, size=None):
return ivy.random_normal(mean=loc, std=scale, shape=size, dtype="float64")
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def pareto(a, size=None):
if a < 0:
return 0
    u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
return ivy.pow(1 / (1 - u), 1 / a)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def permutation(x, /):
if isinstance(x, int):
x = ivy.arange(x)
return ivy.shuffle(x)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def poisson(lam=1.0, size=None):
return ivy.poisson(lam=lam, shape=size)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def random_sample(size=None):
return ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def rayleigh(scale, size=None):
u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
log_u = ivy.log(u)
x = ivy.multiply(scale, ivy.sqrt(ivy.multiply(-2, log_u)))
return x
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def shuffle(x, axis=0, /):
if isinstance(x, int):
x = ivy.arange(x)
return ivy.shuffle(x, axis)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def standard_cauchy(size=None):
u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
return ivy.tan(ivy.pi * (u - 0.5))
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def standard_exponential(size=None):
if size is None:
size = 1
U = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
return -ivy.log(U)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def standard_gamma(shape, size=None):
return ivy.gamma(shape, 1.0, shape=size, dtype="float64")
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def standard_normal(size=None):
return ivy.random_normal(mean=0.0, std=1.0, shape=size, dtype="float64")
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def standard_t(df, size=None):
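    # t = Z / sqrt(V / df) with Z ~ N(0, 1) and V ~ chi-square(df); the chi-square
    # draw is expressed below as 2 * Gamma(df / 2, 1)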
numerator = ivy.random_normal(mean=0.0, std=1.0, shape=size, dtype="float64")
denominator = ivy.gamma(df / 2, 1.0, shape=size, dtype="float64")
return ivy.sqrt(df / 2) * ivy.divide(numerator, ivy.sqrt(denominator))
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def triangular(left, mode, right, size=None):
if left > mode or mode > right or left == right:
raise ivy.utils.exceptions.IvyValueError(
"left < mode < right is not being followed"
)
u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
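    # inverse-CDF sampling: invert the piecewise-quadratic triangular CDF on
    # either side of the mode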
condition = u <= (mode - left) / (right - left)
values1 = left + (right - left) * (u * (mode - left) / (right - left)) ** 0.5
values2 = (
right - (right - mode) * ((1 - u) * (right - mode) / (right - left)) ** 0.5
)
return ivy.where(condition, values1, values2)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def uniform(low=0.0, high=1.0, size=None):
return ivy.random_uniform(low=low, high=high, shape=size, dtype="float64")
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def vonmises(mu, kappa, size=None):
    t_size = 1
# Output shape. If the given shape is, e.g., (m, n, k),
# then m * n * k samples are drawn.
if size is None or len(size) == 0:
t_size = 1
else:
for x in size:
t_size = t_size * x
size = t_size
li = []
while len(li) < size:
# Generate samples from the von Mises distribution using numpy
u = ivy.random_uniform(low=-ivy.pi, high=ivy.pi, shape=size)
v = ivy.random_uniform(low=0, high=1, shape=size)
condition = v < (1 + ivy.exp(kappa * ivy.cos(u - mu))) / (
2 * ivy.pi * ivy.i0(kappa)
)
selected_samples = u[condition]
li.extend(ivy.to_list(selected_samples))
return ivy.array(li[:size])
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def wald(mean, scale, size=None):
if size is None:
size = 1
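    # transformation method: draw Y from a squared normal, solve the quadratic
    # for X, then keep X or mean**2 / X with probability mean / (mean + X)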
mu_2l = mean / (2 * scale)
Y = ivy.random_normal(mean=0, std=1, shape=size, dtype="float64")
U = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
Y = mean * ivy.square(Y)
X = mean + mu_2l * (Y - ivy.sqrt(((4 * scale) * Y) + ivy.square(Y)))
condition = mean / (mean + X) >= U
value1 = X
value2 = mean * mean / X
return ivy.where(condition, value1, value2)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def weibull(a, size=None):
if a < 0:
return 0
u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
return ivy.pow(-ivy.log(1 - u), 1 / a)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def zipf(a, size=None):
if a <= 1:
return 0
u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
return ivy.floor(ivy.pow(1 / (1 - u), 1 / a))
| ivy/ivy/functional/frontends/numpy/random/functions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/random/functions.py",
"repo_id": "ivy",
"token_count": 5296
} | 31 |
import functools
from typing import Callable
import ivy
import ivy.functional.frontends.onnx as onnx_frontend
# --- Helpers --- #
# --------------- #
def _from_ivy_array_to_onnx_frontend_tensor(x, nested=False, include_derived=None):
if nested:
return ivy.nested_map(
_from_ivy_array_to_onnx_frontend_tensor, x, include_derived, shallow=False
)
elif isinstance(x, ivy.Array) or ivy.is_native_array(x):
a = onnx_frontend.Tensor(x)
return a
return x
def _ivy_array_to_onnx(x):
if isinstance(x, ivy.Array) or ivy.is_native_array(x):
return onnx_frontend.Tensor(x)
return x
def _native_to_ivy_array(x):
if isinstance(x, ivy.NativeArray):
return ivy.array(x)
return x
def _onnx_frontend_array_to_ivy(x):
if hasattr(x, "ivy_array"):
return x.ivy_array
return x
def _to_ivy_array(x):
return _onnx_frontend_array_to_ivy(_native_to_ivy_array(x))
# --- Main --- #
# ------------ #
def inputs_to_ivy_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def _inputs_to_ivy_arrays_onnx(*args, **kwargs):
"""Convert `Tensor` into `ivy.Array` instances.
Convert all `Tensor` instances in both the positional and
keyword arguments into `ivy.Array` instances, and then calls the
function with the updated arguments.
"""
# convert all arrays in the inputs to ivy.Array instances
new_args = ivy.nested_map(
_to_ivy_array, args, include_derived={"tuple": True}, shallow=False
)
new_kwargs = ivy.nested_map(
_to_ivy_array, kwargs, include_derived={"tuple": True}, shallow=False
)
return fn(*new_args, **new_kwargs)
return _inputs_to_ivy_arrays_onnx
def outputs_to_frontend_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def _outputs_to_frontend_arrays_onnx(*args, **kwargs):
"""Convert `ivy.Array` into `Tensor` instances.
Call the function, and then converts all `ivy.Array` instances
returned by the function into `Tensor` instances.
"""
# call unmodified function
ret = fn(*args, **kwargs)
# convert all arrays in the return to `frontend.onnx.Tensor` instances
return _from_ivy_array_to_onnx_frontend_tensor(
ret, nested=True, include_derived={"tuple": True}
)
return _outputs_to_frontend_arrays_onnx
def to_ivy_arrays_and_back(fn: Callable) -> Callable:
"""Wrap `fn` so it receives and returns `ivy.Array` instances.
    Wrap `fn` so that input arrays are all converted to `ivy.Array`
    instances and return arrays are all converted back to frontend
    `Tensor` instances.
"""
return outputs_to_frontend_arrays(inputs_to_ivy_arrays(fn))
| ivy/ivy/functional/frontends/onnx/func_wrapper.py/0 | {
"file_path": "ivy/ivy/functional/frontends/onnx/func_wrapper.py",
"repo_id": "ivy",
"token_count": 1218
} | 32 |
# local
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
from ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh
tanh = paddle_tanh
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def celu(
x,
/,
*,
alpha=1.0,
name=None,
):
return ivy.celu(x, alpha=alpha)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def elu(
x,
/,
*,
alpha=1.0,
name=None,
):
return ivy.elu(x, alpha=alpha)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def gelu(x, approximate=False, name=None):
return ivy.gelu(x, approximate=approximate)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def glu(x, axis=-1, name=None):
size = x.shape[axis]
ivy.utils.assertions.check_equal(
size % 2, 0, message="axis size must be divisible by 2", as_array=False
)
a, b = ivy.split(x, num_or_size_splits=2, axis=axis)
return ivy.multiply(a, ivy.sigmoid(b))
@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def gumbel_softmax(x, temperature=1.0, hard=False, axis=-1, name=None):
    # sample Gumbel(0, 1) noise as -log(-log(U + eps) + eps) with U ~ Uniform(0, 1)
    gumbel_noise = -ivy.log(
        -ivy.log(ivy.random_uniform(shape=ivy.shape(x)) + 1e-20) + 1e-20
    )
    gumbel_logits = (x + gumbel_noise) / temperature
y_soft = ivy.softmax(gumbel_logits, axis=axis)
if hard:
y_hard = ivy.one_hot(ivy.argmax(y_soft, axis=axis), ivy.shape(y_soft)[axis])
return y_hard
else:
return y_soft
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def hardshrink(x, threshold=0.5, name=None):
mask = ivy.logical_or(ivy.greater(x, threshold), ivy.less(x, -threshold))
return ivy.where(mask, x, 0.0)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
ret = ivy.minimum(ivy.maximum(ivy.add(ivy.multiply(x, slope), offset), 0), 1)
return ret
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def hardswish(x, name=None):
relu6_val = ivy.relu6(ivy.add(x, 3))
ret = ivy.multiply(x, ivy.divide(relu6_val, 6))
return ret
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def hardtanh(
x,
/,
*,
min=-1.0,
max=1.0,
name=None,
):
less = ivy.where(ivy.less(x, min), min, x)
ret = ivy.where(ivy.greater(x, max), max, less).astype(x.dtype)
return ret
@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def leaky_relu(x, negative_slope=0.01, name=None):
    return ivy.leaky_relu(x, alpha=negative_slope)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def log_sigmoid(x, name=None):
return -ivy.softplus(-x)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def log_softmax(x, axis=-1, dtype=None, name=None):
x = ivy.astype(x, dtype) if dtype else x
ret = ivy.log_softmax(x, axis=axis)
ret = ivy.astype(ret, dtype) if dtype else ret
return ret
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def mish(x, name=None):
return ivy.mish(x)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def prelu(x, weight, data_format="NCHW", name=None):
return ivy.add(ivy.maximum(0, x), ivy.multiply(weight, ivy.minimum(0, x)))
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def relu(x, name=None):
return ivy.relu(x)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def relu6(x, name=None):
return ivy.relu6(x)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def relu_(x, name=None):
ret = ivy.relu(x)
ivy.inplace_update(x, ret)
return x
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def rrelu(
x,
/,
*,
lower=0.125,
upper=0.3333333333333333,
training=False,
name=None,
):
if lower < 0 or lower > 1:
raise ValueError(
"The lower value must be no less than zero or greater than one. Received:"
f" {lower}."
)
if upper < lower:
raise ValueError(
"The upper value must be greater than lower value. Received: lower"
f" {lower}, upper {upper}."
)
if upper > 1:
raise ValueError(
f"The upper value must be no greater than one. Received: {upper}."
)
is_test = not training
if is_test:
add = lower + upper
ret = add * x * 0.5
out = ivy.where(x >= 0, x, ret)
return out.astype(x.dtype)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def selu(
x,
/,
*,
alpha=1.6732632423543772848170429916717,
scale=1.0507009873554804934193349852946,
name=None,
):
if scale <= 1.0:
raise ValueError(f"The scale must be greater than 1.0. Received: {scale}.")
if alpha < 0:
raise ValueError(f"The alpha must be no less than zero. Received: {alpha}.")
ret = ivy.where(x > 0, x, alpha * ivy.expm1(x))
arr = scale * ret
return ivy.astype(arr, x.dtype)
def silu(x, name=None):
return ivy.silu(x)
@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def softmax_(x, axis=-1, dtype=None, name=None):
ret = ivy.softmax(x, axis=axis)
ivy.inplace_update(x, ret)
return x
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def softplus(x, beta=1, threshold=20, name=None):
return ivy.softplus(x, beta=beta, threshold=threshold)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def softshrink(
x,
/,
*,
threshold=0.5,
name=None,
):
low = ivy.where(ivy.less(x, -threshold), ivy.add(x, threshold), 0)
up = ivy.where(ivy.greater(x, threshold), ivy.subtract(x, threshold), 0)
add = ivy.add(low, up)
return ivy.astype(add, x.dtype)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def softsign(
x,
/,
*,
name=None,
):
return ivy.divide(x, ivy.add(1, ivy.abs(x)))
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def swish(x, name=None):
return ivy.multiply(x, ivy.sigmoid(x))
@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def tanh_(x, name=None):
ret = ivy.tanh(x)
ivy.inplace_update(x, ret)
return x
# TODO: implement the training branch of rrelu correctly once
# ivy.random_uniform is fixed:
#     a = ivy.random_uniform(low=lower, high=upper)
#     ret = ivy.where(x >= 0, x, ivy.multiply(a, x))
#     return ret.astype(x.dtype)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def tanhshrink(
x,
/,
*,
name=None,
):
return ivy.subtract(x, ivy.tanh(x))
| ivy/ivy/functional/frontends/paddle/nn/functional/activation.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/nn/functional/activation.py",
"repo_id": "ivy",
"token_count": 3564
} | 33 |
# local
from ..linalg import * # noqa: F401
| ivy/ivy/functional/frontends/paddle/tensor/linalg.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/tensor/linalg.py",
"repo_id": "ivy",
"token_count": 18
} | 34 |
# global
import sys
import ivy
# local
from ivy.functional.frontends import set_frontend_to_specific_version
from . import cluster
from . import constants
from . import fft
from . import fftpack
from . import integrate
from . import interpolate
from . import linalg
from . import ndimage
from . import odr
from . import optimize
from . import signal
from . import sparse
from . import spatial
from . import special
from . import stats
import ivy.functional.frontends.numpy as np
array = _frontend_array = np.array
# setting to specific version #
# --------------------------- #
if ivy.is_local():
module = ivy.utils._importlib.import_cache[__name__]
else:
module = sys.modules[__name__]
set_frontend_to_specific_version(module)
| ivy/ivy/functional/frontends/scipy/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/scipy/__init__.py",
"repo_id": "ivy",
"token_count": 224
} | 35 |
# global
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
import ivy.functional.frontends.scipy as sc_frontend
# --- Helpers --- #
# --------------- #
def _validate_vector(u, dtype=None):
u = ivy.asarray(u, dtype=dtype)
if u.ndim == 1:
return u
raise ValueError("Input vector should be 1-D.")
def _validate_weights(w, dtype="float64"):
w = _validate_vector(w, dtype=dtype)
if ivy.any(w < 0):
raise ValueError("Input weights should be all non-negative")
return w
# --- Main --- #
# ------------ #
# euclidean
@to_ivy_arrays_and_back
def euclidean(u, v, /, *, w=None):
return minkowski(u, v, 2, w=w)
# Functions #
# --------- #
# minkowski
@to_ivy_arrays_and_back
def minkowski(u, v, p=2, /, *, w=None):
u = _validate_vector(u)
v = _validate_vector(v)
if p <= 0:
raise ValueError("p must be greater than 0")
u_v = u - v
if w is not None:
w = _validate_weights(w)
if p == 1:
root_w = w
elif p == 2:
# better precision and speed
root_w = ivy.sqrt(w)
elif p == ivy.inf:
root_w = w != 0
else:
root_w = ivy.pow(w, 1 / p)
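        # folding w**(1/p) into the difference gives (sum_i w_i * |u_i - v_i|**p)**(1/p)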
u_v = ivy.multiply(root_w, u_v)
dist = sc_frontend.linalg.norm(u_v, ord=p)
return dist
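# Minimal usage sketch (values are illustrative, not from the repo):
#   minkowski([1.0, 0.0], [0.0, 1.0], 1)  -> 2.0 (Manhattan distance)
#   minkowski([1.0, 0.0], [0.0, 1.0], 2)  -> sqrt(2) (Euclidean distance)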
| ivy/ivy/functional/frontends/scipy/spatial/distance.py/0 | {
"file_path": "ivy/ivy/functional/frontends/scipy/spatial/distance.py",
"repo_id": "ivy",
"token_count": 651
} | 36 |
from . import _classification
from ._classification import *
| ivy/ivy/functional/frontends/sklearn/metrics/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/sklearn/metrics/__init__.py",
"repo_id": "ivy",
"token_count": 14
} | 37 |
from . import v1
| ivy/ivy/functional/frontends/tensorflow/compat/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/compat/__init__.py",
"repo_id": "ivy",
"token_count": 6
} | 38 |
# global
import ivy
from ivy import (
with_supported_dtypes,
with_unsupported_dtypes,
with_supported_device_and_dtypes,
)
from ivy.functional.frontends.tensorflow import check_tensorflow_casting
from ivy.functional.frontends.tensorflow.func_wrapper import (
to_ivy_arrays_and_back,
handle_tf_dtype,
to_ivy_dtype,
)
# --- Helpers --- #
# --------------- #
def _chbevl(x, coef, N):
"""Evaluates the series.
        y = sum_{i=0}^{N-1} coef[i] * T_i(x / 2)
of Chebyshev polynomials Ti at argument x/2.
Coefficients are stored in reverse order, i.e. the zero
order term is last in the array. Note N is the number of
coefficients, not the order.
If coefficients are for the interval a to b, x must
have been transformed to x -> 2(2x - b - a)/(b-a) before
entering the routine. This maps x from (a, b) to (-1, 1),
over which the Chebyshev polynomials are defined.
If the coefficients are for the inverted interval, in
which (a, b) is mapped to (1/b, 1/a), the transformation
required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity,
this becomes x -> 4a/x - 1.
"""
b0 = coef[0:1]
b1 = ivy.zeros_like(x)
i = N - 1
p = 1
while i > 0:
b2 = b1
b1 = b0
with ivy.PreciseMode(True):
b0 = x * b1 - b2 + coef[p : p + 1]
p += 1
i -= 1
return 0.5 * (b0 - b2)
def _get_chebyshev_coefficients_for_exp_i1():
"""Chebyshev coefficients for exp(-x) I1(x) / x in the interval [0,8].
lim(x->0){ exp(-x) I1(x) / x } = 1/2.
    Returns
    -------
    ivy.Array
        The 29 float coefficients.
"""
return ivy.array(
[
2.77791411276104639959e-18,
-2.11142121435816608115e-17,
1.55363195773620046921e-16,
-1.10559694773538630805e-15,
7.60068429473540693410e-15,
-5.04218550472791168711e-14,
3.22379336594557470981e-13,
-1.98397439776494371520e-12,
1.17361862988909016308e-11,
-6.66348972350202774223e-11,
3.62559028155211703701e-10,
-1.88724975172282928790e-9,
9.38153738649577178388e-9,
-4.44505912879632808065e-8,
2.00329475355213526229e-7,
-8.56872026469545474066e-7,
3.47025130813767847674e-6,
-1.32731636560394358279e-5,
4.78156510755005422638e-5,
-1.61760815825896745588e-4,
5.12285956168575772895e-4,
-1.51357245063125314899e-3,
4.15642294431288815669e-3,
-1.05640848946261981558e-2,
2.47264490306265168283e-2,
-5.29459812080949914269e-2,
1.02643658689847095384e-1,
-1.76416518357834055153e-1,
2.52587186443633654823e-1,
]
)
def _get_chebyshev_coefficients_for_exp_sqrt_i1():
"""Chebyshev coefficients for exp(-x) sqrt(x) I1(x) in the inverted
interval [8,infinity].
lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi).
    Returns
    -------
    ivy.Array
        The 25 float coefficients.
"""
return ivy.array(
[
7.51729631084210481353e-18,
4.41434832307170791151e-18,
-4.65030536848935832153e-17,
-3.20952592199342395980e-17,
2.96262899764595013876e-16,
3.30820231092092828324e-16,
-1.88035477551078244854e-15,
-3.81440307243700780478e-15,
1.04202769841288027642e-14,
4.27244001671195135429e-14,
-2.10154184277266431302e-14,
-4.08355111109219731823e-13,
-7.19855177624590851209e-13,
2.03562854414708950722e-12,
1.41258074366137813316e-11,
3.25260358301548823856e-11,
-1.89749581235054123450e-11,
-5.58974346219658380687e-10,
-3.83538038596423702205e-9,
-2.63146884688951950684e-8,
-2.51223623787020892529e-7,
-3.88256480887769039346e-6,
-1.10588938762623716291e-4,
-9.76109749136146840777e-3,
7.78576235018280120474e-1,
]
)
# --- Main --- #
# ------------ #
@with_unsupported_dtypes(
{
"1.2.0": ("float16", "complex64", "complex128"),
"1.8.0 and below": ("float16",),
"2.15.0 and below": ("int8", "int16", "uint8", "uint16", "uint32", "uint64"),
},
"tensorflow",
)
@to_ivy_arrays_and_back
def abs(x, name=None):
dtype = ivy.dtype(x)
if dtype in ["complex64", "complex128"]:
return ivy.sqrt(ivy.square(ivy.real(x)) + ivy.square(ivy.imag(x)))
return ivy.abs(x)
@to_ivy_arrays_and_back
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
return ivy.sum(inputs, axis=0)
@to_ivy_arrays_and_back
def acos(x, name="acos"):
return ivy.acos(x)
@to_ivy_arrays_and_back
def acosh(x, name="acosh"):
return ivy.acosh(x)
@to_ivy_arrays_and_back
def add(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return ivy.add(x, y)
@to_ivy_arrays_and_back
def add_n(inputs, name=None):
inputs = ivy.array(inputs)
return ivy.sum(inputs, dtype=inputs.dtype, axis=0)
@to_ivy_arrays_and_back
def angle(input, name=None):
return ivy.angle(input)
@with_unsupported_dtypes(
{"2.15.0 and below": ("complex",)},
"tensorflow",
)
@to_ivy_arrays_and_back
def argmax(input, axis, output_type=None, name=None):
output_type = to_ivy_dtype(output_type)
if output_type in ["int32", "int64"]:
return ivy.astype(ivy.argmax(input, axis=axis), output_type)
else:
return ivy.astype(ivy.argmax(input, axis=axis), "int64")
@with_unsupported_dtypes(
{"2.15.0 and below": ("complex",)},
"tensorflow",
)
@to_ivy_arrays_and_back
def argmin(input, axis=None, output_type="int64", name=None):
output_type = to_ivy_dtype(output_type)
if output_type in ["int32", "int64"]:
return ivy.astype(ivy.argmin(input, axis=axis), output_type)
else:
return ivy.astype(ivy.argmin(input, axis=axis), "int64")
@to_ivy_arrays_and_back
def asin(x, name=None):
return ivy.asin(x)
@to_ivy_arrays_and_back
def asinh(x, name="asinh"):
return ivy.asinh(x)
@to_ivy_arrays_and_back
def atan(x, name=None):
return ivy.atan(x)
@to_ivy_arrays_and_back
def atan2(y, x, name=None):
return ivy.atan2(y, x)
@to_ivy_arrays_and_back
def atanh(x, name="atanh"):
return ivy.atanh(x)
@with_supported_dtypes(
{"2.15.0 and below": ("float16", "float32", "float64")}, "tensorflow"
)
@to_ivy_arrays_and_back
def bessel_i1(x, name=None):
z = ivy.abs(x)
result = ivy.zeros_like(z)
mask1 = z <= 8.0
if ivy.any(mask1) > 0:
y = (z[mask1] / ivy.array([2.0])) - ivy.array([2.0])
result[mask1] = (
_chbevl(y, _get_chebyshev_coefficients_for_exp_i1(), 29)
* z[mask1]
* ivy.exp(z[mask1])
)
mask2 = ~mask1
if ivy.any(mask2) > 0:
result[mask2] = (
ivy.exp(z[mask2])
* _chbevl(
ivy.array([32.0]) / z[mask2] - ivy.array([2.0]),
_get_chebyshev_coefficients_for_exp_sqrt_i1(),
25,
)
/ ivy.sqrt(z[mask2])
)
result[x < 0.0] = -result[x < 0.0]
return result
@with_supported_dtypes(
{"2.15.0 and below": ("int32",)},
"tensorflow",
)
@to_ivy_arrays_and_back
def bincount(
arr,
weights=None,
minlength=None,
maxlength=None,
dtype=ivy.int32,
name=None,
axis=None,
binary_output=False,
):
return ivy.bincount(arr, weights=weights, minlength=minlength)
@to_ivy_arrays_and_back
def ceil(x, name=None):
return ivy.ceil(x)
@handle_tf_dtype
@to_ivy_arrays_and_back
def confusion_matrix(
labels, predictions, num_classes=None, weights=None, dtype=ivy.int32, name=None
):
labels = ivy.astype(
ivy.squeeze(ivy.array(labels), axis=None), ivy.int64, copy=False
)
predictions = ivy.astype(
ivy.squeeze(ivy.array(predictions), axis=None), ivy.int64, copy=False
)
    # failsafe: squeezing a (1,) array yields a 0-dim array, so restore one axis
labels = ivy.expand_dims(labels, axis=-1) if labels.ndim == 0 else labels
predictions = (
ivy.expand_dims(predictions, axis=-1) if predictions.ndim == 0 else predictions
)
# Sanity check (potential optimization)
ivy.utils.assertions.check_greater(
labels, 0, allow_equal=True, message="labels contains negative values"
)
ivy.utils.assertions.check_greater(
predictions, 0, allow_equal=True, message="predictions contains negative values"
)
if num_classes is None:
num_classes = max(ivy.max(labels), ivy.max(predictions)) + 1
else:
num_classes_int64 = ivy.astype(ivy.array(num_classes), ivy.int64, copy=False)
ivy.utils.assertions.check_less(
labels, num_classes_int64, message="labels out of bound"
)
ivy.utils.assertions.check_less(
predictions, num_classes_int64, message="predictions out of bound"
)
if weights is not None:
weights = ivy.array(weights)
ivy.utils.assertions.check_equal(
ivy.shape(predictions),
ivy.shape(weights),
message="weights shape do not match predictions",
as_array=False,
)
weights = ivy.astype(weights, dtype, copy=False)
shape = ivy.stack([num_classes, num_classes])
indices = ivy.stack([labels, predictions], axis=1)
values = ivy.ones_like(predictions, dtype=dtype) if weights is None else weights
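    # Each (label, prediction) pair addresses one cell of the matrix, e.g. with
    # unit weights labels=[0, 1, 1], predictions=[0, 1, 0] -> [[1, 0], [1, 1]]
    # (illustrative example).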
return ivy.scatter_nd(indices, values, shape=shape)
@to_ivy_arrays_and_back
def conj(x, name=None):
return ivy.conj(x)
@to_ivy_arrays_and_back
def cos(x, name=None):
return ivy.cos(x)
@to_ivy_arrays_and_back
def cosh(x, name=None):
return ivy.cosh(x)
@handle_tf_dtype
@to_ivy_arrays_and_back
def count_nonzero(input, axis=None, keepdims=None, dtype=ivy.int64, name=None):
x = ivy.array(input)
if keepdims is None:
keepdims = False
zero = ivy.zeros(ivy.shape(x), dtype=x.dtype)
return ivy.astype(
ivy.sum(
ivy.astype(ivy.not_equal(x, zero), ivy.int64),
axis=axis,
keepdims=keepdims,
),
dtype,
copy=False,
)
@to_ivy_arrays_and_back
def cumprod(x, axis, exclusive=False, reverse=False, name=None):
return ivy.astype(
ivy.cumprod(x, axis=axis, exclusive=exclusive, reverse=reverse), x.dtype
)
@to_ivy_arrays_and_back
def cumsum(x, axis, exclusive=False, reverse=False, name=None):
return ivy.astype(
ivy.cumsum(x, axis=axis, exclusive=exclusive, reverse=reverse), x.dtype
)
@to_ivy_arrays_and_back
def digamma(x, name=None):
return ivy.digamma(x)
@to_ivy_arrays_and_back
def divide(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return ivy.divide(x, y)
@to_ivy_arrays_and_back
def divide_no_nan(x, y, name="divide_no_nan"):
x, y = check_tensorflow_casting(x, y)
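    # Where y == 0 the result is 0 instead of inf/nan,
    # e.g. divide_no_nan(3.0, 0.0) -> 0.0 (illustrative example).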
return ivy.where(
y == 0,
ivy.array(0.0, dtype=ivy.promote_types(x.dtype, y.dtype)),
x / y,
)
@to_ivy_arrays_and_back
def equal(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return ivy.equal(x, y)
@to_ivy_arrays_and_back
def erfcinv(x, name="erfcinv"):
return 1 / (1 - ivy.erf(x))
@to_ivy_arrays_and_back
def exp(x, name=None):
return ivy.exp(x)
@to_ivy_arrays_and_back
def expm1(x, name=None):
return ivy.expm1(x)
@to_ivy_arrays_and_back
def floor(x, name=None):
return ivy.floor(x)
@to_ivy_arrays_and_back
def floordiv(x, y, name=None):
return ivy.floor_divide(x, y)
@to_ivy_arrays_and_back
def floormod(x, y, name=None):
return ivy.remainder(x, y)
@to_ivy_arrays_and_back
def greater(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return ivy.greater(x, y)
@to_ivy_arrays_and_back
def greater_equal(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return ivy.greater_equal(x, y)
@with_supported_device_and_dtypes(
{
"2.15.0 and below": {
"cpu": ("float32", "float64"),
"gpu": ("bfloat16", "float16", "float32", "float64"),
}
},
"tensorflow",
)
@to_ivy_arrays_and_back
def igamma(a, x, name=None):
return ivy.igamma(a, x=x)
@with_supported_dtypes(
{"2.15.0 and below": ("float16", "float32", "float64", "complex64", "complex128")},
"tensorflow",
)
@to_ivy_arrays_and_back
def imag(input, name=None):
return ivy.imag(input)
@to_ivy_arrays_and_back
def in_top_k(target, pred, k, name=None):
top_k = ivy.top_k(target, k)
return ivy.array([val in top_k.values for val in target])
@with_supported_dtypes(
{
"2.15.0 and below": ("int32", "int64"),
},
"tensorflow",
)
@to_ivy_arrays_and_back
def invert_permutation(x, name=None):
return ivy.invert_permutation(x)
@with_supported_dtypes(
{
"2.15.0 and below": ("bfloat16", "half", "float32", "float64"),
},
"tensorflow",
)
@to_ivy_arrays_and_back
def is_finite(x, name=None):
return ivy.isfinite(x)
@to_ivy_arrays_and_back
def is_inf(x, name=None):
return ivy.isinf(x)
@to_ivy_arrays_and_back
def is_nan(x, name=None):
return ivy.isnan(x)
@to_ivy_arrays_and_back
def is_non_decreasing(x, name="is_non_decreasing"):
if ivy.array(x).size < 2:
return ivy.array(True)
if ivy.array(x).size == 2:
return ivy.array([x[0] <= x[1]])
return ivy.all(ivy.less_equal(x, ivy.roll(x, -1)))
@to_ivy_arrays_and_back
def is_strictly_increasing(x, name="is_strictly_increasing"):
if ivy.array(x).size < 2:
return ivy.array(True)
x = ivy.flatten(x)
res = ivy.less(x, ivy.roll(x, -1))
if res.size >= 2:
res[res.size - 1] = True # The last comparison must be set to true.
return ivy.all(res)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.15.0 and below": ("float32", "float64")}, "tensorflow")
def l2_normalize(x, axis=None, epsilon=1e-12, name=None):
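    # Computes x / sqrt(max(sum(x**2, axis), epsilon)), keeping the reduced
    # axis so the inverse norm broadcasts against x.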
square_sum = ivy.sum(ivy.square(x), axis=axis, keepdims=True)
x_inv_norm = ivy.reciprocal(ivy.sqrt(ivy.maximum(square_sum, epsilon)))
return ivy.multiply(x, x_inv_norm)
@to_ivy_arrays_and_back
def less(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return ivy.less(x, y)
@to_ivy_arrays_and_back
def less_equal(x, y, name="LessEqual"):
x, y = check_tensorflow_casting(x, y)
return ivy.less_equal(x, y)
# lgamma
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.15.0 and below": ("float32", "float64")}, "tensorflow")
def lgamma(x, name=None):
return ivy.lgamma(x)
@to_ivy_arrays_and_back
def log(x, name=None):
return ivy.log(x)
@to_ivy_arrays_and_back
def log1p(x, name=None):
return ivy.log1p(x)
@to_ivy_arrays_and_back
def log_sigmoid(x, name=None):
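    # Uses the identity log(sigmoid(x)) == -softplus(-x), which is numerically
    # stable for large |x|.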
return -ivy.softplus(-x)
@to_ivy_arrays_and_back
def log_softmax(logits, axis=None):
if axis is None:
axis = -1
return ivy.log_softmax(logits, axis=axis)
@to_ivy_arrays_and_back
def logical_and(x, y, name="LogicalAnd"):
return ivy.logical_and(x, y)
@to_ivy_arrays_and_back
def logical_not(x, name="logical_not"):
return ivy.logical_not(x)
@to_ivy_arrays_and_back
def logical_or(x, y, name="logical_or"):
return ivy.logical_or(x, y)
@to_ivy_arrays_and_back
def logical_xor(x, y, name="LogicalXor"):
return ivy.logical_xor(x, y)
@to_ivy_arrays_and_back
def maximum(x, y, name=None):
return ivy.maximum(x, y)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, "tensorflow")
def minimum(x, y, name=None):
return ivy.minimum(x, y)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.6.0 and below": ("bfloat16",)}, "paddle")
def mod(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return ivy.remainder(x, y)
@to_ivy_arrays_and_back
def multiply(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return ivy.multiply(x, y)
@to_ivy_arrays_and_back
def multiply_no_nan(x, y, name="multiply_no_nan"):
x, y = check_tensorflow_casting(x, y)
return ivy.where(
y == 0,
ivy.array(0.0, dtype=ivy.promote_types(x.dtype, y.dtype)),
x * y,
)
@to_ivy_arrays_and_back
def negative(x, name=None):
return ivy.negative(x)
@to_ivy_arrays_and_back
def nextafter(x1, x2, name=None):
return ivy.nextafter(x1, x2)
@to_ivy_arrays_and_back
def not_equal(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return ivy.not_equal(x, y)
@to_ivy_arrays_and_back
def polyval(coeffs, x, name=None):
ivy.utils.assertions.check_isinstance(coeffs, list)
x = ivy.array(x)
if len(coeffs) < 1:
return ivy.zeros_like(x, dtype=x.dtype)
coeffs = [ivy.array(_) for _ in coeffs]
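    # Horner's rule with coeffs ordered from the highest degree downwards,
    # e.g. polyval([2, 1, 0], x) evaluates 2*x**2 + x (illustrative example).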
p = coeffs[0]
for c in coeffs[1:]:
p = c + p * x
return p
@to_ivy_arrays_and_back
def pow(x, y, name="pow"):
x, y = check_tensorflow_casting(x, y)
return ivy.pow(x, y)
@to_ivy_arrays_and_back
def real(input, name=None):
return ivy.real(input)
@to_ivy_arrays_and_back
def reciprocal(x, name="reciprocal"):
return ivy.reciprocal(x)
@to_ivy_arrays_and_back
def reciprocal_no_nan(x, name="reciprocal_no_nan"):
return ivy.where(
x == 0,
ivy.array(0.0, dtype=x.dtype),
ivy.ones_like(x, dtype=x.dtype) / x,
)
@to_ivy_arrays_and_back
def reduce_all(input_tensor, axis=None, keepdims=False, name="reduce_all"):
return ivy.all(input_tensor, axis=axis, keepdims=keepdims)
@to_ivy_arrays_and_back
def reduce_any(input_tensor, axis=None, keepdims=False, name="reduce_any"):
return ivy.any(input_tensor, axis=axis, keepdims=keepdims)
@to_ivy_arrays_and_back
def reduce_euclidean_norm(
input_tensor, axis=None, keepdims=False, name="reduce_euclidean_norm"
):
return ivy.vector_norm(
input_tensor, axis=axis, keepdims=keepdims, ord=2
) # ord = '2' is the euclidean norm
@to_ivy_arrays_and_back
def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name="reduce_logsumexp"):
# stable logsumexp trick
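    # log(sum(exp(x))) == max(x) + log(sum(exp(x - max(x)))); subtracting the
    # maximum before exponentiating prevents overflow in exp().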
max_input_tensor = ivy.max(input_tensor, axis=axis, keepdims=False)
return (
ivy.log(
ivy.sum(
ivy.exp(input_tensor - max_input_tensor),
axis=axis,
keepdims=keepdims,
)
)
+ max_input_tensor
).astype(input_tensor.dtype)
@to_ivy_arrays_and_back
def reduce_max(input_tensor, axis=None, keepdims=False, name="reduce_max"):
return ivy.max(input_tensor, axis=axis, keepdims=keepdims)
@to_ivy_arrays_and_back
def reduce_mean(input_tensor, axis=None, keepdims=False, name="reduce_mean"):
if ivy.exists(axis):
axis = ivy.to_list(axis)
return ivy.mean(input_tensor, axis=axis, keepdims=keepdims)
@to_ivy_arrays_and_back
def reduce_min(input_tensor, axis=None, keepdims=False, name="reduce_min"):
return ivy.min(input_tensor, axis=axis, keepdims=keepdims)
@to_ivy_arrays_and_back
def reduce_prod(input_tensor, axis=None, keepdims=False, name="reduce_prod"):
return ivy.prod(input_tensor, axis=axis, keepdims=keepdims).astype(
input_tensor.dtype
)
@to_ivy_arrays_and_back
def reduce_std(input_tensor, axis=None, keepdims=False, name="reduce_std"):
return ivy.std(input_tensor, axis=axis, keepdims=keepdims)
@to_ivy_arrays_and_back
def reduce_sum(input_tensor, axis=None, keepdims=False, name="reduce_sum"):
input_tensor = ivy.array(input_tensor)
return ivy.sum(input_tensor, axis=axis, keepdims=keepdims).astype(
input_tensor.dtype
)
@to_ivy_arrays_and_back
def reduce_variance(input_tensor, axis=None, keepdims=False, name="reduce_variance"):
return ivy.var(input_tensor, axis=axis, keepdims=keepdims)
@with_supported_device_and_dtypes(
{
"2.15.0 and below": {
"cpu": ("float32", "float64"),
"gpu": ("bfloat16", "float16", "float32", "float64"),
}
},
"tensorflow",
)
@to_ivy_arrays_and_back
def rint(x, name=None):
return ivy.round(x)
@to_ivy_arrays_and_back
def round(x, name=None):
return ivy.round(x)
@to_ivy_arrays_and_back
def rsqrt(x, name=None):
return ivy.reciprocal(ivy.sqrt(x))
@to_ivy_arrays_and_back
def scalar_mul(scalar, x, name="scalar_mul"):
scalar, x = check_tensorflow_casting(scalar, x)
return ivy.multiply(x, scalar).astype(x.dtype)
@with_unsupported_dtypes(
{"2.15.0 and below": ("float16", "bool", "int16", "int8")},
"tensorflow",
)
@to_ivy_arrays_and_back
def segment_sum(data, segment_ids, name="segment_sum"):
data = ivy.array(data)
segment_ids = ivy.array(segment_ids)
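    # Rows sharing a segment id are summed, e.g. data=[[1, 2], [3, 4], [5, 6]],
    # segment_ids=[0, 0, 1] -> [[4, 6], [5, 6]] (illustrative example).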
ivy.utils.assertions.check_equal(
list(segment_ids.shape), [list(data.shape)[0]], as_array=False
)
sum_array = ivy.zeros(
tuple([int(segment_ids[-1] + 1)] + (list(data.shape))[1:]), dtype=data.dtype
)
for i in range((segment_ids).shape[0]):
sum_array[segment_ids[i]] = sum_array[segment_ids[i]] + data[i]
return sum_array
@to_ivy_arrays_and_back
def sigmoid(x, name=None):
return ivy.sigmoid(x)
@with_supported_dtypes(
{
"2.15.0 and below": (
"bfloat16",
"float16",
"float32",
"float64",
"complex64",
"complex128",
)
},
"tensorflow",
)
@to_ivy_arrays_and_back
def sin(x, name=None):
return ivy.sin(x)
@to_ivy_arrays_and_back
def sinh(x, name=None):
return ivy.sinh(x)
@to_ivy_arrays_and_back
def softmax(logits, axis=None, name=None):
return ivy.softmax(logits, axis=axis)
@to_ivy_arrays_and_back
def softplus(features, name=None):
return ivy.softplus(features)
@with_supported_dtypes(
{"2.15.0 and below": ("bfloat32", "float32", "float64")}, "tensorflow"
)
@to_ivy_arrays_and_back
def softsign(features, name=None):
return ivy.divide(features, ivy.abs(features) + 1)
@to_ivy_arrays_and_back
def sqrt(x, name=None):
return ivy.sqrt(x)
@to_ivy_arrays_and_back
def square(x, name=None):
return ivy.square(x)
@with_supported_dtypes(
{
"2.15.0 and below": (
"bfloat16",
"float16",
"float32",
"float64",
"int32",
"int64",
"complex64",
"complex128",
)
},
"tensorflow",
)
@to_ivy_arrays_and_back
def squared_difference(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
res = ivy.square(ivy.subtract(x, y))
if isinstance(res, complex):
res = res.real - res.imag * 1j # Changing the sign of the imaginary part
return res
return res
@to_ivy_arrays_and_back
def subtract(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return ivy.subtract(x, y)
@to_ivy_arrays_and_back
def tan(x, name=None):
return ivy.tan(x)
@with_supported_dtypes(
{"2.15.0 and below": ("float16", "float32", "float64", "complex64", "complex128")},
"tensorflow",
)
@to_ivy_arrays_and_back
def tanh(x, name=None):
return ivy.tanh(x)
@to_ivy_arrays_and_back
def top_k(input, k=1, sorted=True, name=None):
return ivy.top_k(input, k, sorted=sorted)
@to_ivy_arrays_and_back
def truediv(x, y, name="truediv"):
x, y = check_tensorflow_casting(x, y)
x_dtype = ivy.dtype(x)
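    # Integer inputs are promoted to floats before dividing: 8/16-bit ints go
    # to float32, 32/64-bit ints to float64.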
if x_dtype in ["int8", "uint8", "int16", "uint16"]:
return ivy.divide(ivy.astype(x, ivy.float32), ivy.astype(y, ivy.float32))
elif x_dtype in ["int32", "uint32", "int64", "uint64"]:
return ivy.divide(ivy.astype(x, ivy.float64), ivy.astype(y, ivy.float64))
return ivy.divide(x, y)
@to_ivy_arrays_and_back
def unsorted_segment_mean(
data, segment_ids, num_segments, name="unsorted_segment_mean"
):
ivy.utils.assertions.check_equal(
list(segment_ids.shape), [list(data.shape)[0]], as_array=False
)
x = ivy.zeros(tuple([num_segments] + (list(data.shape))[1:]))
count = ivy.zeros((num_segments,))
for i in range((segment_ids).shape[0]):
x[segment_ids[i]] = x[segment_ids[i]] + data[i]
count[segment_ids[i]] += 1
for j in range(num_segments):
x[j] = ivy.divide(x[j], count[j])
return x
@to_ivy_arrays_and_back
def unsorted_segment_min(data, segment_ids, num_segments, name="unsorted_segment_min"):
data = ivy.array(data)
segment_ids = ivy.array(segment_ids)
ivy.utils.assertions.check_equal(
list(segment_ids.shape), [list(data.shape)[0]], as_array=False
)
min_array = ivy.zeros(
tuple([num_segments.item()] + (list(data.shape))[1:]), dtype=ivy.int32
)
for i in range((segment_ids).shape[0]):
min_array[segment_ids[i]] = ivy.minimum(min_array[segment_ids[i]], data[i])
return min_array
@to_ivy_arrays_and_back
def unsorted_segment_sqrt_n(
    data, segment_ids, num_segments, name="unsorted_segment_sqrt_n"
):
ivy.utils.assertions.check_equal(
list(segment_ids.shape), [list(data.shape)[0]], as_array=False
)
x = ivy.zeros(tuple([num_segments] + (list(data.shape))[1:]))
count = ivy.zeros((num_segments,))
for i in range((segment_ids).shape[0]):
x[segment_ids[i]] = x[segment_ids[i]] + data[i]
count[segment_ids[i]] += 1
for j in range(num_segments):
x[j] = ivy.divide(x[j], ivy.sqrt(count[j]))
return x
@to_ivy_arrays_and_back
def unsorted_segment_sum(data, segment_ids, num_segments, name="unsorted_segment_sum"):
data = ivy.array(data)
segment_ids = ivy.array(segment_ids)
ivy.utils.assertions.check_equal(
list(segment_ids.shape), [list(data.shape)[0]], as_array=False
)
sum_array = ivy.zeros(
tuple([num_segments.item()] + (list(data.shape))[1:]), dtype=ivy.int32
)
for i in range((segment_ids).shape[0]):
sum_array[segment_ids[i]] = sum_array[segment_ids[i]] + data[i]
return sum_array
@with_supported_dtypes(
{"2.15.0 and below": ("float32", "float64", "complex64", "complex128")},
"tensorflow",
)
@to_ivy_arrays_and_back
def xdivy(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
if (x == 0).all():
return 0.0
return ivy.divide(x, y)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.15.0 and below": ("float32", "float64")}, "tensorflow")
def xlog1py(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return x * ivy.log1p(y)
@to_ivy_arrays_and_back
def xlogy(x, y, name=None):
return ivy.xlogy(x, y)
@to_ivy_arrays_and_back
def zero_fraction(value, name="zero_fraction"):
zero = ivy.zeros(tuple(value.shape), dtype=ivy.float32)
x = ivy.array(value, dtype=ivy.float32)
count_zero = ivy.sum(ivy.equal(x, zero))
count_nonzero = ivy.sum(ivy.not_equal(x, zero))
return ivy.divide(count_zero, ivy.add(count_zero, count_nonzero))
@to_ivy_arrays_and_back
@with_supported_dtypes(
{
"2.15.0 and below": ("float32", "float64"),
},
"tensorflow",
)
def zeta(x, q, name=None):
return ivy.zeta(x, q)
# --- end of file: ivy/ivy/functional/frontends/tensorflow/math.py ---
# global
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
import ivy.functional.frontends.torch as torch_frontend
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None):
if len(ivy.shape(batch1)) != 3 or len(ivy.shape(batch2)) != 3:
raise RuntimeError("input must be 3D matrices")
batch1, batch2 = torch_frontend.promote_types_of_torch_inputs(batch1, batch2)
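    # out = beta * input + alpha * sum_b(batch1[b] @ batch2[b]); the batched
    # matmul is reduced over the batch dimension before scaling.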
ret = ivy.matmul(batch1, batch2, out=out)
ret = ivy.sum(ret, axis=0, keepdims=False, dtype=ivy.dtype(ret), out=out)
alpha, ret = torch_frontend.promote_types_of_torch_inputs(alpha, ret)
ret = ivy.multiply(alpha, ret, out=out)
beta, input = torch_frontend.promote_types_of_torch_inputs(beta, input)
beta_input = ivy.multiply(beta, input, out=out)
beta_input, ret = torch_frontend.promote_types_of_torch_inputs(beta_input, ret)
return ivy.add(beta_input, ret, out=out)
@to_ivy_arrays_and_back
def addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None):
if len(ivy.shape(mat1)) != 2 or len(ivy.shape(mat2)) != 2:
raise RuntimeError("input must be 2D matrices")
mat1, mat2 = torch_frontend.promote_types_of_torch_inputs(mat1, mat2)
ret = ivy.matmul(mat1, mat2, out=out)
alpha, ret = torch_frontend.promote_types_of_torch_inputs(alpha, ret)
ret = ivy.multiply(alpha, ret, out=out)
beta, input = torch_frontend.promote_types_of_torch_inputs(beta, input)
beta_input = ivy.multiply(beta, input, out=out)
beta_input, ret = torch_frontend.promote_types_of_torch_inputs(beta_input, ret)
return ivy.add(beta_input, ret, out=out)
@to_ivy_arrays_and_back
def addmv(input, mat, vec, *, beta=1, alpha=1, out=None):
if len(ivy.shape(mat)) != 2 or len(ivy.shape(vec)) != 1:
raise RuntimeError("input must be 2D matrix and 1D vector")
mat, vec = torch_frontend.promote_types_of_torch_inputs(mat, vec)
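    # out = beta * input + alpha * (mat @ vec)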
ret = ivy.matmul(mat, vec, out=out)
alpha, ret = torch_frontend.promote_types_of_torch_inputs(alpha, ret)
ret = ivy.multiply(alpha, ret, out=out)
beta, input = torch_frontend.promote_types_of_torch_inputs(beta, input)
beta_input = ivy.multiply(beta, input, out=out)
beta_input, ret = torch_frontend.promote_types_of_torch_inputs(beta_input, ret)
return ivy.add(beta_input, ret, out=out)
@to_ivy_arrays_and_back
def addr(input, vec1, vec2, *, beta=1, alpha=1, out=None):
if len(ivy.shape(vec1)) != 1 or len(ivy.shape(vec2)) != 1:
raise RuntimeError("input must be 1D vectors")
vec1, vec2 = torch_frontend.promote_types_of_torch_inputs(vec1, vec2)
ret = ivy.outer(vec1, vec2, out=out)
alpha, ret = torch_frontend.promote_types_of_torch_inputs(alpha, ret)
ret = ivy.multiply(alpha, ret, out=out)
beta, input = torch_frontend.promote_types_of_torch_inputs(beta, input)
beta_input = ivy.multiply(beta, input, out=out)
beta_input, ret = torch_frontend.promote_types_of_torch_inputs(beta_input, ret)
return ivy.add(beta_input, ret, out=out)
@to_ivy_arrays_and_back
def baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None):
if len(ivy.shape(batch1)) != 3 or len(ivy.shape(batch2)) != 3:
raise RuntimeError("input must be batched 2D matrices")
batch1, batch2 = torch_frontend.promote_types_of_torch_inputs(batch1, batch2)
ret = ivy.matmul(batch1, batch2, out=out)
alpha, ret = torch_frontend.promote_types_of_torch_inputs(alpha, ret)
ret = ivy.multiply(alpha, ret, out=out)
beta, input = torch_frontend.promote_types_of_torch_inputs(beta, input)
beta_input = ivy.multiply(beta, input, out=out)
beta_input, ret = torch_frontend.promote_types_of_torch_inputs(beta_input, ret)
return ivy.add(beta_input, ret, out=out)
@to_ivy_arrays_and_back
def bmm(input, mat2, *, out=None):
if len(ivy.shape(input)) != 3 or len(ivy.shape(mat2)) != 3:
raise RuntimeError("input must be 3D matrices")
input, mat2 = torch_frontend.promote_types_of_torch_inputs(input, mat2)
return ivy.matmul(input, mat2, out=out)
@to_ivy_arrays_and_back
def chain_matmul(*matrices, out=None):
return ivy.multi_dot(matrices, out=out)
@to_ivy_arrays_and_back
def cholesky(input, upper=False, *, out=None):
return ivy.cholesky(input, upper=upper, out=out)
@to_ivy_arrays_and_back
def det(input):
return torch_frontend.linalg.det(input)
@to_ivy_arrays_and_back
def dot(input, other, *, out=None):
if len(input.shape) == 1 and len(other.shape) == 1:
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.matmul(input, other, out=out)
else:
raise RuntimeError("input must be 1D vectors")
@to_ivy_arrays_and_back
def ger(input, vec2, *, out=None):
input, vec2 = torch_frontend.promote_types_of_torch_inputs(input, vec2)
return ivy.outer(input, vec2, out=out)
@to_ivy_arrays_and_back
def inner(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.inner(input, other, out=out)
@to_ivy_arrays_and_back
def inverse(input, *, out=None):
return torch_frontend.linalg.inv(input, out=out)
@to_ivy_arrays_and_back
def logdet(input):
return ivy.det(input).log()
@to_ivy_arrays_and_back
def matmul(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.matmul(input, other, out=out)
@to_ivy_arrays_and_back
def matrix_power(A, n, *, out=None):
return torch_frontend.linalg.matrix_power(A, n, out=out)
@to_ivy_arrays_and_back
def matrix_rank(input, tol=None, symmetric=False, *, out=None):
return ivy.matrix_rank(input, atol=tol, hermitian=symmetric, out=out)
@to_ivy_arrays_and_back
def mm(input, mat2, *, out=None):
if len(ivy.shape(input)) != 2 or len(ivy.shape(mat2)) != 2:
raise RuntimeError("input must be 2D matrices")
input, mat2 = torch_frontend.promote_types_of_torch_inputs(input, mat2)
return ivy.matmul(input, mat2, out=out)
@to_ivy_arrays_and_back
def mv(input, vec, *, out=None):
if len(ivy.shape(input)) != 2 or len(ivy.shape(vec)) != 1:
raise RuntimeError("input must be 2D matrix and 1D vector")
input, vec = torch_frontend.promote_types_of_torch_inputs(input, vec)
return ivy.matmul(input, vec, out=out)
@to_ivy_arrays_and_back
def outer(input, vec2, *, out=None):
input, vec2 = torch_frontend.promote_types_of_torch_inputs(input, vec2)
return ivy.outer(input, vec2, out=out)
@to_ivy_arrays_and_back
def pinverse(input, rcond=1e-15):
return ivy.pinv(input, rtol=rcond)
@to_ivy_arrays_and_back
def qr(input, some=True, *, out=None):
if some:
ret = ivy.qr(input, mode="reduced")
else:
ret = ivy.qr(input, mode="complete")
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
@to_ivy_arrays_and_back
def slogdet(A, *, out=None):
return torch_frontend.linalg.slogdet(A, out=out)
@to_ivy_arrays_and_back
def svd(input, some=True, compute_uv=True, *, out=None):
# TODO: add compute_uv
if some:
ret = ivy.svd(input, full_matrices=False)
else:
ret = ivy.svd(input, full_matrices=True)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def trapezoid(y, x=None, *, dx=None, dim=-1):
if x is not None:
y, x = torch_frontend.promote_types_of_torch_inputs(y, x)
return ivy.trapz(y, x=x, dx=dx, axis=dim)
@to_ivy_arrays_and_back
def vdot(input, other, *, out=None):
if len(input.shape) != 1 or len(other.shape) != 1:
raise RuntimeError("input must be 1D vectors")
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
ret = ivy.vecdot(input, other, out=out)
return ret.squeeze(0) if ret.ndim == 1 else ret
# alias to fix mm transpilation issue as mm() gets mapped to spmm() after transpilation
spmm = mm
# --- end of file: ivy/ivy/functional/frontends/torch/blas_and_lapack_ops.py ---
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def linear(input, weight, bias=None):
return ivy.linear(input, weight, bias=bias)
# --- end of file: ivy/ivy/functional/frontends/torch/nn/functional/linear_functions.py ---
# global
from typing import Iterable
import math
# local
import ivy
import ivy.functional.frontends.torch as torch_frontend
from ivy.functional.frontends.numpy.creation_routines.from_existing_data import (
array as np_frontend_array,
)
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.func_wrapper import with_supported_dtypes
from ivy.func_wrapper import with_supported_device_and_dtypes
from ivy.functional.frontends.torch.func_wrapper import (
_to_ivy_array,
numpy_to_torch_style_args,
)
class Tensor:
def __init__(self, array, device=None, _init_overload=False, requires_grad=False):
if _init_overload:
self._ivy_array = (
array if isinstance(array, ivy.Array) else ivy.array(array)
)
else:
self._ivy_array = ivy.array(
array, dtype=torch_frontend.float32, device=device
)
self._grads = None
self._requires_grad = requires_grad
self.grad_fn = None
        self._is_leaf = not _init_overload
def __len__(self):
return len(self._ivy_array)
def __repr__(self):
return str(self.ivy_array.__repr__()).replace(
"ivy.array", "ivy.frontends.torch.Tensor"
)
def __hash__(self):
return id(self)
# Properties #
# ---------- #
@property
def ivy_array(self):
return self._ivy_array
@property
def device(self):
return self.ivy_array.device
@property
def dtype(self):
return self.ivy_array.dtype
@property
def shape(self):
return Size(self.ivy_array.shape)
@property
def real(self):
return self.ivy_array.real
@property
def imag(self):
return self.ivy_array.imag
@property
def ndim(self):
return self.dim()
@property
def T(self):
if self.ndim == 1:
return self
return torch_frontend.permute(self, list(range(self.ndim))[::-1])
@property
def mH(self):
return torch_frontend.adjoint(self)
@property
def data(self):
return torch_frontend.tensor(
ivy.stop_gradient(self.ivy_array, preserve_type=False)
)
@property
def grad(self):
return self._grads
@property
def requires_grad(self):
return self._requires_grad
@property
def is_leaf(self):
return self._is_leaf
@property
def get_device(self):
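        # Torch-style device index: -1 for CPU tensors, otherwise the numeric
        # index parsed from the device string (e.g. "gpu:0" -> 0).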
if self.device == "cpu":
return -1
else:
return int(self.device.split(":")[-1])
# Setters #
# --------#
@device.setter
def cuda(self, device=None):
self.device = device
return self
@ivy_array.setter
def ivy_array(self, array):
self._ivy_array = array if isinstance(array, ivy.Array) else ivy.array(array)
@requires_grad.setter
def requires_grad(self, requires_grad):
self._requires_grad = requires_grad
@is_leaf.setter
def is_leaf(self, is_leaf):
self._is_leaf = is_leaf
# Instance Methods #
# ---------------- #
def reshape(self, *args, shape=None):
if args and shape:
raise TypeError("reshape() got multiple values for argument 'shape'")
if shape is not None:
return torch_frontend.reshape(self, shape)
if args:
if isinstance(args[0], (tuple, list, ivy.Shape, ivy.NativeShape)):
shape = args[0]
return torch_frontend.reshape(self, shape)
else:
return torch_frontend.reshape(self, args)
else:
raise ValueError("reshape() got no values for argument 'shape'")
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
@with_unsupported_dtypes({"2.6.0 and below": ("float16",)}, "paddle")
def reshape_as(self, other):
return torch_frontend.reshape(self, other.shape)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def add(self, other, *, alpha=1):
return torch_frontend.add(self, other, alpha=alpha)
# @with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def divide(self, other, *, out=None):
return torch_frontend.divide(self, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def sub(self, other, *, alpha=1):
return torch_frontend.sub(self, other, alpha=alpha)
def chunk(self, chunks, dim=0):
return torch_frontend.chunk(self, chunks, dim=dim)
@numpy_to_torch_style_args
def any(self, dim=None, keepdim=False):
return torch_frontend.any(self, dim=dim, keepdim=keepdim)
@numpy_to_torch_style_args
def all(self, dim=None, keepdim=False):
return torch_frontend.all(self, dim=dim, keepdim=keepdim)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def add_(self, other, *, alpha=1):
self.ivy_array = self.add(other, alpha=alpha).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def addmm(self, mat1, mat2, *, beta=1, alpha=1):
return torch_frontend.addmm(self, mat1, mat2, beta=beta, alpha=alpha)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def addmm_(self, mat1, mat2, *, beta=1, alpha=1):
self.ivy_array = self.addmm(mat1, mat2, beta=beta, alpha=alpha).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def addmv(self, mat, vec, *, beta=1, alpha=1):
return torch_frontend.addmv(self, mat, vec, beta=beta, alpha=alpha)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def addmv_(self, mat, vec, *, beta=1, alpha=1):
self.ivy_array = torch_frontend.addmv(
self, mat, vec, beta=beta, alpha=alpha
).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def addbmm(self, batch1, batch2, *, beta=1, alpha=1):
return torch_frontend.addbmm(self, batch1, batch2, beta=beta, alpha=alpha)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def addbmm_(self, batch1, batch2, *, beta=1, alpha=1):
self.ivy_array = self.addbmm(batch1, batch2, beta=beta, alpha=alpha).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def subtract_(self, other, *, alpha=1):
self.ivy_array = self.sub(other, alpha=alpha).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def asin(self):
return torch_frontend.asin(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def asin_(self):
self.ivy_array = self.asin().ivy_array
return self
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def sum(self, dim=None, keepdim=False, *, dtype=None):
return torch_frontend.sum(self, dim=dim, keepdim=keepdim, dtype=dtype)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def sin(self):
return torch_frontend.sin(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def sin_(self):
self.ivy_array = self.sin().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def sinh(self):
return torch_frontend.sinh(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def sinh_(self):
self.ivy_array = self.sinh().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def cos(self):
return torch_frontend.cos(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def cos_(self):
self.ivy_array = self.cos().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def cosh(self):
return torch_frontend.cosh(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def cosh_(self):
self.ivy_array = self.cosh().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def atan(self):
return torch_frontend.atan(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def atan_(self):
self.ivy_array = self.atan().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def atan2(self, other):
return torch_frontend.atan2(self, other)
def view(self, *args, size=None):
"""Reshape Tensor.
possible arguments are either:
- size
- tuple of ints
- list of ints
- torch.Size object
- ints
Parameters
----------
args:int arguments
size: optional shape
Returns reshaped tensor
-------
"""
if ivy.exists(size) and not args:
shape_tup = size
elif args and not ivy.exists(size):
if (
isinstance(args[0], (tuple, list, ivy.Shape, ivy.NativeShape))
or type(args[0]).__name__ == "Size"
) and len(args) == 1:
shape_tup = args[0]
else:
shape_tup = args
else:
raise ValueError(
"View only accepts as argument ints, tuple or list of ints or "
"the keyword argument size."
)
return torch_frontend.reshape(self, shape_tup)
def float(self, memory_format=None):
self.ivy_array = ivy.astype(self.ivy_array, ivy.float32, copy=False)
return self
def double(self):
return self.to(torch_frontend.float64)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def asinh(self):
return torch_frontend.asinh(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def asinh_(self):
self.ivy_array = self.asinh().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def tan(self):
return torch_frontend.tan(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def tan_(self):
self.ivy_array = self.tan().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def tanh(self):
return torch_frontend.tanh(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def tanh_(self):
self.ivy_array = self.tanh().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def atanh(self):
return torch_frontend.atanh(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def atanh_(self):
self.ivy_array = self.atanh().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def log(self):
return torch_frontend.log(self)
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, "torch")
def log2_(self):
self.ivy_array = self.log2().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def logit(self):
return torch_frontend.logit(self)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "uint16")}, "torch")
def copy_(self, other, non_blocking=False):
ivy.utils.assertions.check_one_way_broadcastable(
self.ivy_array.shape, torch_frontend.tensor(other).ivy_array.shape
)
self._ivy_array = torch_frontend.tensor(other).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def log_(self):
self.ivy_array = self.log().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def log2(self):
return torch_frontend.log2(self)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def relu(self):
return torch_frontend.nn.functional.relu(self)
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
def amax(self, dim=None, keepdim=False):
return torch_frontend.amax(self, dim=dim, keepdim=keepdim)
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
def amin(self, dim=None, keepdim=False):
return torch_frontend.amin(self, dim=dim, keepdim=keepdim)
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("complex", "float16")}, "torch")
def aminmax(self, dim=None, keepdim=False):
return torch_frontend.aminmax(self, dim=dim, keepdim=keepdim)
def abs(self):
return torch_frontend.abs(self)
def abs_(self):
self.ivy_array = self.abs().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def logical_and(self, other):
return torch_frontend.logical_and(self, other)
def logical_not(self, *, out=None):
return torch_frontend.logical_not(self, out=out)
def logical_not_(self):
self.ivy_array = ivy.astype(self.logical_not().ivy_array, self.dtype)
return self
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def logical_or(self, other):
return torch_frontend.logical_or(self, other)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def logical_xor(self, other):
return torch_frontend.logical_xor(self, other)
def bitwise_not(self):
return torch_frontend.bitwise_not(self)
def bitwise_and(self, other):
return torch_frontend.bitwise_and(self, other)
@with_supported_dtypes({"2.2 and below": ("integer",)}, "torch")
def bitwise_or(self, other):
return torch_frontend.bitwise_or(self, other)
def bitwise_left_shift(self, other):
return torch_frontend.bitwise_left_shift(self, other)
@with_supported_dtypes({"2.2 and below": ("integer",)}, "torch")
def bitwise_or_(self, other):
self.ivy_array = self.bitwise_or(other).ivy_array
return self
def contiguous(self, memory_format=None):
return torch_frontend.tensor(self)
def new_ones(
self,
*args,
size=None,
dtype=None,
device=None,
requires_grad=False,
layout=None,
pin_memory=False,
):
if dtype is None:
dtype = self.dtype
if device is None:
device = self.device
if size is None:
size = (
args[0]
if isinstance(args[0], (tuple, list, ivy.Shape, ivy.NativeShape))
else args
)
return torch_frontend.ones(
size, dtype=dtype, device=device, requires_grad=requires_grad
)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def floor(self, *, out=None):
return torch_frontend.floor(self)
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"uint8",
"uint32",
"uint16",
"uint64",
"complex128",
"complex64",
)
},
"torch",
)
def not_equal(self, other, *, out=None):
return torch_frontend.not_equal(self, other, out=out)
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"uint8",
"uint32",
"uint16",
"uint64",
"complex128",
"complex64",
)
},
"torch",
)
def not_equal_(self, other, *, out=None):
self.ivy_array = self.not_equal(other).ivy_array
return self
def eq(self, other):
return torch_frontend.eq(self, other)
def equal(self, other):
return torch_frontend.equal(self, other)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
def erf(self, *, out=None):
return torch_frontend.erf(self, out=out)
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "bfloat16")}, "torch"
)
def erf_(self, *, out=None):
self.ivy_array = self.erf(out=out).ivy_array
return self
@with_supported_device_and_dtypes(
{"2.2 and below": {"cpu": ("float32", "float64")}},
"torch",
)
def erfc_(self, *, out=None):
return torch_frontend.erfc(self, out=out)
def new_zeros(
self,
*args,
size=None,
dtype=None,
device=None,
requires_grad=False,
layout=None,
pin_memory=False,
):
if size and args:
raise TypeError("new_zeros() got multiple values for argument 'size'")
if dtype is None:
dtype = self.dtype
if device is None:
device = self.device
if size is None:
size = args[0] if isinstance(args[0], (tuple, list, ivy.Shape)) else args
return torch_frontend.zeros(
size=size, dtype=dtype, device=device, requires_grad=requires_grad
)
def to(self, *args, **kwargs):
if len(args) > 0:
if hasattr(args[0], "ivy_array") or ivy.is_array(args[0]):
if self.dtype == ivy.dtype(args[0]) and self.device == ivy.dev(args[0]):
return self
else:
cast_tensor = self.clone()
cast_tensor.ivy_array = ivy.asarray(
self.ivy_array,
dtype=ivy.dtype(args[0]),
device=ivy.dev(args[0]),
)
return cast_tensor
if (
isinstance(args[0], ivy.NativeDtype)
or isinstance(args[0], ivy.Dtype)
and hasattr(args[0], "as_native_dtype")
or args[0] in ivy._all_ivy_dtypes_str
):
if self.dtype == ivy.as_ivy_dtype(args[0]):
return self
else:
cast_tensor = self.clone()
cast_tensor.ivy_array = ivy.asarray(self.ivy_array, dtype=args[0])
return cast_tensor
if isinstance(args[0], (ivy.Device, ivy.NativeDevice, str)):
if isinstance(args[0], str) and not isinstance(
args[0], (ivy.Device, ivy.NativeDevice)
):
ivy.utils.assertions.check_elem_in_list(
args[0],
[
"cpu",
"cuda",
"mps",
"xpu",
"mkldnn",
"opengl",
"opencl",
"ideep",
"hip",
"ve",
"ort",
"mlc",
"xla",
"lazy",
"vulkan",
"meta",
"hpu",
],
)
if self.device == ivy.as_ivy_dev(args[0]):
return self
else:
cast_tensor = self.clone()
cast_tensor.ivy_array = ivy.asarray(self.ivy_array, device=args[0])
return cast_tensor
else:
if (
"dtype" in kwargs
and "device" in kwargs
and self.dtype == kwargs["dtype"]
and self.device == kwargs["device"]
):
return self
else:
cast_tensor = self.clone()
cast_tensor.ivy_array = ivy.asarray(
self.ivy_array,
device=kwargs["device"] if "device" in kwargs else self.device,
dtype=kwargs["dtype"] if "dtype" in kwargs else self.dtype,
)
return cast_tensor
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def acos(self):
return torch_frontend.acos(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def acos_(self):
self.ivy_array = self.acos().ivy_array
return self
def new_tensor(
self,
data,
*,
dtype=None,
device=None,
requires_grad=False,
layout=None,
pin_memory=False,
):
dtype = ivy.dtype(self.ivy_array) if dtype is None else dtype
device = ivy.dev(self.ivy_array) if device is None else device
_data = ivy.asarray(data, copy=True, dtype=dtype, device=device)
return torch_frontend.tensor(_data)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def view_as(self, other):
return self.view(size=other.shape)
def expand(self, *args, size=None):
if args and size:
raise TypeError("expand() got multiple values for argument 'size'")
if args:
if isinstance(args[0], (tuple, list, ivy.Shape, ivy.NativeShape)):
size = args[0]
else:
size = args
if isinstance(size, (tuple, list)):
size = tuple(
s.item() if isinstance(s, torch_frontend.Tensor) else s for s in size
)
return torch_frontend.tensor(ivy.expand(self.ivy_array, tuple(size)))
def expand_as(self, other):
return self.expand(
ivy.shape(other.ivy_array if isinstance(other, Tensor) else other)
)
def detach(self):
return torch_frontend.tensor(
ivy.stop_gradient(self.ivy_array, preserve_type=False)
)
def detach_(self):
self.ivy_array = self.detach().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("uint16",)}, "torch")
@numpy_to_torch_style_args
def unsqueeze(self, dim):
return torch_frontend.unsqueeze(self, dim)
@numpy_to_torch_style_args
def unsqueeze_(self, dim):
self.ivy_array = self.unsqueeze(dim).ivy_array
return self
def ravel(self):
return torch_frontend.ravel(self)
def split(self, split_size, dim=0):
return torch_frontend.split(self, split_size, dim)
def tensor_split(self, indices_or_sections, dim=0):
return torch_frontend.tensor_split(self, indices_or_sections, dim)
def vsplit(self, indices_or_sections, /):
return torch_frontend.vsplit(self, indices_or_sections)
def hsplit(self, indices_or_sections, /):
return torch_frontend.hsplit(self, indices_or_sections)
def dsplit(
self,
indices_or_sections,
/,
):
return torch_frontend.dsplit(self, indices_or_sections)
def dim(self):
return self.ivy_array.ndim
@with_supported_dtypes(
{"2.5.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
)
def heaviside(self, values, *, out=None):
return torch_frontend.heaviside(self, values, out=out)
def new_full(
self,
size,
fill_value,
*,
dtype=None,
device=None,
requires_grad=False,
layout=None,
pin_memory=False,
):
dtype = ivy.dtype(self.ivy_array) if dtype is None else dtype
device = ivy.dev(self.ivy_array) if device is None else device
_data = ivy.full(size, fill_value, dtype=dtype, device=device)
return torch_frontend.tensor(_data)
def new_empty(
self,
size,
*,
dtype=None,
device=None,
requires_grad=False,
layout=None,
pin_memory=False,
):
dtype = ivy.dtype(self.ivy_array) if dtype is None else dtype
device = ivy.dev(self.ivy_array) if device is None else device
_data = ivy.empty(size, dtype=dtype, device=device)
return torch_frontend.tensor(_data)
def unfold(self, dimension, size, step):
slices = []
self_shape = tuple(self.shape)
for i in range(0, self_shape[dimension] - size + 1, step):
slicing = [slice(None)] * len(self.shape)
slicing[dimension] = slice(i, i + size)
slices.append(self.ivy_array[tuple(slicing)])
stacked = torch_frontend.stack(slices, dim=dimension)
new_shape = list(self.shape)
num_slices = (self.shape[dimension] - size) // step + 1
new_shape[dimension] = num_slices
new_shape.insert(dimension + 1, size)
reshaped = stacked.reshape(new_shape)
dims = list(range(len(stacked.shape)))
dims[-2], dims[-1] = dims[-1], dims[-2]
return reshaped.permute(*dims)
def long(self, memory_format=None):
self.ivy_array = ivy.astype(self.ivy_array, ivy.int64, copy=False)
return self
@numpy_to_torch_style_args
def max(self, dim=None, keepdim=False):
return torch_frontend.max(self, dim=dim, keepdim=keepdim)
@with_unsupported_dtypes(
{
"2.2 and below": (
"complex",
"bfloat16",
"bool",
"uint16",
"uint32",
"uint64",
)
},
"torch",
)
def maximum(self, other, *, out=None):
return torch_frontend.maximum(self, other=other, out=out)
@property
def is_quantized(self):
return "q" in ivy.dtype(self.ivy_array)
@property
def is_cuda(self):
return "gpu" in ivy.dev(self.ivy_array)
@property
def is_meta(self):
return "meta" in ivy.dev(self.ivy_array)
@with_unsupported_dtypes({"2.2 and below": ("uint16", "bool")}, "torch")
def positive(self):
return torch_frontend.positive(self)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def pow(self, exponent):
return torch_frontend.pow(self, exponent)
def unflatten(self, dim, sizes):
return torch_frontend.unflatten(self, dim, sizes)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def pow_(self, exponent):
self.ivy_array = self.pow(exponent).ivy_array
return self
def size(self, dim=None):
shape = self.ivy_array.shape
if dim is None:
return shape
try:
return shape[dim]
except IndexError as e:
raise IndexError(
f"Dimension out of range (expected to be in range of [{len(shape)},"
f" {len(shape) - 1}], but got {dim}"
) from e
def matmul(self, other):
return torch_frontend.matmul(self, other)
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch"
)
def matrix_power(self, n, *, out=None):
return torch_frontend.linalg.matrix_power(self, n, out=out)
def argwhere(self):
return torch_frontend.argwhere(self)
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("complex", "bool")}, "torch")
def argmax(self, dim=None, keepdim=False):
return torch_frontend.argmax(self, dim=dim, keepdim=keepdim)
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
def argmin(self, dim=None, keepdim=False):
return torch_frontend.argmin(self, dim=dim, keepdim=keepdim)
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
def argsort(self, dim=-1, descending=False):
return torch_frontend.argsort(self, dim=dim, descending=descending)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def ceil(self):
return torch_frontend.ceil(self)
@numpy_to_torch_style_args
def min(self, dim=None, keepdim=False):
return torch_frontend.min(self, dim=dim, keepdim=keepdim)
def permute(self, *args, dims=None):
if args and dims:
raise TypeError("permute() got multiple values for argument 'dims'")
if dims is not None:
return torch_frontend.permute(self, dims)
if args:
if isinstance(args[0], (tuple, list, ivy.Shape, ivy.NativeShape)):
dims = args[0]
return torch_frontend.permute(self, dims)
else:
return torch_frontend.permute(self, args)
else:
raise ValueError("permute() got no values for argument 'dims'")
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def mean(self, dim=None, keepdim=False):
return torch_frontend.mean(self, dim=dim, keepdim=keepdim)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
@numpy_to_torch_style_args
def nanmean(self, dim=None, keepdim=False):
return torch_frontend.nanmean(self, dim=dim, keepdim=keepdim)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
@numpy_to_torch_style_args
def nansum(self, dim=None, keepdim=False):
return torch_frontend.nansum(self, dim=dim, keepdim=keepdim)
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def median(self, dim=None, keepdim=False):
return torch_frontend.median(self, dim=dim, keepdim=keepdim)
def transpose(self, dim0, dim1):
return torch_frontend.transpose(self, dim0=dim0, dim1=dim1)
def transpose_(self, dim0, dim1):
self.ivy_array = self.transpose(dim0, dim1).ivy_array
return self
def t(self):
return torch_frontend.t(self)
def flatten(self, start_dim=0, end_dim=-1):
return torch_frontend.flatten(self, start_dim, end_dim)
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def cumsum(self, dim, *, dtype=None):
return torch_frontend.cumsum(self, dim, dtype=dtype)
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def cumsum_(self, dim, *, dtype=None):
self.ivy_array = self.cumsum(dim, dtype=dtype).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def inverse(self):
return torch_frontend.inverse(self)
@with_unsupported_dtypes({"2.2 and below": ("bool", "bfloat16")}, "torch")
def neg(self):
return torch_frontend.negative(self)
@with_unsupported_dtypes({"2.2 and below": ("bool",)}, "torch")
def neg_(self):
self.ivy_array = torch_frontend.negative(self).ivy_array
return self
__neg__ = neg
@with_unsupported_dtypes({"2.2 and below": ("bool", "bfloat16")}, "torch")
def negative(self):
return torch_frontend.negative(self)
def int(self, memory_format=None):
self.ivy_array = ivy.astype(self.ivy_array, ivy.int32, copy=False)
return self
def half(self, memory_format=None):
self.ivy_array = ivy.astype(self.ivy_array, ivy.float16, copy=False)
return self
def bool(self, memory_format=None):
self.ivy_array = ivy.astype(self.ivy_array, ivy.bool, copy=False)
return self
def type(self, dtype=None, non_blocking=False, **kwargs):
if ivy.exists(dtype):
self.ivy_array = ivy.astype(self.ivy_array, dtype)
return self
else:
return str(self.dtype)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def type_as(self, other):
if self.dtype != other.dtype:
return torch_frontend.tensor(ivy.astype(self.ivy_array, other.dtype))
return self
def byte(self, memory_format=None):
self.ivy_array = ivy.astype(self.ivy_array, ivy.uint8, copy=False)
return self
@numpy_to_torch_style_args
def squeeze(self, dim=None):
return torch_frontend.squeeze(self, dim)
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("uint16",)}, "torch")
def squeeze_(self, dim=None):
self.ivy_array = self.squeeze(dim).ivy_array
return self
def flip(self, dims):
return torch_frontend.flip(self, dims)
def fliplr(self):
return torch_frontend.fliplr(self)
def sort(self, dim=-1, descending=False):
return torch_frontend.sort(self, dim=dim, descending=descending)
def tril(self, diagonal=0):
return torch_frontend.tril(self, diagonal=diagonal)
def tril_(self, diagonal=0):
self.ivy_array = self.tril(diagonal=diagonal).ivy_array
return self
def index_select(self, dim, index):
return torch_frontend.index_select(self, dim, index)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
def clamp(self, min=None, max=None):
return torch_frontend.clamp(self, min=min, max=max)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
def clamp_(self, min=None, max=None):
self.ivy_array = self.clamp(min=min, max=max).ivy_array
return self
@with_unsupported_dtypes(
{"2.2 and below": ("bool", "bfloat16", "float16", "complex")}, "torch"
)
def clamp_min(self, min=None):
return torch_frontend.clamp(self, min=min)
def clamp_min_(self, min=None):
self.ivy_array = self.clamp_min(min).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def sqrt(self):
return torch_frontend.sqrt(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def rsqrt(self):
return torch_frontend.rsqrt(self)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def rsqrt_(self):
self.ivy_array = self.rsqrt().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def sqrt_(self):
self.ivy_array = self.sqrt().ivy_array
return self
def where(self, condition, other):
return torch_frontend.tensor(torch_frontend.where(condition, self, other))
def clone(self, memory_format=None):
return torch_frontend.tensor(ivy.array(self.ivy_array, copy=True))
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def acosh(self):
return torch_frontend.acosh(self)
def masked_fill(self, mask, value):
return torch_frontend.tensor(
torch_frontend.where(mask, value, self), dtype=self.dtype
)
def masked_fill_(self, mask, value):
self.ivy_array = self.masked_fill(mask, value).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def index_add_(self, dim, index, source, *, alpha=1):
self.ivy_array = torch_frontend.index_add(
self, dim, index, source, alpha=alpha
).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def index_add(self, dim, index, source, *, alpha=1):
return torch_frontend.index_add(
self._ivy_array, dim, index, source, alpha=alpha
)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def acosh_(self):
self.ivy_array = self.acosh().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def numpy(self):
return np_frontend_array(self.ivy_array)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def sigmoid(self):
return torch_frontend.sigmoid(self)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def sigmoid_(self):
self.ivy_array = self.sigmoid().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def softmax(self, dim=None, dtype=None):
return torch_frontend.nn.functional.softmax(self, dim=dim, dtype=dtype)
def repeat_interleave(self, repeats, dim=None, *, output_size=None):
return torch_frontend.repeat_interleave(self, repeats, dim)
def repeat(self, *args, repeats=None):
if args and repeats:
raise ivy.utils.exceptions.IvyException(
"repeat() got multiple values for argument 'repeats'"
)
if args:
if isinstance(args[0], (tuple, list, ivy.Shape, ivy.NativeShape)):
repeats = args[0]
else:
repeats = args
elif not isinstance(repeats, (tuple, list)):
raise ivy.utils.exceptions.IvyException(
"repeat(): argument 'repeats' must be tuple of ints"
)
return torch_frontend.tile(self, repeats)
@numpy_to_torch_style_args
def unbind(self, dim=0):
return torch_frontend.unbind(self, dim=dim)
def remainder(self, other, *, out=None):
return torch_frontend.remainder(self, other, out=out)
@with_supported_dtypes(
{"2.2 and below": ("float16", "float32", "float64", "bfloat16")}, "torch"
)
def reciprocal_(self):
self.ivy_array = torch_frontend.reciprocal(self).ivy_array
return self
def remainder_(self, other, *, out=None):
self.ivy_array = torch_frontend.remainder(self, other, out=out).ivy_array
return self
def bitwise_not_(self):
self.ivy_array = self.bitwise_not().ivy_array
return self
def bitwise_and_(self, other):
self.ivy_array = self.bitwise_and(other).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def atan2_(self, other):
self.ivy_array = self.atan2(other).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def fmax(self, other):
return torch_frontend.fmax(self, other)
def fmin(self, other):
return torch_frontend.fmin(self, other)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def log_softmax(self, dim=None, _stack_level=3, dtype=None):
return torch_frontend.nn.functional.log_softmax(self, dim=dim, dtype=dtype)
def isfinite(self):
return torch_frontend.isfinite(self)
def msort(self):
return torch_frontend.msort(self)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
def trunc(self):
return torch_frontend.trunc(self)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
def trunc_(self):
self.ivy_array = self.trunc().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
def fix(self):
return torch_frontend.fix(self)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
def fix_(self):
self.ivy_array = self.fix().ivy_array
return self
def isinf(self):
return torch_frontend.isinf(self._ivy_array)
def is_complex(self):
return torch_frontend.is_complex(self._ivy_array)
@with_unsupported_dtypes({"2.2 and below": ("uint16", "bfloat16")}, "torch")
def is_floating_point(self):
return torch_frontend.is_floating_point(self._ivy_array)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def isreal(self):
return torch_frontend.isreal(self._ivy_array)
def addr(self, vec1, vec2, *, beta=1, alpha=1, out=None):
return torch_frontend.addr(self, vec1, vec2, beta=beta, alpha=alpha, out=out)
def addr_(self, vec1, vec2, *, beta=1, alpha=1):
self.ivy_array = self.addr(vec1, vec2, beta=beta, alpha=alpha).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def dot(self, tensor):
return torch_frontend.dot(self, tensor)
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, "torch")
def bernoulli(self, p, *, generator=None, out=None):
return torch_frontend.bernoulli(
self._ivy_array, p, generator=generator, out=out
)
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, "torch")
def bernoulli_(self, p, *, generator=None, out=None):
self.ivy_array = self.bernoulli(p, generator=generator, out=out).ivy_array
return self
def numel(self):
shape = self.shape
return int(ivy.astype(ivy.prod(shape), ivy.int64))
# Special Methods #
# -------------------#
def __bool__(self):
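        # A tensor can only be interpreted as a Python bool when it holds a
        # single element; len(shape) == sum(shape) holds exactly when every
        # dimension equals 1, which is the proxy check used here.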
if len(self.shape) == sum(self.shape):
return self.ivy_array.to_scalar().__bool__()
raise ValueError(
"The truth value of an array with more than one element is ambiguous. "
"Use a.any() or a.all()"
)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __add__(self, other):
return torch_frontend.add(self, other)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __mod__(self, other):
return torch_frontend.remainder(self, other)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __pow__(self, exponent):
return self.pow(exponent)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __rpow__(self, other):
return torch_frontend.pow(other, self)
def __long__(self, memory_format=None):
return self.long()
def __getitem__(self, query, /):
ivy_args = ivy.nested_map(_to_ivy_array, [self, query])
ret = ivy.get_item(*ivy_args)
return torch_frontend.Tensor(ret, _init_overload=True)
def __setitem__(self, key, value, /):
key, value = ivy.nested_map(_to_ivy_array, [key, value])
self.ivy_array[key] = value
def __iter__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d tensor not supported")
for i in range(self.shape[0]):
yield self[i]
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __radd__(self, other):
return torch_frontend.add(other, self)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __mul__(self, other):
return torch_frontend.mul(self, other)
@with_unsupported_dtypes({"2.2 and below": "bfloat16"}, "torch")
def __matmul__(self, other):
return torch_frontend.matmul(self, other)
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"int8",
"int16",
"bool",
"uint8",
)
},
"torch",
)
def __rmul__(self, other):
return torch_frontend.mul(other, self)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __sub__(self, other):
return torch_frontend.subtract(self, other)
def __truediv__(self, other):
return torch_frontend.div(self, other)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
def __floordiv__(self, other):
return torch_frontend.floor_divide(self, other)
def __iadd__(self, other):
ret = torch_frontend.add(self, other)
self.ivy_array = ivy.inplace_update(
self.ivy_array, ivy.astype(ret.ivy_array, self.dtype)
)
return self
def __imod__(self, other):
ret = torch_frontend.remainder(self, other)
self.ivy_array = ivy.inplace_update(
self.ivy_array, ivy.astype(ret.ivy_array, self.dtype)
)
return self
def __imul__(self, other):
ret = torch_frontend.mul(self, other)
self.ivy_array = ivy.inplace_update(
self.ivy_array, ivy.astype(ret.ivy_array, self.dtype)
)
return self
def __isub__(self, other):
ret = torch_frontend.subtract(self, other)
self.ivy_array = ivy.inplace_update(
self.ivy_array, ivy.astype(ret.ivy_array, self.dtype)
)
return self
def __itruediv__(self, other):
ret = torch_frontend.div(self, other)
self.ivy_array = ivy.inplace_update(
self.ivy_array, ivy.astype(ret.ivy_array, self.dtype)
)
return self
def __int__(self):
item = self.item()
if isinstance(item, complex):
if item.imag != 0:
raise TypeError("can't convert complex to int without overflow")
item = item.real
return int(item)
def __float__(self):
item = self.item()
if isinstance(item, complex):
if item.imag != 0:
raise TypeError("can't convert complex to float without overflow")
item = item.real
return float(item)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __eq__(self, other):
return torch_frontend.eq(self, other)
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
def __gt__(self, other):
return torch_frontend.greater(self, other)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __ge__(self, other):
return torch_frontend.greater_equal(self, other)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __ne__(self, other):
return self.ne(other)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __rsub__(self, other):
return torch_frontend.subtract(other, self)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __lt__(self, other):
return torch_frontend.less(self, other)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __le__(self, other):
return torch_frontend.less_equal(self, other)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def __or__(self, other):
return torch_frontend.bitwise_or(self, other)
@with_supported_dtypes({"2.2 and below": ("integer", "bool")}, "torch")
def __invert__(self):
return torch_frontend.bitwise_not(self)
def __and__(self, other):
return torch_frontend.bitwise_and(self, other)
def __iand__(self, other):
self.ivy_array = self.bitwise_and(other).ivy_array
return self
def new(self):
return torch_frontend.tensor([], dtype=self.dtype, device=self.device)
def __array__(self, dtype=None):
if dtype is None:
return ivy.to_numpy(self.ivy_array)
else:
return ivy.to_numpy(self.ivy_array).astype(dtype, copy=False)
def __array_wrap__(self, array):
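        # NumPy hands back the raw result array here; bool results are cast to
        # uint8 first, presumably to mirror the uint8 outputs older torch
        # versions used for boolean-valued ops, before re-wrapping as a Tensor.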
if array.dtype == bool:
array = array.astype("uint8")
return torch_frontend.tensor(array)
def bitwise_xor(self, other):
return torch_frontend.bitwise_xor(self, other)
def bitwise_xor_(self, other):
self.ivy_array = self.bitwise_xor(other).ivy_array
return self
def item(self):
if all(dim == 1 for dim in self.shape):
return self.ivy_array.to_scalar()
else:
raise ValueError(
"only one element tensors can be converted to Python scalars"
)
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def cumprod(self, dim, dtype=None):
return torch_frontend.cumprod(self, dim, dtype=dtype)
@numpy_to_torch_style_args
def count_nonzero(self, dim):
return torch_frontend.count_nonzero(self, dim=dim)
def cov(self, /, *, correction=1, fweights=None, aweights=None):
return torch_frontend.cov(
self, correction=correction, fweights=fweights, aweights=aweights
)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, "torch")
def exp(self):
return torch_frontend.exp(self)
@with_supported_dtypes(
{"2.2 and below": ("bfloat16", "float32", "float64")}, "torch"
)
def expm1(self):
return torch_frontend.expm1(self)
# remove "bfloat16" from the below decorator after fixing ivy.Array.__repr__ method
@with_unsupported_dtypes(
{"2.2 and below": ("bfloat16", "float16", "complex")}, "torch"
)
def expm1_(self):
self.ivy_array = torch_frontend.expm1(self).ivy_array
return self
# fmt: off
@with_unsupported_dtypes({"2.2 and below": ("int8", "int16", "int32", "int64", "uint8", "bool", "float16",)},"torch",) # noqa
def exp_(self):
self.ivy_array = self.exp().ivy_array
return self
# fmt: on
def mul(self, other):
return torch_frontend.mul(self, other)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def ceil_(self):
self.ivy_array = torch_frontend.ceil(self).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def mul_(self, other):
self.ivy_array = self.mul(other).ivy_array
# the return dtype is the same as the input dtype
self.ivy_array = self.to(self.dtype).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, "torch")
def round(self, *, decimals=0):
return torch_frontend.round(self, decimals=decimals)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, "torch")
def round_(self, *, decimals=0):
self.ivy_array = self.round(decimals=decimals).ivy_array
return self
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
def cross(self, other, dim=-1):
return torch_frontend.cross(self, other, dim=dim)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def det(self):
return torch_frontend.det(self)
def reciprocal(self):
return torch_frontend.reciprocal(self)
def fill_(self, value):
ret = torch_frontend.full_like(
self, value, dtype=self.dtype, device=self.device
)
self.ivy_array = ivy.inplace_update(self.ivy_array, ret)
return self
def nonzero(self, as_tuple=False):
return torch_frontend.nonzero(self, as_tuple=as_tuple)
def mm(self, mat2):
return torch_frontend.mm(self, mat2)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, "torch")
def square(self):
return torch_frontend.square(self._ivy_array)
@with_supported_dtypes(
{
"2.2 and below": (
"float16",
"float32",
"float64",
"int16",
"int32",
"int64",
"uint8",
"int8",
"complex64",
"complex128",
)
},
"torch",
)
def square_(self):
self.ivy_array = torch_frontend.square(self._ivy_array).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def log10(self):
return torch_frontend.log10(self._ivy_array)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def log10_(self):
self.ivy_array = self.log10().ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("uint16",)}, "torch")
def zero_(self):
ret = torch_frontend.zeros_like(self)
self.ivy_array = ivy.inplace_update(self.ivy_array, ret)
return self
def short(self, memory_format=None):
self.ivy_array = ivy.astype(self.ivy_array, ivy.int16, copy=False)
return self
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def prod(self, dim=None, keepdim=False, *, dtype=None):
return torch_frontend.prod(self, dim=dim, keepdim=keepdim, dtype=dtype)
def div(self, other, *, rounding_mode=None):
return torch_frontend.div(self, other, rounding_mode=rounding_mode)
def div_(self, other, *, rounding_mode=None):
self.ivy_array = self.div(other, rounding_mode=rounding_mode).ivy_array
return self
@with_supported_dtypes(
{"2.2 and below": ("float16", "float32", "float64", "bfloat16")}, "torch"
)
def true_divide_(self, other):
self.ivy_array = self.div(other, rounding_mode=None).ivy_array
return self
def normal_(self, mean=0, std=1, *, generator=None):
self.ivy_array = ivy.random_normal(
mean=mean,
std=std,
shape=self.ivy_array.shape,
dtype=self.dtype,
device=self.device,
)
return self
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def addcdiv(self, tensor1, tensor2, *, value=1):
return torch_frontend.addcdiv(self, tensor1, tensor2, value=value)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def addcmul(self, tensor1, tensor2, *, value=1):
return torch_frontend.addcmul(self, tensor1, tensor2, value=value)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def addcmul_(self, tensor1, tensor2, *, value=1):
self.ivy_array = self.addcmul(tensor1, tensor2, value=value).ivy_array
return self
sign_decorator_dtypes = ("float16", "complex", "bool")
@with_unsupported_dtypes({"2.2 and below": sign_decorator_dtypes}, "torch")
def sign(self):
return torch_frontend.sign(self._ivy_array)
@with_unsupported_dtypes({"2.2 and below": sign_decorator_dtypes}, "torch")
def sign_(self):
self.ivy_array = self.sign().ivy_array
return self
@numpy_to_torch_style_args
def std(self, dim=None, unbiased=True, keepdim=False, *, out=None):
return torch_frontend.std(
self, dim=dim, unbiased=unbiased, keepdim=keepdim, out=out
)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def fmod(self, other, *, out=None):
return torch_frontend.fmod(self, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def fmod_(self, other):
self.ivy_array = self.fmod(other).ivy_array
return self
def norm(self, p="fro", dim=None, keepdim=False, dtype=None):
return torch_frontend.norm(self, p=p, dim=dim, keepdim=keepdim, dtype=dtype)
def tolist(self):
return self._ivy_array.to_list()
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def multiply(self, other, *, out=None):
return torch_frontend.multiply(self, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def multiply_(self, other, *, out=None):
self.ivy_array = torch_frontend.multiply(self, other, out=out).ivy_array
return self
@numpy_to_torch_style_args
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
def topk(self, k, dim=None, largest=True, sorted=True):
return torch_frontend.topk(self, k, dim=dim, largest=largest, sorted=sorted)
rshift_dtypes = ("float16", "bfloat16", "float32", "float64", "bool", "complex")
@with_unsupported_dtypes({"2.2 and below": rshift_dtypes}, "torch")
def bitwise_right_shift(self, other, *, out=None):
return torch_frontend.bitwise_right_shift(self._ivy_array, other)
@with_supported_dtypes(
{"2.2 and below": ("uint8", "int8", "int32", "int64")}, "torch"
)
def bitwise_right_shift_(self, other, *, out=None):
self.ivy_array = self.bitwise_right_shift(other, out=out).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def logdet(self):
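        # For a (Hermitian) positive-definite matrix A = L @ L.conj().T, the
        # determinant equals det(L)**2, so log|det(A)| = 2 * sum(log(diag(L))),
        # which is what the Cholesky-based computation below evaluates.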
chol = torch_frontend.cholesky(self)
return 2 * torch_frontend.sum(
torch_frontend.log(torch_frontend.real(torch_frontend.diagonal(chol)))
)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def copysign(self, other, *, out=None):
return torch_frontend.copysign(self, other, out=out)
@with_supported_dtypes(
{"2.2 and below": ("float16", "float32", "float64")}, "torch"
)
def copysign_(self, other, *, out=None):
self.ivy_array = self.copysign(other, out=out).ivy_array
return self
@with_unsupported_dtypes(
{"2.2 and below": ("complex", "bfloat16", "bool")}, "torch"
)
def greater(self, other, *, out=None):
return torch_frontend.greater(self, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "bool")}, "torch")
def greater_(self, other):
self.ivy_array = ivy.astype(self.greater(other).ivy_array, self.dtype)
return self
@with_unsupported_dtypes(
{"2.2 and below": ("complex", "bfloat16", "bool")}, "torch"
)
def greater_equal(self, other, *, out=None):
return torch_frontend.greater_equal(self, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "bool")}, "torch")
def greater_equal_(self, other):
self.ivy_array = ivy.astype(self.greater_equal(other).ivy_array, self.dtype)
return self
@with_unsupported_dtypes(
{"2.2 and below": ("complex", "bfloat16", "bool")}, "torch"
)
def less(self, other, *, out=None):
return torch_frontend.less(self, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "bool")}, "torch")
def less_(self, other):
self.ivy_array = ivy.astype(self.less(other).ivy_array, self.dtype)
return self
@with_unsupported_dtypes(
{"2.2 and below": ("complex", "bfloat16", "bool")}, "torch"
)
def less_equal(self, other, *, out=None):
return torch_frontend.less_equal(self, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "bool")}, "torch")
def less_equal_(self, other):
self.ivy_array = ivy.astype(self.less_equal(other).ivy_array, self.dtype)
return self
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def eq_(self, other):
self.ivy_array = ivy.astype(
torch_frontend.eq(self, other).ivy_array, self.dtype
)
return self
@numpy_to_torch_style_args
def var(self, dim=None, *, correction=1, keepdim=False):
return torch_frontend.var(self, dim=dim, unbiased=correction, keepdim=keepdim)
def narrow(self, dim, start, length):
return torch_frontend.narrow(self, dim=dim, start=start, length=length)
def as_strided(self, size, stride, storage_offset=None):
return torch_frontend.as_strided(
self, size=size, stride=stride, storage_offset=storage_offset
)
def stride(self, dim=None):
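        # The underlying ivy array reports strides in bytes, while torch's
        # Tensor.stride is expressed in elements, so each byte stride is
        # divided by the dtype's size in bytes before being returned.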
strides = [
stride // math.ceil(ivy.dtype_bits(self.dtype) / 8)
for stride in self.ivy_array.strides
]
if dim is not None:
return strides[dim]
return strides
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "bfloat16")}, "torch"
)
def log1p(self):
promoted_type = ivy.promote_types(self.dtype, "float32")
res = torch_frontend.log1p(self)
return res.to(promoted_type)
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, "torch")
def log1p_(self):
promoted_type = ivy.promote_types(self.dtype, "float32")
res = torch_frontend.log1p(self)
self.ivy_array = res.to(promoted_type).ivy_array
return self
def baddbmm(self, batch1, batch2, *, beta=1, alpha=1):
return torch_frontend.baddbmm(
self, batch1=batch1, batch2=batch2, beta=beta, alpha=alpha
)
def baddbmm_(self, batch1, batch2, *, beta=1, alpha=1):
self.ivy_array = torch_frontend.baddbmm(
self, batch1=batch1, batch2=batch2, beta=beta, alpha=alpha
).ivy_array
return self
def bmm(self, mat2):
return torch_frontend.bmm(self, mat2=mat2)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def floor_(self):
self.ivy_array = self.floor().ivy_array
return self
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"complex",
"float64",
"int8",
"int64",
)
},
"torch",
)
def diff(self, n=1, dim=-1, prepend=None, append=None):
return torch_frontend.diff(self, n=n, dim=dim, prepend=prepend, append=append)
def diag(self, diagonal=0):
return torch_frontend.diag(self, diagonal=diagonal)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def diagonal(self, offset=0, dim1=0, dim2=1):
return torch_frontend.diagonal(self, offset=offset, dim1=dim1, dim2=dim2)
def gather(self, dim, index):
return torch_frontend.gather(self, dim=dim, index=index)
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "int32", "int64")}, "torch"
)
def scatter_add_(self, dim, index, src):
self.ivy_array = ivy.put_along_axis(self.ivy_array, index, src, dim, mode="sum")
return self
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "int32", "int64")}, "torch"
)
def scatter_(self, dim, index, src, *, reduce=None):
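        # torch's reduce keywords ("add", "multiply") are translated to the
        # mode names understood by ivy.put_along_axis ("sum", "mul"); with no
        # reduce argument the scatter simply overwrites ("replace").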
if reduce is None:
reduce = "replace"
else:
mode_mappings = {
"add": "sum",
"multiply": "mul",
}
reduce = mode_mappings.get(reduce, reduce)
self.ivy_array = ivy.put_along_axis(
self.ivy_array, index, src, dim, mode=reduce
)
return self
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "int32", "int64")}, "torch"
)
def scatter_reduce_(self, dim, index, src, reduce, *, include_self=True):
if reduce == "prod":
reduce = "mul"
self.ivy_array = ivy.put_along_axis(
self.ivy_array, index, src, dim, mode=reduce
)
return self
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "int32", "int64")}, "torch"
)
def scatter_add(self, dim, index, src):
return torch_frontend.scatter_add(self, dim, index, src)
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "int32", "int64")}, "torch"
)
def scatter(self, dim, index, src):
return torch_frontend.scatter_reduce(self, dim, index, src, reduce="replace")
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "int32", "int64")}, "torch"
)
def scatter_reduce(self, dim, index, src, reduce, *, include_self=True):
return torch_frontend.scatter_reduce(self, dim, index, src, reduce=reduce)
def take_along_dim(self, indices, dim):
return torch_frontend.take_along_dim(self, indices=indices, dim=dim)
def movedim(self, source, destination):
return torch_frontend.movedim(self, source=source, destination=destination)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def addcdiv_(self, tensor1, tensor2, *, value=1):
self.ivy_array = self.addcdiv(
tensor1=tensor1, tensor2=tensor2, value=value
).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, "torch")
def cholesky(self, upper=False):
return torch_frontend.cholesky(self, upper=upper)
def tile(self, *reps):
if (
isinstance(reps, Iterable)
and len(reps) == 1
and isinstance(reps[0], Iterable)
):
reps = reps[0]
return torch_frontend.tile(self, reps)
def apply_(self, callable, /):
if self.device != "cpu":
raise ValueError("apply_ is only supported on cpu tensors")
self.ivy_array = callable(self.ivy_array)
return self
def requires_grad_(self, requires_grad=True):
self._requires_grad = requires_grad
return self
def backward(self, gradient=None, retain_graph=None, create_graph=False):
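        # A minimal reverse-mode traversal: the incoming gradient is pushed
        # through this tensor's grad_fn and then recursively propagated to each
        # parent node recorded in grad_fn.next_functions, storing leaf
        # gradients in _grads along the way.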
if gradient is None and int(torch_frontend.numel(self)) > 1:
raise RuntimeError("grad can be implicitly created only for scalar outputs")
if self.grad_fn is None and self._grads is None:
assert self.shape == gradient.shape, "Mismatch in shape"
self._grads = gradient
return
_grad_list = self.grad_fn(
gradient if gradient is not None else torch_frontend.tensor(1.0)
)
for idx, next_function in enumerate(self.grad_fn.next_functions):
if next_function.__self__.grad_fn is not None:
next_function.__self__.backward(_grad_list[idx])
else:
next_function(_grad_list[idx])
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def logaddexp(self, other):
return torch_frontend.logaddexp(self, other)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
    def logaddexp2(self, other):
        return torch_frontend.logaddexp2(self, other)
def angle(self):
return torch_frontend.angle(self)
    @with_supported_dtypes(
        {
            "2.2 and below": (
                "int64",
                "float64",
                "complex128",
                "float32",
                "complex64",
                "int32",
            )
        },
        "torch",
    )
def adjoint(self):
return torch_frontend.adjoint(self)
@with_unsupported_dtypes(
{"2.2 and below": ("int16", "float16", "bfloat16")}, "torch"
)
def conj(self):
return torch_frontend.conj(self)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def svd(self, some=True, compute_uv=True, *, out=None):
return torch_frontend.svd(self, some=some, compute_uv=compute_uv, out=out)
@with_unsupported_dtypes(
{"2.2 and below": ("float16", "bfloat16", "float32", "float64", "complex")},
"torch",
)
def gcd(self, other, *, out=None):
return torch_frontend.gcd(self, other, out=out)
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
"uint16",
"bool",
"complex64",
"complex128",
)
},
"torch",
)
def isnan(self):
return torch_frontend.isnan(self)
def char(self):
self.ivy_array = ivy.asarray(self.ivy_array, dtype=torch_frontend.char)
return self
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
"float32",
"float64",
"complex",
"uint8",
"int8",
)
},
"torch",
)
def lcm(self, other, *, out=None):
return torch_frontend.lcm(self, other, out=out)
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
"float32",
"float64",
"complex",
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
)
},
"torch",
)
def lcm_(self, other, *, out=None):
self.ivy_array = self.lcm(other, out=out).ivy_array
return self
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"int8",
"uint8",
"int16",
"complex128",
"complex64",
"bool",
)
},
"torch",
)
def triu_(self, diagonal=0):
self.ivy_array = torch_frontend.triu(self, diagonal).ivy_array
return self
@with_unsupported_dtypes(
{"2.2 and below": ("float16", "bfloat16")},
"torch",
)
def quantile(self, q, dim=None, keepdim=False, *, interpolation="linear", out=None):
return torch_frontend.quantile(
self, q, dim=dim, keepdim=keepdim, interpolation=interpolation, out=out
)
@with_unsupported_dtypes(
{
"2.2 and below": (
"int8",
"int16",
"uint8",
"uint16",
"uint32",
"uint64",
"bfloat16",
"float64",
)
},
"torch",
)
def random_(
self,
from_=0,
to=None,
*,
generator=None,
):
if to is None:
if ivy.is_float_dtype(self.ivy_array):
to = ivy.finfo(self.dtype).max
else:
to = ivy.iinfo(self.dtype).max
self.ivy_array = ivy.random_uniform(
low=from_, high=to, shape=self.size(), dtype=self.dtype
)
        return self
@with_unsupported_dtypes(
{
"2.2 and below": (
"integer",
"unsigned",
"bfloat16",
"bool",
"complex",
)
},
"torch",
)
def uniform_(self, from_=0, to=1, *, generator=None):
ret = ivy.random_uniform(
low=from_, high=to, shape=self.shape, dtype=self.dtype, seed=generator
)
self._ivy_array = ivy.inplace_update(
self._ivy_array, ivy.astype(ret, self._ivy_array.dtype)
)
return self
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, "torch")
def frac(self, name=None):
return torch_frontend.frac(self._ivy_array)
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
"torch",
)
def sinc(self):
return torch_frontend.sinc(self)
@with_supported_dtypes(
{
"2.2 and below": (
"float32",
"float64",
"bfloat16",
)
},
"torch",
)
def sinc_(self):
self.ivy_array = torch_frontend.sinc(self).ivy_array
return self
@with_unsupported_dtypes({"2.2 and below": ("uint8",)}, "torch")
def index_fill(self, dim, index, value):
arr = torch_frontend.moveaxis(self, dim, 0)
arr[ivy.to_list(index)] = value
        arr = torch_frontend.moveaxis(arr, 0, dim)
return arr
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"int8",
"uint8",
"uint32",
"uint16",
"uint64",
"int16",
"float16",
"complex128",
"complex64",
"bool",
)
},
"torch",
)
def unique_consecutive(self, return_inverse, return_counts, dim):
return torch_frontend.unique_consecutive(
self, return_inverse, return_counts, dim
)
@with_unsupported_dtypes(
{
"2.2 and below": (
"uint16",
"uint32",
"uint64",
"bfloat16",
"float16",
"complex64",
"complex128",
)
},
"torch",
)
def cummax(self, dim):
return torch_frontend.cummax(self, dim)
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"int8",
"uint8",
"uint32",
"uint16",
"uint64",
"int16",
"complex128",
"complex64",
)
},
"torch",
)
def triu(self, diagonal=0):
return torch_frontend.triu(self, diagonal)
@with_unsupported_dtypes(
{"2.2 and below": ("bfloat16",)},
"torch",
)
def xlogy_(self, *, other, out=None):
self.ivy_array = torch_frontend.xlogy(self, other, out=out).ivy_array
return self
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"uint8",
"uint32",
"uint16",
"uint64",
"complex128",
"complex64",
)
},
"torch",
)
def ne(self, other):
return self.not_equal(other)
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"uint8",
"uint32",
"uint16",
"uint64",
"complex128",
"complex64",
)
},
"torch",
)
def ne_(self, other):
return self.not_equal_(other)
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"int8",
"uint8",
"uint32",
"uint16",
"uint64",
"int16",
"float16",
"complex128",
"complex64",
"bool",
)
},
"torch",
)
def unique(self, sorted=True, return_inverse=False, return_counts=False, dim=None):
return torch_frontend.unique(self, sorted, return_inverse, return_counts, dim)
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
"torch",
)
def xlogy(self, *, other, out=None):
return torch_frontend.xlogy(self, other, out=out)
@with_unsupported_dtypes({"2.2 and below": "complex"}, "torch")
def minimum(self, other, *, out=None):
return torch_frontend.minimum(self, other=other, out=out)
def rad2deg(self, *, out=None):
return torch_frontend.rad2deg(self, out=out)
@with_supported_dtypes(
{"2.2 and below": "valid"},
"torch",
)
def corrcoef(self):
return torch_frontend.corrcoef(self)
def index_put(self, indices, values, accumulate=False):
ret = self.clone()
if accumulate:
ret[indices[0]] += values
else:
ret[indices[0]] = values
return ret
def index_put_(self, indices, values, accumulate=False):
def _set_add(index):
self[index] += values
def _set(index):
self[index] = values
if accumulate:
ivy.map(fn=_set_add, unique={"index": indices})
else:
ivy.map(fn=_set, unique={"index": indices})
return self
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
def erfinv(self, *, out=None):
return torch_frontend.erfinv(self, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
def erfinv_(self, *, out=None):
ret = self.erfinv(out=out)
self._ivy_array = ivy.inplace_update(
self._ivy_array, ivy.astype(ret.ivy_array, self._ivy_array.dtype)
)
return self
# Method aliases
absolute, absolute_ = abs, abs_
clip, clip_ = clamp, clamp_
ndimension = dim
subtract = sub
sub_ = subtract_
arctan = atan
arctan_ = atan_
arctan2 = atan2
arctan2_ = atan2_
gt = greater
gt_ = greater_
arcsinh = asinh
arcsinh_ = asinh_
arcsin = asin
arcsin_ = asin_
arctanh = atanh
arctanh_ = atanh_
arccosh = acosh
arccosh_ = acosh_
arccos = acos
arccos_ = acos_
ge = greater_equal
ge_ = greater_equal_
lt = less
lt_ = less_
le = less_equal
le_ = less_equal_
class Size(tuple):
def __new__(cls, iterable=()):
iterable = ivy.Shape([]) if iterable == () else iterable
new_iterable = []
for i, item in enumerate(iterable):
if isinstance(item, int):
new_iterable.append(item)
continue
try:
new_iterable.append(int(item))
except Exception as e:
raise TypeError(
f"Expected int, but got {type(item)} at index {i}"
) from e
return super().__new__(cls, tuple(new_iterable))
def __init__(self, shape=()) -> None:
shape = ivy.Shape([]) if shape == () else shape
self._ivy_shape = shape if isinstance(shape, ivy.Shape) else ivy.shape(shape)
def __repr__(self):
return f'ivy.frontends.torch.Size([{", ".join(str(d) for d in self)}])'
@property
def ivy_shape(self):
return self._ivy_shape
def numel(self):
return int(ivy.astype(ivy.prod(self), ivy.int64))
| ivy/ivy/functional/frontends/torch/tensor.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/tensor.py",
"repo_id": "ivy",
"token_count": 38095
} | 42 |
from . import activations
from .activations import *
from . import constants
from .constants import *
from . import creation
from .creation import *
from . import data_type
from .data_type import *
from . import device
from .device import *
from . import elementwise
from .elementwise import *
from . import general
from .general import *
from . import gradients
from .gradients import *
from . import layers
from .layers import *
from . import linear_algebra as linalg
from .linear_algebra import *
from . import losses
from .losses import *
from . import manipulation
from .manipulation import *
from . import meta
from .meta import *
from . import nest
from .nest import *
from . import norms
from .norms import *
from . import random
from .random import *
from . import searching
from .searching import *
from . import set
from .set import *
from . import sorting
from .sorting import *
from . import statistical
from .statistical import *
from . import utility
from .utility import *
from . import control_flow_ops
from .control_flow_ops import *
import types
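# Build the public namespace: keep only names that were defined inside ivy
# modules, dropping private names, the top-level `ivy` reference, and anything
# re-exported from third-party packages.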
__all__ = [
name
for name, thing in globals().items()
if not (
name.startswith("_")
or name == "ivy"
or (callable(thing) and "ivy" not in thing.__module__)
or (isinstance(thing, types.ModuleType) and "ivy" not in thing.__name__)
)
]
del types
| ivy/ivy/functional/ivy/__init__.py/0 | {
"file_path": "ivy/ivy/functional/ivy/__init__.py",
"repo_id": "ivy",
"token_count": 419
} | 43 |
# local
from ivy.utils.backend import current_backend
def bind_custom_gradient_function(func, custom_grad_func):
"""Bind a custom gradient function to a function.
Parameters
----------
    func
        Function for which we compute the gradients of the output with respect
        to the inputs.
    custom_grad_func
        Custom gradient function. Should accept a tuple containing the
        (output, inputs) pair and the upstream gradients w.r.t. previous
        operations.
Returns
-------
ret
the function
"""
return current_backend(None).bind_custom_gradient_function(func, custom_grad_func)
def vjp(func, *primals):
"""Compute a (reverse-mode) vector-Jacobian product of `func`.
Parameters
----------
func : callable
Function to be differentiated.
primals
sequence of primal values at which the Jacobian of `func` should be evaluated.
Returns
-------
ret
        The output of `func` evaluated at `primals`, together with a function
        that maps a cotangent vector to the vector-Jacobian product of `func`
        evaluated at `primals`.
"""
return current_backend(None).vjp(func, *primals)
def jvp(func, primals, tangents):
"""Compute a (forward-mode) Jacobian-vector product of `func`.
Parameters
----------
func : callable
Function to be differentiated.
primals
sequence of primal values at which the Jacobian of `func` should be evaluated.
tangents
sequence of tangent vectors giving the Jacobian-vector product of `func`
evaluated at `primals`.
Returns
-------
ret
        The output of `func` evaluated at `primals`, together with the
        Jacobian-vector product of `func` evaluated at `primals` with
        `tangents`.
"""
return current_backend(None).jvp(func, primals, tangents)
| ivy/ivy/functional/ivy/experimental/gradients.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/gradients.py",
"repo_id": "ivy",
"token_count": 621
} | 44 |
"""Collection of gradient Ivy functions."""
# global
from typing import Sequence, Union, Optional, Tuple, Callable
import numpy as np
import itertools
# local
import ivy
from ivy.utils.backend import current_backend
from ivy.func_wrapper import (
handle_array_function,
inputs_to_ivy_arrays,
to_native_arrays_and_back,
handle_out_argument,
handle_nestable,
handle_array_like_without_promotion,
handle_device,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
# Helpers #
# ------- #
def _get_duplicate_index_chains(xs):
"""Generate a list of duplicate index chains for a given nested
structure."""
duplicate_index_chains = ()
if isinstance(xs, ivy.Container):
duplicate_index_chains = xs.cont_duplicate_array_keychains()
elif isinstance(xs, (list, tuple, dict)):
duplicate_index_chains = ivy.duplicate_array_index_chains(xs)
return duplicate_index_chains
def _arrays_to_float_variables(xs, xs_grad_idxs=None):
"""Convert all required arrays to float variables for gradient
calculation."""
def inner_fn(x):
if ivy.is_array(x, exclusive=True):
if ivy.is_int_dtype(x.dtype):
x = ivy.astype(x, ivy.default_float_dtype())
elif _is_variable(x):
x = ivy.stop_gradient(x, preserve_type=False)
return _variable(x)
return x
# Convert all required arrays to float variables
def map_fn(x):
return ivy.nested_map(inner_fn, x, include_derived=True, shallow=False)
if xs_grad_idxs is not None:
xs_required = ivy.multi_index_nest(xs, xs_grad_idxs)
ivy.nested_map(map_fn, xs_required, include_derived=True)
ivy.set_nest_at_indices(xs, xs_grad_idxs, xs_required)
return xs
return ivy.nested_map(map_fn, xs, include_derived=True, shallow=False)
def _get_required_native_variables(xs, xs_grad_idxs):
"""Extract all required native variables from a nested structure."""
# To make sure that only the required arrays are converted to native arrays
xs = ivy.nested_map(ivy.to_ivy, xs, include_derived=True, shallow=False)
if xs_grad_idxs is not None:
xs_required = ivy.multi_index_nest(xs, xs_grad_idxs)
ivy.nested_map(ivy.to_native, xs_required, include_derived=True)
ivy.set_nest_at_indices(xs, xs_grad_idxs, xs_required)
else:
xs = ivy.nested_map(ivy.to_native, xs, include_derived=True, shallow=False)
def map_fn(x):
if ivy.is_native_array(x):
return x
return None
# Extract all those required native arrays and None for all others
xs = ivy.nested_map(
map_fn, xs, include_derived=True, to_mutable=True, shallow=False
)
# Prune all None values
none_idxs = ivy.nested_argwhere(xs, lambda x: x is None)
not _check_if_empty(none_idxs) and ivy.prune_nest_at_indices(
xs, list(reversed(none_idxs))
)
xs = (
xs
if ivy.is_array(xs)
else (
xs.cont_prune_empty()
if isinstance(xs, ivy.Container)
else ivy.prune_empty(xs)
)
)
# return a single array instead of a list if possible, otherwise return the nest
if isinstance(xs, list) and len(xs) == 1:
return xs[0]
return xs
def _get_required_float_variables(xs, xs_grad_idxs):
"""Convert all required arrays to float variables for gradient calculation.
Also, returns a list of duplicate index chains for the nested
structure.
"""
if (ivy.is_ivy_container(xs) or ivy.is_array(xs)) and xs_grad_idxs == ((0,),):
xs_grad_idxs = None
duplicate_index_chains = _get_duplicate_index_chains(xs)
xs = _to_ivy(xs)
xs = _arrays_to_float_variables(xs, xs_grad_idxs=xs_grad_idxs)
xs = _set_duplicates(xs, duplicate_index_chains)
xs_required = _get_required_native_variables(xs, xs_grad_idxs)
required_duplicate_index_chains = _get_duplicate_index_chains(xs_required)
return (
xs,
xs_grad_idxs,
xs_required,
required_duplicate_index_chains,
duplicate_index_chains,
)
def _get_native_variables_and_indices(x, reshape=True, idxs=None, create_var=False):
"""Extract all relevant results from the output nested structure of a
function."""
def map_fn(x_):
if ivy.is_array(x_):
x_ = ivy.to_ivy(x_) if ivy.is_native_array(x_) else x_
if create_var:
x_ = x_ if _is_variable(x_, exclusive=True) else _variable(x_)
if len(x_.shape) == 0:
return ivy.to_native(x_)
if reshape:
if x_.size == 1:
                    return ivy.to_native(ivy.reshape(x_, []))
else:
return ivy.to_ivy(x_)
else:
return ivy.to_native(x_)
return x_
if ivy.is_array(x):
return [], map_fn(x)
x = ivy.nested_map(map_fn, x, include_derived=True, shallow=False)
arr_idxs = ivy.nested_argwhere(x, lambda x: ivy.is_native_array(x))
if _check_if_empty(arr_idxs):
return arr_idxs, []
else:
if idxs is not None:
arr_idxs = [
arr_idx
for arr_idx in arr_idxs
if "_".join(str(x) for x in arr_idx) in _idxs_to_str(idxs)
]
arr_values = ivy.multi_index_nest(x, arr_idxs)
arr_idxs = _idxs_to_str(arr_idxs)
return arr_idxs, arr_values
def _set_duplicates(xs, duplicate_index_chains):
"""Set the duplicates in the nested structure to have the same
reference."""
originals = list(
map(
lambda key_chains: [key_chains[0]] * (len(key_chains) - 1),
duplicate_index_chains,
)
)
originals = ivy.multi_index_nest(xs, list(itertools.chain(*originals)))
duplicates = list(
map(lambda index_chains: list(index_chains[1:]), duplicate_index_chains)
)
nullifying_index_chains = (
list(
map(
lambda index_chain: index_chain.split("/"),
list(itertools.chain(*duplicates)),
)
)
if isinstance(xs, ivy.Container)
else list(itertools.chain(*duplicates))
)
ivy.set_nest_at_indices(xs, nullifying_index_chains, originals)
return xs
def _get_y_and_ret_idxs(func_ret, ret_grad_idxs, create_var=False, reshape=True):
"""Get the relevant outputs from the function return value."""
if (ivy.is_ivy_container(func_ret) or ivy.is_array(func_ret)) and ret_grad_idxs == [
[0]
]:
ret_grad_idxs = None
ret_idxs, ret_values = _get_native_variables_and_indices(
func_ret, idxs=ret_grad_idxs, create_var=create_var, reshape=reshape
)
if ret_values is None or (isinstance(ret_values, list) and len(ret_values) == 0):
return func_ret, {}
if isinstance(ret_values, list) and len(ret_values) == 1 and ret_grad_idxs is None:
y = ret_values[0]
else:
y = ret_values
return ret_grad_idxs, y, ret_idxs
def _get_native_y(y):
"""Convert all outputs to native arrays."""
array_idxs = ivy.nested_argwhere(y, lambda x: ivy.is_native_array(x))
y_final = []
if isinstance(array_idxs, list) and np.asarray(array_idxs, "object").size > 0:
y_final = ivy.multi_index_nest(y, array_idxs)
return y_final
def _stop_grad_and_index(func_ret, retain_grads, grads):
"""Stop gradient propagation of the function results."""
if not retain_grads:
func_ret = ivy.nested_map(
lambda x: ivy.stop_gradient(x) if ivy.is_array(x) else x,
func_ret,
include_derived=True,
)
if isinstance(grads, dict):
grads = ivy.Container(grads)
return func_ret, grads
def _process_func_ret_and_grads(func_ret, grads, retain_grads):
"""Stop gradients propagation.
Set the gradients of non-finite values to zero, and stopping
gradient propagation of the function results.
"""
grads = _non_finite_to_zero(grads)
func_ret, grads = _stop_grad_and_index(func_ret, retain_grads, grads)
grads = _to_ivy(grads)
return func_ret, grads
def _check_if_empty(idxs):
return not isinstance(idxs, list) or np.asarray(idxs, dtype="object").size == 0
def _idxs_to_str(idxs):
return ["_".join(list(map(lambda x: str(x), idxs[i]))) for i in range(len(idxs))]
def _to_ivy(xs):
return ivy.nested_map(
lambda x: ivy.to_ivy(x) if ivy.is_array(x) else x,
xs,
include_derived=True,
shallow=False,
)
def _non_finite_to_zero(xs):
return ivy.nested_map(
lambda x: ivy.where(ivy.isfinite(x), x, 0.0) if ivy.is_array(x) else x,
xs,
include_derived=True,
shallow=False,
)
def _flatten_containers(inputs):
"""Flatten containers into a single tuple of arrays.
Returns a flattened tuple of arrays and the indices of the arrays in
the original containers.
"""
if ivy.is_array(inputs) or ivy.is_ivy_container(inputs):
inputs = (inputs,)
values = []
ret_idxs = []
for idx, input in enumerate(inputs):
if isinstance(input, ivy.Container):
grad_arr_idxs = ivy.nested_argwhere(input, lambda x: ivy.is_array(x))
grad_arr_values = ivy.multi_index_nest(input, grad_arr_idxs)
values.extend(grad_arr_values)
ret_idxs.append(grad_arr_idxs)
elif ivy.is_array(input):
values.append(input)
ret_idxs.append(None)
return tuple(values), ret_idxs
def _rebuild_flattened_containers(outputs, ret_idxs):
"""Rebuild the containers from the flattened arrays into a single tuple."""
rebuilt_outputs = []
curr_idx = 0
for ret_idx in ret_idxs:
if ret_idx is None:
rebuilt_outputs.append(outputs[curr_idx])
curr_idx += 1
else:
cont = ivy.Container()
num_elements = len(ret_idx)
cont_outputs = outputs[curr_idx : curr_idx + num_elements]
ivy.insert_into_nest_at_indices(cont, ret_idx, cont_outputs)
rebuilt_outputs.append(cont)
curr_idx += num_elements
return tuple(rebuilt_outputs)
# Private Variable Helpers #
# -------------------------#
def _variable(x):
x = ivy.to_native(x, nested=True)
ret = ivy.nested_map(
current_backend(x).variable, x, include_derived=True, shallow=False
)
return ivy.nested_map(ivy.to_ivy, ret, include_derived=True)
def _is_variable(x, exclusive=False, to_ignore=None) -> bool:
x = ivy.to_native(x, nested=True, to_ignore=to_ignore)
return ivy.nested_map(
lambda x: current_backend(x).is_variable(x, exclusive=exclusive),
x,
include_derived=True,
shallow=False,
to_ignore=to_ignore,
)
def _variable_data(
x: Union[ivy.Array, ivy.NativeArray],
) -> Union[ivy.Array, ivy.NativeArray]:
"""Get the contents of the input.
Parameters
----------
x
Input array.
Returns
-------
ret
An array with contents of the input.
"""
x = ivy.to_native(x, nested=True)
ret = ivy.nested_map(
lambda x: current_backend(x).variable_data(x), x, include_derived=True
)
return ivy.nested_map(ivy.to_ivy, ret, include_derived=True)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def stop_gradient(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
preserve_type: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Stop gradient computation.
Parameters
----------
x
Array for which to stop the gradient.
preserve_type
Whether to preserve gradient computation on ivy.Array instances. Default is
True.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The same array x, but with no gradient information.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([1., 2., 3.])
>>> y = ivy.stop_gradient(x, preserve_type=True)
>>> print(y)
ivy.array([1., 2., 3.])
>>> x = ivy.zeros((2, 3))
>>> ivy.stop_gradient(x, preserve_type=False, out=x)
>>> print(x)
ivy.array([[0., 0., 0.],
[0., 0., 0.]])
With one :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> y = ivy.stop_gradient(x, preserve_type=False)
>>> print(y)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> ivy.stop_gradient(x, preserve_type=True, out=x)
>>> print(x)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
"""
return current_backend(x).stop_gradient(x, preserve_type=preserve_type, out=out)
# AutoGrad #
@handle_exceptions
@handle_device
def execute_with_gradients(
func,
xs: Union[ivy.Array, ivy.NativeArray],
/,
*,
retain_grads: bool = False,
xs_grad_idxs: Sequence[Sequence[Union[str, int]]] = ((0,),),
ret_grad_idxs: Sequence[Sequence[Union[str, int]]] = ((0,),),
) -> Tuple[ivy.Array, ivy.Array]:
"""Call function func with input of xs variables, and return the function
result func_ret and the gradients of each output variable w.r.t each input
variable,
Parameters
----------
func
Function for which we compute the gradients of the output with respect to xs
input.
xs
Variables for which to compute the function gradients with respective to. This
can be a single array or an arbitrary nest of arrays.
retain_grads
Whether to retain the gradients of the returned values. (Default value = False)
xs_grad_idxs
Indices of the input arrays to compute gradients with respect to. If None,
gradients are returned with respect to all input arrays. If ``xs`` is an
``ivy.Array`` or ``ivy.Container``, the default value is ``None``, otherwise the
default value is ``[[0]]``.
ret_grad_idxs
Indices of the returned arrays for which to return computed gradients. If None,
gradients are returned for all returned arrays. If the returned object from the
``func`` is an ``ivy.Array`` or ``ivy.Container``, the default value is ``None``
otherwise the default value is ``[[0]]``.
Returns
-------
ret
the function result func_ret and a dictionary of gradients of each output
variable w.r.t each input variable.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[1, 4, 6], [2, 6, 9]])
>>> func = lambda x: ivy.mean(ivy.square(x))
>>> func_ret = ivy.execute_with_gradients(func, x, retain_grads=True)
>>> print(func_ret)
(ivy.array(29.), ivy.array([[0.33333334, 1.33333337, 2. ],
[0.66666669, 2. , 3. ]]))
With :class:`ivy.Container` input:
>>> x = ivy.Container(a = ivy.array([1, 4, 6]),
... b = ivy.array([2, 6, 9]))
>>> func = lambda x: ivy.mean(ivy.square(x))
>>> func_ret = ivy.execute_with_gradients(func, x, retain_grads=True)
>>> print(func_ret)
({
a: ivy.array(17.666666),
b: ivy.array(40.333332)
},
{
a: {
a: ivy.array([0.66666669, 2.66666675, 4.]),
b: ivy.array([0., 0., 0.])
},
b: {
a: ivy.array([0., 0., 0.]),
b: ivy.array([1.33333337, 4., 6.])
}
})
"""
return current_backend(None).execute_with_gradients(
func,
xs,
retain_grads=retain_grads,
xs_grad_idxs=xs_grad_idxs,
ret_grad_idxs=ret_grad_idxs,
)
execute_with_gradients.computes_gradients = True
@handle_exceptions
def value_and_grad(func: Callable) -> Callable:
"""Create a function that evaluates both func and the gradient of func.
Parameters
----------
func
Function for which we compute the gradients of the output with respect to xs
input.
Returns
-------
ret
A function that returns both func and the gradient of func.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[4.6, 2.1, 5], [2.8, 1.3, 6.2]])
>>> func = lambda x: ivy.mean(ivy.square(x))
>>> grad_fn = ivy.value_and_grad(func)
>>> value_grad = grad_fn(x)
>>> print(value_grad)
(ivy.array(16.42333412), ivy.array([[1.5333333 , 0.69999999, 1.66666675],
[0.93333334, 0.43333334, 2.0666666 ]]))
"""
return current_backend(None).value_and_grad(func)
value_and_grad.computes_gradients = True
@handle_exceptions
def jac(func: Callable) -> Callable:
"""Call function func, and return func's Jacobian partial derivatives.
Parameters
----------
func
Function for which we compute the gradients of the output with respect to xs
input.
Returns
-------
ret
the Jacobian function
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[4.6, 2.1, 5], [2.8, 1.3, 6.2]])
>>> func = lambda x: ivy.mean(ivy.square(x))
>>> jac_fn = ivy.jac(func)
>>> jacobian = jac_fn(x)
>>> print(jacobian)
ivy.array([[1.53 , 0.7 , 1.67 ],
... [0.933, 0.433, 2.07 ]])
"""
return current_backend(None).jac(func)
jac.computes_gradients = True
@handle_exceptions
def grad(func: Callable, argnums: Union[int, Sequence[int]] = 0) -> Callable:
"""Call function func, and return func's gradients.
Parameters
----------
func
Function for which we compute the gradients of the output with respect to xs
input.
argnums
Indices of the input arrays to compute gradients with respect to. Default is 0.
Returns
-------
ret
the grad function
Examples
--------
>>> x = ivy.array([[4.6, 2.1, 5], [2.8, 1.3, 6.2]])
>>> func = lambda x: ivy.mean(ivy.square(x))
>>> grad_fn = ivy.grad(func)
>>> grad = grad_fn(x)
>>> print(grad)
ivy.array([[1.53 , 0.7 , 1.67 ],
... [0.933, 0.433, 2.07 ]])
"""
return current_backend(None).grad(func, argnums=argnums)
grad.computes_gradients = True
# Optimizer Steps #
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def adam_step(
dcdw: Union[ivy.Array, ivy.NativeArray],
mw: Union[ivy.Array, ivy.NativeArray],
vw: Union[ivy.Array, ivy.NativeArray],
step: Union[int, float],
/,
*,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-7,
out: Optional[ivy.Array] = None,
) -> Tuple[ivy.Array, ivy.Array, ivy.Array]:
"""Compute adam step delta, given the derivatives of some cost c with
respect to weights ws, using ADAM update. `[reference]
<https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam>`_
Parameters
----------
dcdw
        Derivatives of the cost c with respect to the weights ws, [dc/dw for w in ws].
mw
running average of the gradients
vw
running average of second moments of the gradients
step
training step
beta1
gradient forgetting factor (Default value = 0.9)
beta2
second moment of gradient forgetting factor (Default value = 0.999)
epsilon
divisor during adam update, preventing division by zero (Default value = 1e-7)
out
optional output array, for writing the effective grad of adam_step to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
The adam step delta.
Examples
--------
With :class:`ivy.Array` inputs:
>>> dcdw = ivy.array([1, 2, 3])
>>> mw = ivy.ones(3)
>>> vw = ivy.ones(1)
>>> step = ivy.array(3)
>>> adam_step_delta = ivy.adam_step(dcdw, mw, vw, step)
>>> print(adam_step_delta)
(ivy.array([0.2020105 , 0.22187898, 0.24144873]),
ivy.array([0.99999998, 1.09999998, 1.19999998]),
ivy.array([1.00000001, 1.00300001, 1.00800001]))
>>> dcdw = ivy.array([[1., 4., -3.], [2., 3., 0.5]])
>>> mw = ivy.zeros((2,3))
>>> vw = ivy.zeros(3)
>>> step = ivy.array(1)
>>> beta1 = 0.86
>>> beta2 = 0.95
>>> epsilon = 1e-6
>>> adam_step_delta = ivy.adam_step(dcdw, mw, vw, step, beta1=beta1, beta2=beta2,
... epsilon=epsilon)
>>> print(adam_step_delta)
(ivy.array([[ 1., 1., -1.],
[ 1., 1., 1.]]),
ivy.array([[ 0.14, 0.56, -0.42],
[ 0.28, 0.42, 0.07]]),
ivy.array([[0.05 , 0.8 , 0.45 ],
[0.2 , 0.45 , 0.0125]]))
>>> dcdw = ivy.array([0.1, -0.7, 2])
>>> mw = ivy.ones(1)
>>> vw = ivy.ones(1)
>>> step = ivy.array(3.6)
>>> out = ivy.zeros_like(dcdw)
>>> adam_step_delta = ivy.adam_step(dcdw, mw, vw, step, out=out)
>>> print(out)
ivy.array([0.17294501, 0.15770318, 0.20863818])
With one :class:`ivy.Container` input:
>>> dcdw = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> mw = ivy.array([1., 4., 9.])
>>> vw = ivy.array([0.,])
>>> step = ivy.array([3.4])
>>> beta1 = 0.87
>>> beta2 = 0.976
>>> epsilon = 1e-5
>>> adam_step_delta = ivy.adam_step(dcdw, mw, vw, step, beta1=beta1, beta2=beta2,
... epsilon=epsilon)
>>> print(adam_step_delta)
({
a: ivy.array([6.49e+04, 1.74e+01, 1.95e+01]),
b: ivy.array([2.02, 4.82, 8.17])
}, {
a: ivy.array([0.87, 3.61, 8.09]),
b: ivy.array([1.26, 4., 8.48])
}, {
a: ivy.array([0., 0.024, 0.096]),
b: ivy.array([0.216, 0.384, 0.6])
})
With multiple :class:`ivy.Container` inputs:
>>> dcdw = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> mw = ivy.Container(a=ivy.array([0., 0., 0.]),
... b=ivy.array([0., 0., 0.]))
>>> vw = ivy.Container(a=ivy.array([0.,]),
... b=ivy.array([0.,]))
>>> step = ivy.array([3.4])
>>> beta1 = 0.87
>>> beta2 = 0.976
>>> epsilon = 1e-5
>>> adam_step_delta = ivy.adam_step(dcdw, mw, vw, step, beta1=beta1, beta2=beta2,
... epsilon=epsilon)
>>> print(adam_step_delta)
({
a: ivy.array([0., 0.626, 0.626]),
b: ivy.array([0.626, 0.626, 0.626])
}, {
a: ivy.array([0., 0.13, 0.26]),
b: ivy.array([0.39, 0.52, 0.65])
}, {
a: ivy.array([0., 0.024, 0.096]),
b: ivy.array([0.216, 0.384, 0.6])
})
"""
step = float(step)
mw = ivy.add(beta1 * mw, (1 - beta1) * dcdw)
dcdw_sqrd = dcdw**2
vw = ivy.add(ivy.multiply(beta2, vw), (1 - beta2) * dcdw_sqrd)
vw_sqrt = ivy.maximum(vw, 0.0) ** 0.5
beta1_pow = beta1**step
beta2_pow = beta2**step
alpha = (1 - beta2_pow) ** 0.5 / (1 - beta1_pow + epsilon)
return ivy.divide(alpha * mw, vw_sqrt + epsilon, out=out), mw, vw
adam_step.out_index = 0
# Optimizer Updates #
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def optimizer_update(
w: Union[ivy.Array, ivy.NativeArray],
effective_grad: Union[ivy.Array, ivy.NativeArray],
lr: Union[float, ivy.Array, ivy.NativeArray],
/,
*,
stop_gradients: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Update weights ws of some function, given the true or effective
derivatives of some cost c with respect to ws, [dc/dw for w in ws].
Parameters
----------
w
Weights of the function to be updated.
effective_grad
Effective gradients of the cost c with respect to the weights ws,
[dc/dw for w in ws].
lr
Learning rate(s), the rate(s) at which the weights should be updated relative to
the gradient.
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The new function weights ws_new, following the optimizer updates.
Examples
--------
With :class:`ivy.Array` inputs:
>>> w = ivy.array([1., 2., 3.])
>>> effective_grad = ivy.zeros(3)
>>> lr = 3e-4
>>> ws_new = ivy.optimizer_update(w, effective_grad, lr)
>>> print(ws_new)
ivy.array([1., 2., 3.])
>>> w = ivy.array([1., 2., 3.])
>>> effective_grad = ivy.zeros(3)
>>> lr = 3e-4
>>> ws_new = ivy.optimizer_update(w, effective_grad, lr,
... out=None, stop_gradients=True)
>>> print(ws_new)
ivy.array([1., 2., 3.])
>>> w = ivy.array([[1., 2.], [4., 5.]])
>>> out = ivy.zeros_like(w)
>>> effective_grad = ivy.array([[4., 5.], [7., 8.]])
>>> lr = ivy.array([3e-4, 1e-2])
>>> ws_new = ivy.optimizer_update(w, effective_grad, lr, out=out)
>>> print(out)
ivy.array([[0.999, 1.95],
[4., 4.92]])
>>> w = ivy.array([1., 2., 3.])
>>> out = ivy.zeros_like(w)
>>> effective_grad = ivy.array([4., 5., 6.])
>>> lr = ivy.array([3e-4])
>>> ws_new = ivy.optimizer_update(w, effective_grad, lr,
... stop_gradients=False, out=out)
>>> print(out)
ivy.array([0.999, 2. , 3. ])
With one :class:`ivy.Container` input:
>>> w = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> effective_grad = ivy.array([0., 0., 0.])
>>> lr = 3e-4
>>> ws_new = ivy.optimizer_update(w, effective_grad, lr)
>>> print(ws_new)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
With multiple :class:`ivy.Container` inputs:
>>> w = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> effective_grad = ivy.Container(a=ivy.array([0., 0., 0.]),
... b=ivy.array([0., 0., 0.]))
>>> lr = 3e-4
>>> ws_new = ivy.optimizer_update(w, effective_grad, lr, out=w)
>>> print(w)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
>>> w = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> effective_grad = ivy.Container(a=ivy.array([0., 0., 0.]),
... b=ivy.array([0., 0., 0.]))
>>> lr = ivy.array([3e-4])
>>> ws_new = ivy.optimizer_update(w, effective_grad, lr,
... stop_gradients=False)
>>> print(ws_new)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
"""
deltas = effective_grad * lr
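    # w_new = w - lr * effective_grad, optionally detached from the autograd graph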
w = ivy.subtract(w, deltas, out=out)
if stop_gradients:
return ivy.stop_gradient(w, preserve_type=True, out=out)
return w
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def gradient_descent_update(
w: Union[ivy.Array, ivy.NativeArray],
dcdw: Union[ivy.Array, ivy.NativeArray],
lr: Union[float, ivy.Array, ivy.NativeArray],
/,
*,
stop_gradients: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Update weights ws of some function, given the derivatives of some cost c
with respect to ws, [dc/dw for w in ws].
Parameters
----------
w
Weights of the function to be updated.
dcdw
        Derivatives of the cost c with respect to the weights ws, [dc/dw for w in ws].
lr
Learning rate(s), the rate(s) at which the weights should be updated relative to
the gradient.
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The new weights, following the gradient descent updates.
Examples
--------
With :class:`ivy.Array` inputs:
>>> w = ivy.array([[1., 2, 3],
... [4, 6, 1],
... [1, 0, 7]])
>>> dcdw = ivy.array([[0.5, 0.2, 0.1],
... [0.3, 0.6, 0.4],
... [0.4, 0.7, 0.2]])
>>> lr = ivy.array(0.1)
>>> new_weights = ivy.gradient_descent_update(w, dcdw, lr, stop_gradients=True)
>>> print(new_weights)
ivy.array([[ 0.95, 1.98, 2.99],
... [ 3.97, 5.94, 0.96],
... [ 0.96, -0.07, 6.98]])
>>> w = ivy.array([1., 2., 3.])
>>> dcdw = ivy.array([0.5, 0.2, 0.1])
>>> lr = ivy.array(0.3)
>>> out = ivy.zeros_like(w)
>>> ivy.gradient_descent_update(w, dcdw, lr, out=out)
>>> print(out)
ivy.array([0.85, 1.94, 2.97])
    With one :class:`ivy.Container` input:
>>> w = ivy.Container(a=ivy.array([1., 2., 3.]),
... b=ivy.array([3.48, 5.72, 1.98]))
>>> dcdw = ivy.array([0.5, 0.2, 0.1])
>>> lr = ivy.array(0.3)
>>> w_new = ivy.gradient_descent_update(w, dcdw, lr)
>>> print(w_new)
{
a: ivy.array([0.85, 1.94, 2.97]),
b: ivy.array([3.33, 5.66, 1.95])
}
With multiple :class:`ivy.Container` inputs:
>>> w = ivy.Container(a=ivy.array([1., 2., 3.]),
... b=ivy.array([3.48, 5.72, 1.98]))
>>> dcdw = ivy.Container(a=ivy.array([0.5, 0.2, 0.1]),
... b=ivy.array([2., 3.42, 1.69]))
>>> lr = ivy.array(0.3)
>>> w_new = ivy.gradient_descent_update(w, dcdw, lr)
>>> print(w_new)
{
a: ivy.array([0.85, 1.94, 2.97]),
b: ivy.array([2.88, 4.69, 1.47])
}
"""
return ivy.optimizer_update(w, dcdw, lr, stop_gradients=stop_gradients, out=out)
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def lars_update(
w: Union[ivy.Array, ivy.NativeArray],
dcdw: Union[ivy.Array, ivy.NativeArray],
lr: Union[float, ivy.Array, ivy.NativeArray],
/,
*,
decay_lambda: float = 0,
stop_gradients: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Update weights ws of some function, given the derivatives of some cost c
    with respect to ws, [dc/dw for w in ws], by applying the Layerwise Adaptive
    Rate Scaling (LARS) method.
Parameters
----------
w
Weights of the function to be updated.
dcdw
        Derivatives of the cost c with respect to the weights ws, [dc/dw for w in ws].
lr
Learning rate, the rate at which the weights should be updated relative to the
gradient.
decay_lambda
The factor used for weight decay. Default is zero.
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The new function weights ws_new, following the LARS updates.
Examples
--------
With :class:`ivy.Array` inputs:
>>> w = ivy.array([[3., 1, 5],
... [7, 2, 9]])
>>> dcdw = ivy.array([[0.3, 0.1, 0.2],
... [0.1, 0.2, 0.4]])
>>> lr = ivy.array(0.1)
>>> new_weights = ivy.lars_update(w, dcdw, lr)
>>> print(new_weights)
ivy.array([[2.34077978, 0.78025991, 4.56051969],
... [6.78026009, 1.56051981, 8.12103939]])
>>> w = ivy.array([3., 1, 5])
>>> dcdw = ivy.array([0.3, 0.1, 0.2])
>>> lr = ivy.array(0.1)
>>> out = ivy.zeros_like(dcdw)
>>> ivy.lars_update(w, dcdw, lr, out=out)
>>> print(out)
ivy.array([2.52565837, 0.8418861 , 4.68377209])
    With one :class:`ivy.Container` input:
>>> w = ivy.Container(a=ivy.array([3.2, 2.6, 1.3]),
... b=ivy.array([1.4, 3.1, 5.1]))
>>> dcdw = ivy.array([0.2, 0.4, 0.1])
>>> lr = ivy.array(0.1)
>>> new_weights = ivy.lars_update(w, dcdw, lr)
>>> print(new_weights)
{
a: ivy.array([3.01132035, 2.22264051, 1.2056601]),
b: ivy.array([1.1324538, 2.56490755, 4.96622658])
}
With multiple :class:`ivy.Container` inputs:
>>> w = ivy.Container(a=ivy.array([3.2, 2.6, 1.3]),
... b=ivy.array([1.4, 3.1, 5.1]))
>>> dcdw = ivy.Container(a=ivy.array([0.2, 0.4, 0.1]),
... b=ivy.array([0.3,0.1,0.2]))
>>> lr = ivy.array(0.1)
>>> new_weights = ivy.lars_update(w, dcdw, lr)
>>> print(new_weights)
{
a: ivy.array([3.01132035, 2.22264051, 1.2056601]),
b: ivy.array([0.90848625, 2.93616199, 4.77232409])
}
"""
w_norm = ivy.vector_norm(w)
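    # trust ratio: scale the learning rate by ||w|| / ||dc/dw||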
lr = ivy.stable_divide(w_norm * lr, ivy.vector_norm(dcdw))
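    # with weight decay enabled, the scaled rate is further divided by lambda * ||w||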
if decay_lambda > 0:
lr /= w_norm * decay_lambda
return ivy.gradient_descent_update(
w, dcdw, lr, stop_gradients=stop_gradients, out=out
)
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def adam_update(
w: Union[ivy.Array, ivy.NativeArray],
dcdw: Union[ivy.Array, ivy.NativeArray],
lr: Union[float, ivy.Array, ivy.NativeArray],
mw_tm1: Union[ivy.Array, ivy.NativeArray],
vw_tm1: Union[ivy.Array, ivy.NativeArray],
step: int,
/,
*,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-7,
stop_gradients: bool = True,
out: Optional[ivy.Array] = None,
) -> Tuple[ivy.Array, ivy.Array, ivy.Array]:
"""Update weights ws of some function, given the derivatives of some cost c
with respect to ws, using ADAM update. `[reference]
<https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam>`_
Parameters
----------
w
Weights of the function to be updated.
dcdw
        Derivatives of the cost c with respect to the weights ws, [dc/dw for w in ws].
lr
Learning rate(s), the rate(s) at which the weights should be updated relative to
the gradient.
mw_tm1
running average of the gradients, from the previous time-step.
vw_tm1
running average of second moments of the gradients, from the previous time-step.
step
training step.
beta1
gradient forgetting factor (Default value = 0.9).
beta2
second moment of gradient forgetting factor (Default value = 0.999).
epsilon
divisor during adam update, preventing division by zero (Default value = 1e-7).
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output array, for writing the new function weights ws_new to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
The new function weights ws_new, and also new mw and vw, following the adam
updates.
Examples
--------
With :class:`ivy.Array` inputs:
>>> w = ivy.array([1., 2, 3])
>>> dcdw = ivy.array([0.5,0.2,0.1])
>>> lr = ivy.array(0.1)
>>> vw_tm1 = ivy.zeros(1)
>>> mw_tm1 = ivy.zeros(3)
>>> step = 1
>>> updated_weights = ivy.adam_update(w, dcdw, lr, mw_tm1, vw_tm1, step)
>>> print(updated_weights)
(ivy.array([0.90000075, 1.90000164, 2.9000032 ]),
ivy.array([0.05, 0.02, 0.01]),
ivy.array([2.50000012e-04, 4.00000063e-05, 1.00000016e-05]))
>>> w = ivy.array([[1., 2, 3],[4, 2, 4],[6, 4, 2]])
>>> dcdw = ivy.array([[0.1, 0.2, 0.3],[0.4, 0.5, 0.1],[0.1, 0.5, 0.3]])
>>> lr = ivy.array(0.1)
>>> mw_tm1 = ivy.zeros((3,3))
>>> vw_tm1 = ivy.zeros(3)
>>> step = 2
>>> beta1 = 0.9
>>> beta2 = 0.999
>>> epsilon = 1e-7
>>> out = ivy.zeros_like(w)
>>> stop_gradients = True
>>> updated_weights = ivy.adam_update(w, dcdw, lr, mw_tm1, vw_tm1, step,
... beta1=beta1, beta2=beta2,
... epsilon=epsilon, out=out,
... stop_gradients=stop_gradients)
>>> print(updated_weights)
(
ivy.array([[0.92558873, 1.92558754, 2.92558718],
[3.92558694, 1.92558682, 3.92558861],
[5.92558861, 3.92558694, 1.92558718]]),
ivy.array([[0.01, 0.02, 0.03],
[0.04, 0.05, 0.01],
[0.01, 0.05, 0.03]]),
ivy.array([[1.00000016e-05, 4.00000063e-05, 9.00000086e-05],
[1.60000025e-04, 2.50000012e-04, 1.00000016e-05],
[1.00000016e-05, 2.50000012e-04, 9.00000086e-05]])
)
With one :class:`ivy.Container` input:
>>> w = ivy.Container(a=ivy.array([1., 2., 3.]), b=ivy.array([4., 5., 6.]))
>>> dcdw = ivy.array([0.5, 0.2, 0.4])
>>> mw_tm1 = ivy.array([0., 0., 0.])
>>> vw_tm1 = ivy.array([0.])
>>> lr = ivy.array(0.01)
>>> step = 2
>>> updated_weights = ivy.adam_update(w, dcdw, mw_tm1, vw_tm1, lr, step)
>>> print(updated_weights)
({
a: ivy.array([1., 2., 3.]),
b: ivy.array([4., 5., 6.])
}, ivy.array([0.05, 0.02, 0.04]), ivy.array([0.01024, 0.01003, 0.01015]))
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> dcdw = ivy.Container(a=ivy.array([0.1,0.3,0.3]),
... b=ivy.array([0.3,0.2,0.2]))
>>> mw_tm1 = ivy.Container(a=ivy.array([0.,0.,0.]),
... b=ivy.array([0.,0.,0.]))
>>> vw_tm1 = ivy.Container(a=ivy.array([0.,]),
... b=ivy.array([0.,]))
>>> step = 3
>>> beta1 = 0.9
>>> beta2 = 0.999
>>> epsilon = 1e-7
>>> stop_gradients = False
>>> lr = ivy.array(0.001)
>>> updated_weights = ivy.adam_update(w, dcdw, lr, mw_tm1, vw_tm1, step,
... beta1=beta1,
... beta2=beta2, epsilon=epsilon,
... stop_gradients=stop_gradients)
>>> print(updated_weights)
({
a: ivy.array([0.99936122, 1.99936116, 2.99936128]),
b: ivy.array([3.99936128, 4.99936104, 5.99936104])
}, {
a: ivy.array([0.01, 0.03, 0.03]),
b: ivy.array([0.03, 0.02, 0.02])
}, {
a: ivy.array([1.00000016e-05, 9.00000086e-05, 9.00000086e-05]),
b: ivy.array([9.00000086e-05, 4.00000063e-05, 4.00000063e-05])
})
"""
effective_grads, mw, vw = ivy.adam_step(
dcdw, mw_tm1, vw_tm1, step, beta1=beta1, beta2=beta2, epsilon=epsilon
)
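    # apply the bias-corrected adam step as a standard optimizer update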
return (
ivy.optimizer_update(
w, effective_grads, lr, stop_gradients=stop_gradients, out=out
),
mw,
vw,
)
adam_update.out_index = 0
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def lamb_update(
w: Union[ivy.Array, ivy.NativeArray],
dcdw: Union[ivy.Array, ivy.NativeArray],
lr: Union[float, ivy.Array, ivy.NativeArray],
mw_tm1: Union[ivy.Array, ivy.NativeArray],
vw_tm1: Union[ivy.Array, ivy.NativeArray],
step: int,
/,
*,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-7,
max_trust_ratio: Union[int, float] = 10,
decay_lambda: float = 0,
stop_gradients: bool = True,
out: Optional[ivy.Array] = None,
) -> Tuple[ivy.Array, ivy.Array, ivy.Array]:
"""Update weights ws of some function, given the derivatives of some cost c
    with respect to ws, [dc/dw for w in ws], by applying the LAMB method.
Parameters
----------
w
Weights of the function to be updated.
dcdw
        Derivatives of the cost c with respect to the weights ws, [dc/dw for w in ws].
lr
Learning rate(s), the rate(s) at which the weights should be updated relative to
the gradient.
mw_tm1
running average of the gradients, from the previous time-step.
vw_tm1
running average of second moments of the gradients, from the previous time-step.
step
training step.
beta1
gradient forgetting factor (Default value = 0.9).
beta2
second moment of gradient forgetting factor (Default value = 0.999).
epsilon
divisor during adam update, preventing division by zero (Default value = 1e-7).
max_trust_ratio
The maximum value for the trust ratio. (Default value = 10)
decay_lambda
The factor used for weight decay. (Default value = 0).
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output array, for writing the new function weights ws_new to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
The new function weights ws_new, following the LAMB updates.
Examples
--------
With :class:`ivy.Array` inputs:
>>> w = ivy.array([1., 2, 3])
>>> dcdw = ivy.array([0.5,0.2,0.1])
>>> lr = ivy.array(0.1)
>>> vw_tm1 = ivy.zeros(1)
>>> mw_tm1 = ivy.zeros(3)
>>> step = ivy.array(1)
>>> new_weights = ivy.lamb_update(w, dcdw, lr, mw_tm1, vw_tm1, step)
>>> print(new_weights)
(ivy.array([0.784, 1.78 , 2.78 ]),
... ivy.array([0.05, 0.02, 0.01]),
... ivy.array([2.5e-04, 4.0e-05, 1.0e-05]))
>>> w = ivy.array([[1., 2, 3],[4, 6, 1],[1, 0, 7]])
>>> dcdw = ivy.array([[0.5, 0.2, 0.1],[0.3, 0.6, 0.4],[0.4, 0.7, 0.2]])
>>> lr = ivy.array(0.1)
>>> mw_tm1 = ivy.zeros((3,3))
>>> vw_tm1 = ivy.zeros(3)
>>> step = ivy.array(1)
>>> beta1 = 0.9
>>> beta2 = 0.999
>>> epsilon = 1e-7
>>> max_trust_ratio = 10
>>> decay_lambda = 0
>>> out = ivy.zeros_like(w)
>>> stop_gradients = True
>>> new_weights = ivy.lamb_update(w, dcdw, lr, mw_tm1, vw_tm1, step, beta1=beta1,
... beta2=beta2, epsilon=epsilon,
... max_trust_ratio=max_trust_ratio,
... decay_lambda=decay_lambda, out=out,
... stop_gradients=stop_gradients)
>>> print(out)
ivy.array([[ 0.639, 1.64 , 2.64 ],
... [ 3.64 , 5.64 , 0.639],
... [ 0.639, -0.361, 6.64 ]])
    With one :class:`ivy.Container` input:
>>> w = ivy.Container(a=ivy.array([1., 2., 3.]), b=ivy.array([4., 5., 6.]))
>>> dcdw = ivy.array([3., 4., 5.])
>>> mw_tm1 = ivy.array([0., 0., 0.])
>>> vw_tm1 = ivy.array([0.])
>>> lr = ivy.array(1.)
>>> step = ivy.array([2])
>>> new_weights = ivy.lamb_update(w, dcdw, mw_tm1, vw_tm1, lr, step)
>>> print(new_weights)
({
a: ivy.array([1., 2., 3.]),
b: ivy.array([4., 5., 6.])
}, ivy.array([0.3, 0.4, 0.5]), ivy.array([1.01, 1.01, 1.02]))
With multiple :class:`ivy.Container` inputs:
>>> w = ivy.Container(a=ivy.array([1.,3.,5.]),
... b=ivy.array([3.,4.,2.]))
>>> dcdw = ivy.Container(a=ivy.array([0.2,0.3,0.6]),
... b=ivy.array([0.6,0.4,0.7]))
>>> mw_tm1 = ivy.Container(a=ivy.array([0.,0.,0.]),
... b=ivy.array([0.,0.,0.]))
>>> vw_tm1 = ivy.Container(a=ivy.array([0.,]),
... b=ivy.array([0.,]))
>>> step = ivy.array([3.4])
>>> beta1 = 0.9
>>> beta2 = 0.999
>>> epsilon = 1e-7
>>> max_trust_ratio = 10
>>> decay_lambda = 0
>>> stop_gradients = True
>>> lr = ivy.array(0.5)
>>> new_weights = ivy.lamb_update(w, dcdw, lr, mw_tm1, vw_tm1, step, beta1=beta1,
... beta2=beta2, epsilon=epsilon,
... max_trust_ratio=max_trust_ratio,
... decay_lambda=decay_lambda,
... stop_gradients=stop_gradients)
>>> print(new_weights)
({
a: ivy.array([-0.708, 1.29, 3.29]),
b: ivy.array([1.45, 2.45, 0.445])
}, {
a: ivy.array([0.02, 0.03, 0.06]),
b: ivy.array([0.06, 0.04, 0.07])
}, {
a: ivy.array([4.0e-05, 9.0e-05, 3.6e-04]),
b: ivy.array([0.00036, 0.00016, 0.00049])
})
"""
r1 = ivy.vector_norm(w)
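    # the adam step is rescaled by the trust ratio ||w|| / ||adam_step||
    # (optionally including weight decay), capped at max_trust_ratio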
eff_grads, mw, vw = ivy.adam_step(
dcdw, mw_tm1, vw_tm1, step, beta1=beta1, beta2=beta2, epsilon=epsilon
)
if decay_lambda > 0:
r2 = ivy.vector_norm(eff_grads + decay_lambda * w)
else:
r2 = ivy.vector_norm(eff_grads)
r = ivy.minimum(ivy.stable_divide(r1, r2), ivy.array(max_trust_ratio))
lr = r * lr
return (
ivy.optimizer_update(w, eff_grads, lr, stop_gradients=stop_gradients, out=out),
mw,
vw,
)
lamb_update.out_index = 0
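# A minimal usage sketch of the update functions above (hypothetical values; in practice
# the gradients would come from ivy's autodiff rather than being hard-coded):
#
#     w = ivy.array([1., 2., 3.])
#     mw, vw = ivy.zeros(3), ivy.zeros(3)
#     for step in range(1, 4):
#         dcdw = ivy.array([0.5, 0.2, 0.1])  # placeholder gradient
#         w, mw, vw = ivy.lamb_update(w, dcdw, 0.1, mw, vw, step)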
| ivy/ivy/functional/ivy/gradients.py/0 | {
"file_path": "ivy/ivy/functional/ivy/gradients.py",
"repo_id": "ivy",
"token_count": 22129
} | 45 |
"""Converters from Native Modules to Ivy Modules."""
# global
import functools
from typing import Optional, Dict, List
import re # noqa
import inspect
# local
import ivy
from ivy.utils.backend import current_backend
def to_ivy_module(
native_module=None,
native_module_class=None,
args=None,
kwargs=None,
device=None,
devices=None,
inplace_update=False,
):
"""Convert an instance of a trainable module from a native framework into a
trainable ivy.Module instance.
Parameters
----------
native_module
The module in the native framework to convert, required if native_module_class
is not given.
Default is ``None``.
native_module_class
The class of the native module, required if native_module is not given.
Default is ``None``.
args
Positional arguments to pass to the native module class. Default is ``None``.
kwargs
Key-word arguments to pass to the native module class. Default is ``None``.
device
The device on which to create module variables. Default is ``None``.
devices
The devices on which to create module variables. Default is ``None``.
inplace_update
For backends with dedicated variable classes, whether to update these inplace.
Default is ``False``.
Returns
-------
ret
The new trainable ivy.Module instance.
"""
return current_backend().to_ivy_module(
native_module,
native_module_class,
args,
kwargs,
device,
devices,
inplace_update,
)
class ModuleConverters:
# Module Converters #
@staticmethod
def from_haiku_module(
native_module,
params_hk=None,
rng_seed=0,
constructor_args: Optional[List] = None,
constructor_kwargs: Optional[Dict] = None,
instance_args: Optional[List] = None,
instance_kwargs: Optional[Dict] = None,
device=None,
devices=None,
):
"""Convert a Haiku module instance to an Ivy module instance.
Parameters
----------
native_module
            The module in the native framework to convert (class or instance).
params_hk
Haiku parameters to pass to the constructor of the native module.
Default is ``None``.
rng_seed
            Seed used to initialize haiku parameters if initializing from a class.
Default is ``0``.
constructor_args
Positional arguments to pass to the constructor of the native module.
Default is ``None``.
constructor_kwargs
Key-word arguments to pass to the constructor of the native module.
Default is ``None``.
instance_args
Positional arguments to pass to the forward pass of the native module.
Default is ``None``.
instance_kwargs
Key-word arguments to pass to the forward pass of the native module.
Default is ``None``.
device
The device on which to create module variables. Default is ``None``.
devices
The devices on which to create module variables. Default is ``None``.
Returns
-------
ret
            The new trainable ivy.Module instance.
"""
try:
import haiku as hk
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"`haiku` was not found installed on your system. Please proceed "
"to install it and restart your interpreter to see the changes."
) from exc
try:
from haiku._src.data_structures import FlatMapping # noqa
except (ImportError, AttributeError) as exc:
raise ImportError(
"Unable to import `FlatMapping` from `haiku`. Please check if the "
"requested attribute exists."
) from exc
c_args = ivy.default(constructor_args, [])
c_kwargs = ivy.default(constructor_kwargs, {})
i_args = ivy.default(instance_args, [])
i_kwargs = ivy.default(instance_kwargs, {})
i_args, i_kwargs = ivy.args_to_native(*i_args, **i_kwargs)
transformed_module = native_module
if inspect.isclass(native_module):
if len(i_args) == 0 and len(i_kwargs) == 0:
raise ivy.utils.exceptions.IvyException(
"both instance_args and instance_kwargs cannot be none"
" when passing a native class"
)
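            # wrap the class in a stateless forward function so hk.transform can initialise its parameters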
def forward_fn(*a, **kw):
model = native_module(*c_args, **c_kwargs)
return model(*i_args, **i_kwargs)
transformed_module = hk.transform(forward_fn)
params_hk = transformed_module.init(rng_seed, *i_args, **i_kwargs)
from ivy.stateful.module import _HaikuIvyModule
return _HaikuIvyModule(
*i_args,
params_hk=params_hk,
native_module=transformed_module,
device=device,
devices=devices,
**i_kwargs,
)
@staticmethod
def from_flax_module(
native_module,
params_fx=None,
rng_seed=0,
constructor_args: Optional[List] = None,
constructor_kwargs: Optional[Dict] = None,
instance_args: Optional[List] = None,
instance_kwargs: Optional[Dict] = None,
device=None,
devices=None,
):
"""Convert a Flax module instance to an Ivy module instance.
Parameters
----------
native_module
            The module in the native framework to convert (class or instance).
params_fx
Flax parameters to pass to the constructor of the native module.
Default is ``None``.
rng_seed
            Seed used to initialize flax parameters if initializing from a class.
Default is ``0``.
constructor_args
Positional arguments to pass to the constructor of the native module.
Default is ``None``.
constructor_kwargs
Key-word arguments to pass to the constructor of the native module.
Default is ``None``.
instance_args
Positional arguments to pass to the forward pass of the native module.
Default is ``None``.
instance_kwargs
Key-word arguments to pass to the forward pass of the native module.
Default is ``None``.
device
The device on which to create module variables. Default is ``None``.
devices
The devices on which to create module variables. Default is ``None``.
Returns
-------
ret
The new trainable ivy.Module instance.
"""
try:
import flax # noqa
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"`flax` was not found installed on your system. Please proceed "
"to install it and restart your interpreter to see the changes."
) from exc
try:
import jax
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"`jax` was not found installed on your system. Please proceed "
"to install it and restart your interpreter to see the changes."
) from exc
c_args = ivy.default(constructor_args, [])
c_kwargs = ivy.default(constructor_kwargs, {})
i_args = ivy.default(instance_args, [])
i_kwargs = ivy.default(instance_kwargs, {})
i_args, i_kwargs = ivy.args_to_native(*i_args, **i_kwargs)
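        # flax's init expects a jax PRNGKey, so promote a plain integer seed if one was given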
if isinstance(rng_seed, int):
rng_seed = jax.random.PRNGKey(rng_seed)
if inspect.isclass(native_module):
if len(i_args) == 0 and len(i_kwargs) == 0:
raise ivy.utils.exceptions.IvyException(
"both instance_args and instance_kwargs cannot be none"
" when passing a native class"
)
native_module = native_module(*c_args, **c_kwargs)
params_fx = native_module.init(rng_seed, *i_args, **i_kwargs)
from ivy.stateful.module import _FlaxIvyModule
return _FlaxIvyModule(
*i_args,
params_fx=params_fx,
native_module=native_module,
device=device,
devices=devices,
**i_kwargs,
)
@staticmethod
def from_keras_module(
native_module=None,
constructor_args: Optional[List] = None,
constructor_kwargs: Optional[Dict] = None,
instance_args: Optional[List] = None,
instance_kwargs: Optional[Dict] = None,
device=None,
devices=None,
):
"""Convert a Keras module instance to an Ivy module instance.
Parameters
----------
native_module
            The module in the native framework to convert (class or instance).
constructor_args
Positional arguments to pass to the constructor of the native module.
Default is ``None``.
constructor_kwargs
Key-word arguments to pass to the constructor of the native module.
Default is ``None``.
instance_args
Positional arguments to pass to the forward pass of the native module.
Default is ``None``.
instance_kwargs
Key-word arguments to pass to the forward pass of the native module.
Default is ``None``.
device
The device on which to create module variables. Default is ``None``.
devices
The devices on which to create module variables. Default is ``None``.
Returns
-------
ret
The new trainable ivy.Module instance.
"""
c_args = ivy.default(constructor_args, [])
c_kwargs = ivy.default(constructor_kwargs, {})
i_args = ivy.default(instance_args, [])
i_kwargs = ivy.default(instance_kwargs, {})
if inspect.isclass(native_module):
if len(i_args) == 0 and len(i_kwargs) == 0:
raise ivy.utils.exceptions.IvyException(
"both instance_args and instance_kwargs cannot be none"
" when passing a native class"
)
native_module = native_module(*c_args, **c_kwargs)
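            # build the keras model, taking the input feature size from the
            # last dimension of the first instance argument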
input_shape = i_args[0].shape
native_module.build((input_shape[-1],))
from ivy.stateful.module import _KerasIvyModule
return _KerasIvyModule(
*i_args,
native_module=native_module,
device=device,
devices=devices,
**i_kwargs,
)
@staticmethod
def from_paddle_module(
native_module=None,
constructor_args: Optional[List] = None,
constructor_kwargs: Optional[Dict] = None,
instance_args: Optional[List] = None,
instance_kwargs: Optional[Dict] = None,
device=None,
devices=None,
):
"""Convert a Paddle layer instance to an Ivy module instance.
Parameters
----------
native_module
            The module in the native framework to convert (class or instance).
constructor_args
Positional arguments to pass to the constructor of the native module.
Default is ``None``.
constructor_kwargs
Key-word arguments to pass to the constructor of the native module.
Default is ``None``.
instance_args
Positional arguments to pass to the forward pass of the native module.
Default is ``None``.
instance_kwargs
Key-word arguments to pass to the forward pass of the native module.
Default is ``None``.
device
The device on which to create module variables. Default is ``None``.
devices
The devices on which to create module variables. Default is ``None``.
Returns
-------
ret
The new trainable ivy.Module instance.
"""
c_args = ivy.default(constructor_args, [])
c_kwargs = ivy.default(constructor_kwargs, {})
i_args = ivy.default(instance_args, [])
i_kwargs = ivy.default(instance_kwargs, {})
if inspect.isclass(native_module):
native_module = native_module(*c_args, **c_kwargs)
from ivy.stateful.module import _PaddleIvyModule
return _PaddleIvyModule(
*i_args,
native_module=native_module,
device=device,
devices=devices,
**i_kwargs,
)
@staticmethod
def from_torch_module(
native_module=None,
constructor_args: Optional[List] = None,
constructor_kwargs: Optional[Dict] = None,
instance_args: Optional[List] = None,
instance_kwargs: Optional[Dict] = None,
device=None,
devices=None,
inplace_update=False,
):
"""Convert a Torch module instance to an Ivy module instance.
Parameters
----------
native_module
            The module in the native framework to convert (class or instance).
constructor_args
Positional arguments to pass to the constructor of the native module.
Default is ``None``.
constructor_kwargs
Key-word arguments to pass to the constructor of the native module.
Default is ``None``.
instance_args
Positional arguments to pass to the forward pass of the native module.
Default is ``None``.
instance_kwargs
Key-word arguments to pass to the forward pass of the native module.
Default is ``None``.
device
The device on which to create module variables. Default is ``None``.
devices
The devices on which to create module variables. Default is ``None``.
inplace_update
For backends with dedicated variable classes, whether to update these
inplace. Default is ``False``.
Returns
-------
ret
The new trainable ivy.Module instance.
"""
try:
import torch # noqa
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"`torch` was not found installed on your system. Please proceed "
"to install it and restart your interpreter to see the changes."
) from exc
c_args = ivy.default(constructor_args, [])
c_kwargs = ivy.default(constructor_kwargs, {})
i_args = ivy.default(instance_args, [])
i_kwargs = ivy.default(instance_kwargs, {})
if inspect.isclass(native_module):
native_module = native_module(*c_args, **c_kwargs)
from ivy.stateful.module import _TorchIvyModule
return _TorchIvyModule(
*i_args,
native_module=native_module,
device=device,
devices=devices,
inplace_update=inplace_update,
**i_kwargs,
)
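    # A rough usage sketch (hypothetical layer sizes; assumes torch is installed and that
    # these converters are exposed through ivy.Module, as in ivy.stateful.module):
    #     import torch
    #     net = torch.nn.Linear(4, 2)
    #     ivy.set_backend("torch")
    #     ivy_net = ivy.Module.from_torch_module(net)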
def to_keras_module(self):
"""Convert a `ivy.Module` module instance to a `tf.keras.Model`
instance.
Returns
-------
ret
The new trainable `tf.keras.Model` instance.
"""
try:
import tensorflow as tf
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"`tensorflow` was not found installed on your system. Please proceed "
"to install it and restart your interpreter to see the changes."
) from exc
class KerasModel(tf.keras.Model):
def __init__(self, ivy_module):
super().__init__()
self._ivy_module = ivy_module
self._parameters = {}
self._assign_variables()
self._populate_params()
self._propagate_params()
def _assign_variables(self):
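                # move the ivy module's variables to tensorflow, register matching
                # keras weights, and copy the values across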
ivy.set_backend("tensorflow")
self._ivy_module.v = self._ivy_module.v.cont_map(
lambda x, kc: (
ivy.to_new_backend(x.ivy_array.data, native=True)
if hasattr(x, "_ivy_array")
else ivy.to_new_backend(x, native=True)
),
)
self._ivy_module.v.cont_map(
lambda x, kc: (
self.add_weight(
name=kc, shape=x.shape, dtype=x.dtype, trainable=True
)
if x is not None
else x
)
)
model_weights = []
self._ivy_module.v.cont_map(
lambda x, kc: (
model_weights.append(ivy.to_numpy(x)) if x is not None else x
)
)
self.set_weights(model_weights)
ivy.previous_backend()
def _populate_params(self):
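                # map cleaned variable names (':0' suffix and model prefix stripped)
                # back to the keras variables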
self._parameters = {
re.sub(r":([0-9]+)$", "", param.name).replace(
f"{self.name}/", ""
): param
for param in self.variables
}
def _propagate_params(self):
def __update_param(ivy_module, x, kc):
if kc not in self._parameters:
return x
# Update param in the underneath ivy module
module = ivy_module
keys = re.split("[/.]", kc)
for key in keys[:-1]:
module = module.__getattribute__(key)
if hasattr(module, "_update_v"):
module._update_v({keys[-1]: self._parameters[kc]})
return getattr(module, keys[-1])
self._ivy_module.v = self._ivy_module.v.cont_map(
functools.partial(__update_param, self._ivy_module),
inplace=True,
)
def call(self, *args, training=None, **kwargs):
ret = self._ivy_module(*args, **kwargs)
ret = ivy.nested_map(
lambda x: (
x.ivy_array.data
if hasattr(x, "_ivy_array")
else ivy.to_native(x)
),
ret,
)
return ret
def __call__(self, *args, **kwargs):
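                # make sure incoming arrays are tensorflow natives before dispatching to keras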
if ivy.backend != "tensorflow":
ivy.set_backend("tensorflow")
args, kwargs = ivy.args_to_new_backend(*args, native=True, **kwargs)
ivy.previous_backend()
else:
args, kwargs = ivy.args_to_new_backend(*args, native=True, **kwargs)
return super().__call__(*args, **kwargs)
def to_device(self, device):
self._ivy_module._module_graph.to_device(device)
model_weights = ivy.nested_map(
lambda x: (
ivy.to_native(ivy.to_device(x, device))
if ivy.is_array(x)
else x
),
self.weights,
)
self.set_weights(model_weights)
keras_module = KerasModel(self)
return keras_module
| ivy/ivy/stateful/converters.py/0 | {
"file_path": "ivy/ivy/stateful/converters.py",
"repo_id": "ivy",
"token_count": 9484
} | 46 |
import os
import logging
import json
from packaging import tags
from urllib import request
from tqdm import tqdm
def _get_paths_from_binaries(binaries, root_dir=""):
"""Get all the paths from the binaries.json into a list."""
paths = []
ext = "pyd" if os.name == "nt" else "so"
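    # binaries.json nests dicts (directories) and lists, with plain strings as the leaf binary names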
if isinstance(binaries, str):
return [os.path.join(root_dir, binaries + "." + ext)]
elif isinstance(binaries, dict):
for k, v in binaries.items():
paths += _get_paths_from_binaries(v, os.path.join(root_dir, k))
else:
for i in binaries:
paths += _get_paths_from_binaries(i, root_dir)
return paths
def check_for_binaries():
folder_path = os.sep.join(__file__.split(os.sep)[:-3])
binaries_path = os.path.join(folder_path, "binaries.json")
available_configs_path = os.path.join(folder_path, "available_configs.json")
initial = True
if os.path.exists(binaries_path):
binaries_dict = json.load(open(binaries_path))
available_configs = json.load(open(available_configs_path))
binaries_paths = _get_paths_from_binaries(binaries_dict, folder_path)
# verify if all binaries are available
for path in binaries_paths:
if not os.path.exists(path):
if initial:
config_str = "\n".join(
[
f"{module} : {', '.join(configs)}"
for module, configs in available_configs.items()
]
)
                        logging.warning(
                            "\tSome binaries seem to be missing in your system. This could "
                            "be either because we don't have compatible binaries for your "
                            "system or because newer binaries were available. In the latter "
                            "case, calling ivy.utils.cleanup_and_fetch_binaries() should "
                            "fetch the binaries. Feel free to create an issue on "
                            "https://github.com/unifyai/ivy.git in case of the former\n"
                        )
logging.warning(
"\nFollowing are the supported configurations"
f" :\n{config_str}\n"
)
initial = False
logging.warning(f"\t{path} not found.")
if not initial:
print()
def cleanup_and_fetch_binaries(clean=True):
folder_path = os.sep.join(__file__.split(os.sep)[:-3])
binaries_path = os.path.join(folder_path, "binaries.json")
available_configs_path = os.path.join(folder_path, "available_configs.json")
if os.path.exists(binaries_path):
binaries_dict = json.load(open(binaries_path))
available_configs = json.load(open(available_configs_path))
binaries_paths = _get_paths_from_binaries(binaries_dict, folder_path)
binaries_exts = {path.split(".")[-1] for path in binaries_paths}
# clean up existing binaries
if clean:
print("Cleaning up existing binaries...", end="\r")
for root, _, files in os.walk(folder_path, topdown=True):
for file in files:
if file.split(".")[-1] in binaries_exts:
os.remove(os.path.join(root, file))
print("Cleaning up existing binaries --> done")
print("Downloading new binaries...")
all_tags = list(tags.sys_tags())
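        # tags.sys_tags() yields this interpreter's compatible wheel tags in
        # priority order, so the first match is the best one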
version = os.environ["VERSION"] if "VERSION" in os.environ else "main"
terminate = False
# download binaries for the tag with highest precedence
with tqdm(total=len(binaries_paths)) as pbar:
for tag in all_tags:
if terminate:
break
for path in binaries_paths:
module = path[len(folder_path) :][1:].split(os.sep)[1]
if (
os.path.exists(path)
or str(tag) not in available_configs[module]
):
continue
folders = path.split(os.sep)
_, file_path = os.sep.join(folders[:-1]), folders[-1]
ext = "pyd" if os.name == "nt" else "so"
file_name = f"{file_path[:-(len(ext)+1)]}_{tag}.{ext}"
search_path = f"{module}/{file_name}"
try:
response = request.urlopen(
"https://github.com/unifyai/binaries/raw/"
f"{version}/{search_path}",
timeout=40,
)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as f:
f.write(response.read())
terminate = path == binaries_paths[-1]
pbar.update(1)
except request.HTTPError:
break
if terminate:
print("Downloaded all binaries!")
else:
print(
"Couldn't download all binaries. Try importing ivy to get more "
"details about the missing binaries."
)
| ivy/ivy/utils/binaries.py/0 | {
"file_path": "ivy/ivy/utils/binaries.py",
"repo_id": "ivy",
"token_count": 2763
} | 47 |
import os
this_dir = os.path.dirname(os.path.realpath(__file__))
func_folder = os.path.join(this_dir, "array_api_methods_to_test")
# api function filepaths
func_fnames = os.listdir(func_folder)
func_fnames.sort()
func_fpaths = [os.path.join(func_folder, fname) for fname in func_fnames]
# all filepaths
fpaths = func_fpaths
# test lists
framework_tests_to_run = {
"jax": [],
"numpy": [],
"torch": [],
"tensorflow": [],
}
framework_tests_to_skip = {
"jax": [],
"numpy": [],
"torch": [],
"tensorflow": [],
}
# add from each filepath
for fpath in fpaths:
# extract contents
with open(fpath, "r") as file:
contents = file.read()
# update tests to run and skip
contents = [line.replace("__", "") for line in contents.split("\n")]
for framework in framework_tests_to_run:
tests_to_run = []
tests_to_skip = []
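        # entries without "#" run for every framework; "#"-prefixed entries are skipped
        # for the frameworks they name (and for all frameworks if none are named)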
for s in contents:
if s == "":
continue
if ("#" not in s) or (
"#" in s
and (framework not in s.lower())
and any(f in s.lower() for f in framework_tests_to_run)
):
tests_to_run += (
[f"test_{s}"]
if "#" not in s
else ["test_" + s.split("#")[1].split(" ")[0]]
)
else:
tests_to_skip += ["test_" + s[1:].split(" ")[0]]
framework_tests_to_run[framework] += tests_to_run
framework_tests_to_skip[framework] += tests_to_skip
for framework in framework_tests_to_skip:
# prune tests to skip
framework_tests_to_skip[framework] = [
tts
for tts in framework_tests_to_skip[framework]
        if not any(tts in ttr for ttr in framework_tests_to_run[framework])
]
# save to file
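# each file holds a pytest -k expression of the form "(test_a or test_b ...) and not (test_c or ...)"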
for framework in framework_tests_to_run:
with open(
os.path.join(this_dir, ".array_api_tests_k_flag_" + framework), "w+"
) as file:
file.write(
"("
+ " or ".join(framework_tests_to_run[framework])
+ ") and not ("
+ " or ".join(framework_tests_to_skip[framework])
+ ")"
)
| ivy/ivy_tests/array_api_testing/write_array_api_tests_k_flag.py/0 | {
"file_path": "ivy/ivy_tests/array_api_testing/write_array_api_tests_k_flag.py",
"repo_id": "ivy",
"token_count": 1166
} | 48 |