text (string, lengths 17-362k) | id (string, lengths 13-115) | metadata (dict) | __index_level_0__ (int64, 0-75)
---|---|---|---|
from . import counting
from .counting import *
from . import searching
from .searching import *
from . import sorting
from .sorting import *
| ivy/ivy/functional/frontends/numpy/sorting_searching_counting/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/sorting_searching_counting/__init__.py",
"repo_id": "ivy",
"token_count": 36
} | 37 |
class NodeProto:
def __init__(self):
self._fn = None
self._fn_mod = None
self._fn_name = None
self.input = None
self.output = None
self.name = None
def __call__(self, *args, **kwargs):
return self._fn(*args, **kwargs)
class GraphProto:
def __init__(self):
self.node = None
self.name = None
self.input = None
self.output = None
| ivy/ivy/functional/frontends/onnx/proto.py/0 | {
"file_path": "ivy/ivy/functional/frontends/onnx/proto.py",
"repo_id": "ivy",
"token_count": 206
} | 38 |
# local
from ..math import * # noqa: F401
import ivy
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
# NOTE:
# Only inplace functions are to be added in this file.
# Please add non-inplace counterparts to `/frontends/paddle/math.py`.
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def ceil_(x, name=None):
return ivy.ceil(x, out=x)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
)
@to_ivy_arrays_and_back
def clip_(x, min=None, max=None, name=None):
ivy.utils.assertions.check_all_or_any_fn(
min,
max,
fn=ivy.exists,
type="any",
limit=[1, 2],
message="at most one of min or max can be None",
)
if min is None:
min = ivy.min(x)
if max is None:
max = ivy.max(x)
res = ivy.clip(x, min, max)
if res.dtype != x.dtype:
res = ivy.astype(res, x.dtype)
return res
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def exp_(x, name=None):
return ivy.inplace_update(x, exp(x))
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def floor_(x, name=None):
return ivy.inplace_update(x, floor(x))
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def lerp_(x, y, weight, name=None):
return ivy.inplace_update(x, lerp(x, y, weight))
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def reciprocal_(x, name=None):
return ivy.inplace_update(x, reciprocal(x))
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def round_(x, name=None):
return ivy.inplace_update(x, round(x))
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def rsqrt_(x, name=None):
return ivy.inplace_update(x, reciprocal(sqrt(x)))
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def sqrt_(x, name=None):
return ivy.inplace_update(x, sqrt(x))
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def subtract_(x, y, name=None):
return ivy.inplace_update(x, subtract(x, y))
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def tanh_(x, name=None):
return ivy.inplace_update(x, tanh(x))
| ivy/ivy/functional/frontends/paddle/tensor/math.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/tensor/math.py",
"repo_id": "ivy",
"token_count": 1169
} | 39 |
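Below is a brief, hedged usage sketch for the in-place paddle frontend functions in the row above; the numpy backend and the sample values are assumptions, not part of the dataset.

import ivy
from ivy.functional.frontends.paddle.tensor.math import clip_

ivy.set_backend("numpy")  # assumed backend; any installed backend should work
x = ivy.array([[-1.7, 0.3], [2.4, 9.1]], dtype="float32")
out = clip_(x, min=0.0, max=1.0)  # clamps every element into [0.0, 1.0]
print(out)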
from .special import *
| ivy/ivy/functional/frontends/scipy/special/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/scipy/special/__init__.py",
"repo_id": "ivy",
"token_count": 6
} | 40 |
from abc import ABCMeta, abstractmethod
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
from ivy.functional.frontends.sklearn.utils.validation import column_or_1d
class BaseCrossValidator(metaclass=ABCMeta):
def split(self, X, y=None, groups=None):
indices = ivy.arange(X.shape[0])
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[ivy.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
def _iter_test_masks(self, X=None, y=None, groups=None):
for test_index in self._iter_test_indices(X, y, groups):
test_mask = ivy.zeros(X.shape[0], dtype="bool")
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
pass
class KFold(BaseCrossValidator):
def __init__(
self,
n_splits=5,
*,
shuffle=False,
random_state=None,
):
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def _iter_test_indices(self, X=None, y=None, groups=None):
n_samples = X.shape[0]
indices = ivy.arange(n_samples)
if self.shuffle:
indices = ivy.shuffle(indices, seed=self.random_state)
n_splits = self.n_splits
fold_sizes = ivy.full(
n_splits, n_samples // n_splits, dtype=ivy.default_int_dtype()
)
fold_sizes[: n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
def get_n_splits(self, X=None, y=None, groups=None):
return self.n_splits
class StratifiedKFold(KFold):
def __init__(
self,
n_splits=5,
*,
shuffle=False,
random_state=None,
):
super().__init__(
n_splits=n_splits,
shuffle=shuffle,
random_state=random_state,
)
def _iter_test_indices(self, X=None, y=None, groups=None):
ivy.seed(seed_value=self.random_state)
y = ivy.array(y)
y = column_or_1d(y)
_, y_idx, y_inv, _ = ivy.unique_all(y)
class_perm = ivy.unique_inverse(y_idx)
y_encoded = class_perm[y_inv]
n_classes = len(y_idx)
y_order = ivy.sort(y_encoded)
allocation = ivy.asarray(
[
ivy.bincount(y_order[i :: self.n_splits], minlength=n_classes)
for i in range(self.n_splits)
]
)
test_folds = ivy.empty(len(y), dtype="int64")
for k in range(n_classes):
folds_for_class = ivy.arange(self.n_splits).repeat(allocation[:, k])
if self.shuffle:
folds_for_class = ivy.shuffle(folds_for_class)
test_folds[y_encoded == k] = folds_for_class
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
return super().split(X, y, groups)
@to_ivy_arrays_and_back
def train_test_split(
*arrays,
test_size=None,
train_size=None,
random_state=None,
shuffle=True,
stratify=None,
):
# TODO: Make it concise
# TODO: implement stratify
if stratify is not None:
raise NotImplementedError
if len(arrays) == 0:
raise ValueError("At least one array required as input")
if test_size is None and train_size is None:
test_size = 0.25
n_samples = arrays[0].shape[0]
n_train = (
ivy.floor(train_size * n_samples)
if isinstance(train_size, float)
else float(train_size)
if isinstance(train_size, int)
else None
)
n_test = (
ivy.ceil(test_size * n_samples)
if isinstance(test_size, float)
else float(test_size)
if isinstance(test_size, int)
else None
)
if train_size is None:
n_train = n_samples - n_test
elif test_size is None:
n_test = n_samples - n_train
n_train, n_test = int(n_train), int(n_test)
indices = ivy.arange(0, n_train + n_test)
if shuffle:
if random_state is not None:
ivy.seed(seed_value=random_state)
indices = ivy.shuffle(indices)
train_indices = indices[:n_train]
test_indices = indices[n_train:]
output = []
for array in arrays:
output.append(ivy.gather(array, train_indices, axis=0))
output.append(ivy.gather(array, test_indices, axis=0))
return tuple(output)
| ivy/ivy/functional/frontends/sklearn/model_selection/_split.py/0 | {
"file_path": "ivy/ivy/functional/frontends/sklearn/model_selection/_split.py",
"repo_id": "ivy",
"token_count": 2350
} | 41 |
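Below is a hedged usage sketch for the cross-validators in the row above; the toy data and the numpy backend are assumptions, not part of the dataset.

import ivy
from ivy.functional.frontends.sklearn.model_selection._split import KFold

ivy.set_backend("numpy")  # assumed backend
X = ivy.arange(10).reshape((5, 2))  # five samples, two features
kf = KFold(n_splits=5)
for train_idx, test_idx in kf.split(X):
    # each of the five folds holds out exactly one sample
    print(train_idx, test_idx)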
# local
import ivy
from ivy.functional.frontends.tensorflow.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_supported_dtypes, with_unsupported_dtypes
import ivy.functional.frontends.tensorflow.nn as tf_nn
@with_unsupported_dtypes({"2.15.0 and below": ("float16",)}, "tensorflow")
def depthwise_conv2d(
input,
filter,
strides,
padding,
rate=None,
name=None,
data_format=None,
dilations=None,
):
if rate:
dilations = rate
return tf_nn.depthwise_conv2d(
input,
filter,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
)
# should have float16 as well but sqrt doesn't support it
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.15.0 and below": ("float32",)}, "tensorflow")
def fused_batch_norm(
x,
scale,
offset,
mean=None,
variance=None,
epsilon=1e-3,
data_format="NHWC",
is_training=True,
name=None,
exponential_avg_factor=1.0,
):
min_epsilon = 1.001e-5
epsilon = epsilon if epsilon > min_epsilon else min_epsilon
dims = len(x.shape)
if data_format[1] == "C":
if dims == 4:
x = ivy.permute_dims(x, axes=(0, 2, 3, 1))
elif dims == 5:
x = ivy.permute_dims(x, axes=(0, 2, 3, 4, 1))
else:
raise ivy.utils.exceptions.IvyException(
f"input tensor must be of 4 or 5 dimensions, got {dims}"
)
scale = scale.astype(ivy.float32)
offset = offset.astype(ivy.float32)
old_mean = mean.astype(ivy.float32)
old_var = variance.astype(ivy.float32)
x = x.astype(ivy.float32)
if is_training:
depth = x.shape[-1]
rest_size = ivy.prod(x.shape) // depth
x_rest_by_depth = ivy.reshape(x, [rest_size, depth])
mean = ivy.mean(x_rest_by_depth, axis=0, keepdims=True)
variance = ivy.var(x_rest_by_depth, axis=0, keepdims=True)
y = ivy.reshape(
scale * (x_rest_by_depth - mean) / ivy.sqrt(variance + epsilon) + offset,
x.shape,
)
float_rest_size = ivy.astype(rest_size, x.dtype)
variance = (
variance * float_rest_size / (float_rest_size - 1)
if rest_size > 1
else variance
)
mean = ivy.reshape(
mean * exponential_avg_factor + old_mean * (1 - exponential_avg_factor),
old_mean.shape,
)
variance = ivy.reshape(
variance * exponential_avg_factor + old_var * (1 - exponential_avg_factor),
old_var.shape,
)
else:
y = scale * (x - old_mean) / ivy.sqrt(old_var + epsilon) + offset
# permute dimensions back
if data_format[1] == "C":
if dims == 4:
y = ivy.permute_dims(y, axes=(0, 3, 1, 2))
elif dims == 5:
y = ivy.permute_dims(y, axes=(0, 4, 1, 2, 3))
if is_training:
return y, mean, variance
else:
return y, old_mean, old_var
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{"2.15.0 and below": ("float16",)},
"tensorflow",
)
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None, input=None):
if input is not None and value is not None:
raise ivy.utils.exceptions.IvyException(
"Cannot specify both 'value' and 'input'."
)
return tf_nn.max_pool2d(
input if input is not None else value,
ksize,
strides,
padding,
data_format=data_format,
)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"float16",
"bfloat16",
)
},
"tensorflow",
)
def separable_conv2d(
input,
depthwise_filter,
pointwise_filter,
strides,
padding,
rate=None,
name=None,
data_format=None,
dilations=None,
):
if rate:
dilations = rate
return tf_nn.separable_conv2d(
input,
depthwise_filter,
pointwise_filter,
strides,
padding,
data_format=data_format,
dilations=dilations,
)
| ivy/ivy/functional/frontends/tensorflow/compat/v1/nn.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/compat/v1/nn.py",
"repo_id": "ivy",
"token_count": 2042
} | 42 |
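Below is a hedged usage sketch for the compat.v1 fused_batch_norm frontend in the row above; the shapes, the random input, and the tensorflow backend choice are assumptions.

import ivy
from ivy.functional.frontends.tensorflow.compat.v1.nn import fused_batch_norm

ivy.set_backend("tensorflow")  # assumed backend
x = ivy.random_normal(shape=(2, 4, 4, 3), dtype="float32")  # NHWC input
scale = ivy.ones((3,), dtype="float32")
offset = ivy.zeros((3,), dtype="float32")
mean = ivy.zeros((3,), dtype="float32")
variance = ivy.ones((3,), dtype="float32")
y, batch_mean, batch_var = fused_batch_norm(
    x, scale, offset, mean=mean, variance=variance, is_training=True
)
print(y.shape)  # same shape as x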
# local
import ivy.functional.frontends.tensorflow as tf_frontend
class ResourceVariable(tf_frontend.Variable):
pass
| ivy/ivy/functional/frontends/tensorflow/python/ops/resource_variable_ops.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/python/ops/resource_variable_ops.py",
"repo_id": "ivy",
"token_count": 39
} | 43 |
# local
import ivy
import ivy.functional.frontends.torch as torch_frontend
_default_dtype = torch_frontend.float32
def can_cast(from_, to):
from_str = str(from_)
to_str = str(to)
if "float" in from_str and "bool" in to_str:
return False
if "float" in from_str and "int" in to_str:
return False
if "uint" in from_str and ("int" in to_str and "u" not in to_str):
if ivy.dtype_bits(to) < ivy.dtype_bits(from_):
return False
if "complex" in from_str and ("float" in to_str or "int" in to_str):
return False
if "bool" in to_str:
return from_str == to_str
return True
def get_default_dtype():
return _default_dtype
def promote_types(type1, type2, /):
return torch_frontend.promote_types_torch(type1, type2)
def set_default_dtype(d):
ivy.utils.assertions.check_elem_in_list(
d,
[
torch_frontend.float64,
torch_frontend.float32,
torch_frontend.float16,
torch_frontend.bfloat16,
],
message="only floating-point types are supported as the default type",
)
global _default_dtype
_default_dtype = d
return
| ivy/ivy/functional/frontends/torch/dtype.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/dtype.py",
"repo_id": "ivy",
"token_count": 541
} | 44 |
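Below is a hedged sketch of the torch frontend dtype helpers in the row above; the comments describe the expected results, not verified output.

import ivy.functional.frontends.torch as torch_frontend
from ivy.functional.frontends.torch.dtype import (
    can_cast,
    get_default_dtype,
    set_default_dtype,
)

print(can_cast(torch_frontend.float32, torch_frontend.int32))  # False: float -> int is rejected
print(can_cast(torch_frontend.int32, torch_frontend.float32))  # True
set_default_dtype(torch_frontend.float64)
print(get_default_dtype())  # the float64 frontend dtype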
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"float16",
)
},
"torch",
)
@to_ivy_arrays_and_back
def batch_norm(
input,
running_mean,
running_var,
weight=None,
bias=None,
training=False,
momentum=0.1,
eps=1e-5,
):
normalized, mean, var = ivy.batch_norm(
input,
running_mean,
running_var,
offset=bias,
scale=weight,
training=training,
eps=eps,
momentum=momentum,
data_format="NCS",
)
if training:
ivy.inplace_update(running_mean, mean)
ivy.inplace_update(running_var, var)
return normalized
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
"torch",
)
def group_norm(input, num_groups, weight=None, bias=None, eps=1e-05):
return ivy.group_norm(
input, num_groups, scale=weight, offset=bias, data_format="NCS", eps=eps
)
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"float16",
)
},
"torch",
)
@to_ivy_arrays_and_back
def instance_norm(
input,
running_mean,
running_var,
weight=None,
bias=None,
use_input_stats=False,
momentum=0.1,
eps=1e-5,
):
normalized, mean, var = ivy.instance_norm(
input,
running_mean,
running_var,
offset=bias,
scale=weight,
training=use_input_stats,
eps=eps,
momentum=momentum,
data_format="NCS",
)
ivy.inplace_update(running_mean, mean)
ivy.inplace_update(running_var, var)
return normalized
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def layer_norm(input, normalized_shape, weight=None, bias=None, eps=1e-05):
shape = ivy.shape(input)
if isinstance(normalized_shape, int) and normalized_shape == shape[-1]:
axis = [-1]
else:
assert ivy.all(ivy.equal(normalized_shape, shape[-len(normalized_shape) :]))
axis = list(range(len(shape) - len(normalized_shape), len(shape)))
return ivy.layer_norm(input, axis, scale=weight, offset=bias, eps=eps)
| ivy/ivy/functional/frontends/torch/nn/functional/norms.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/nn/functional/norms.py",
"repo_id": "ivy",
"token_count": 1189
} | 45 |
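Below is a hedged usage sketch for the torch frontend layer_norm wrapper in the row above; the shapes and the numpy backend are assumptions.

import ivy
from ivy.functional.frontends.torch.nn.functional.norms import layer_norm

ivy.set_backend("numpy")  # assumed backend
x = ivy.random_normal(shape=(2, 3, 4), dtype="float32")
weight = ivy.ones((4,), dtype="float32")
bias = ivy.zeros((4,), dtype="float32")
out = layer_norm(x, 4, weight=weight, bias=bias)  # normalizes over the last axis
print(out.shape)  # (2, 3, 4)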
import sys
import ivy.functional.frontends.torch as torch
import ivy
from ivy.functional.frontends import set_frontend_to_specific_version
from . import ops
tensor = _frontend_array = torch.tensor
# setting to specific version #
# --------------------------- #
if ivy.is_local():
module = ivy.utils._importlib.import_cache[__name__]
else:
module = sys.modules[__name__]
set_frontend_to_specific_version(module)
| ivy/ivy/functional/frontends/torchvision/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torchvision/__init__.py",
"repo_id": "ivy",
"token_count": 141
} | 46 |
from typing import Union, Callable, Any, Iterable, Dict
import ivy
from ivy.utils.backend import current_backend
from ivy.func_wrapper import (
handle_array_like_without_promotion,
to_native_arrays_and_back,
)
def if_else(
cond: Callable,
body_fn: Callable,
orelse_fn: Callable,
vars: Dict[str, Union[ivy.Array, ivy.NativeArray]],
) -> Any:
"""Take a condition function and two functions as input. If the condition
is True, the first function is executed and its result is returned.
Otherwise, the second function is executed and its result is returned.
Parameters
----------
cond
A function returning a boolean.
body_fn
A callable function to be executed if the condition is True.
orelse_fn
A callable function to be executed if the condition is False.
vars
Additional variables to be passed to the functions.
Returns
-------
ret
The result of executing either body_fn or orelse_fn depending on the value of
cond.
Examples
--------
>>> cond = lambda x: True
>>> body_fn = lambda x: x + 1
>>> orelse_fn = lambda x: x - 1
>>> vars = (1,)
>>> result = ivy.if_else(cond, body_fn, orelse_fn, vars)
>>> print(result)
2
>>> cond = lambda x: True
>>> body_fn = lambda x: x * 2
>>> orelse_fn = lambda x: x / 2
>>> vars = ivy.array([1, 2, 3])
>>> result = ivy.if_else(cond, body_fn, orelse_fn, vars=(vars,))
>>> print(result)
ivy.array([0.5, 1.0, 1.5])
"""
@to_native_arrays_and_back
@handle_array_like_without_promotion
def _if_else(cond, body_fn, orelse_fn, **vars):
return current_backend().if_else(cond, body_fn, orelse_fn, vars)
return _if_else(cond, body_fn, orelse_fn, **vars)
def while_loop(
test_fn: Callable,
body_fn: Callable,
vars: Dict[str, Union[ivy.Array, ivy.NativeArray]],
) -> Any:
"""Take a test function, a body function and a set of variables as input.
The body function is executed repeatedly while the test function returns
True.
Parameters
----------
test_fn
A callable function that returns a boolean value representing whether the
loop should continue.
body_fn
A callable function to be executed repeatedly while the test function returns
True.
vars
Additional variables to be passed to the functions.
Returns
-------
ret
The final result of executing the body function.
Examples
--------
>>> i = 0
>>> test_fn = lambda i: i < 3
>>> body_fn = lambda i: i + 1
>>> result = ivy.while_loop(test_fn, body_fn, vars= (i,))
>>> print(result)
(3,)
>>> i = 0
>>> j = 1
>>> test_fn = lambda i, j: i < 3
>>> body_fn = lambda i, j: (i + 1, j * 2)
>>> vars = (i, j)
>>> result = ivy.while_loop(test_fn, body_fn, vars=vars)
>>> print(result)
(3, 8)
"""
@to_native_arrays_and_back
@handle_array_like_without_promotion
def _while_loop(test_fn, body_fn, **vars):
return current_backend().while_loop(test_fn, body_fn, vars)
return _while_loop(test_fn, body_fn, **vars)
def for_loop(
iterable: Iterable[Any],
body_fn: Callable,
vars: Iterable[Union[ivy.Array, ivy.NativeArray]],
):
"""Loops over an iterable, passing the current iteration along with a tuple
of variables into the provided body function.
Parameters
----------
iterable
The iterable to loop over.
body_fn
A function to call each iteration, first taking the iterator value
and then a tuple of extra parameters.
vars
Extra parameters to be passed to body_fn.
Returns
-------
ret
The loop's return value (if any).
Examples
--------
>>> def body_fn(k, args):
>>> print(k+1)
>>> return args
>>>
>>> lst = [5,6]
>>>
>>> ivy.for_loop(lst, body_fn, ())
5
6
"""
iterator = iterable.__iter__()
vars_dict = _tuple_to_dict(vars)
def test_fn(iterator, original_body, vars_dict):
try:
val = iterator.__next__()
except StopIteration:
return False
vars_tuple = original_body(val, _dict_to_tuple(vars_dict))
for k in range(len(vars_tuple)):
vars_dict[k] = vars_tuple[k]
return True
def empty_function(iterator, original_body, vars_dict):
return (iterator, original_body, vars_dict)
packed_vars = (iterator, body_fn, vars_dict)
return _dict_to_tuple(while_loop(test_fn, empty_function, packed_vars)[2])
def try_except(
body1: Callable,
body2: Callable,
vars: Iterable[Union[ivy.Array, ivy.NativeArray]],
):
try:
return body1(*vars)
except Exception as e:
return body2(*vars, e)
# todo (nightcrab) find a better place for these cmp functions
def cmp_is(left, right):
return left is right
def cmp_isnot(left, right):
return left is not right
def _tuple_to_dict(t):
return {k: t[k] for k in range(len(t))}
def _dict_to_tuple(d):
return tuple(d[k] for k in d)
| ivy/ivy/functional/ivy/control_flow_ops.py/0 | {
"file_path": "ivy/ivy/functional/ivy/control_flow_ops.py",
"repo_id": "ivy",
"token_count": 2125
} | 47 |
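try_except in the row above has no docstring example, so here is a minimal hedged sketch of its calling convention; the values and the numpy backend are assumptions.

import ivy
from ivy.functional.ivy.control_flow_ops import try_except

ivy.set_backend("numpy")  # assumed backend

def body(x, y):
    return ivy.divide(x, y)

def handler(x, y, exception):
    # body2 receives the same vars plus the raised exception; only used if body raises
    return ivy.zeros_like(x)

print(try_except(body, handler, (ivy.array([1.0]), ivy.array([2.0]))))  # ivy.array([0.5])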
# global
from typing import Union, Optional
# local
import ivy
from ivy.func_wrapper import (
handle_nestable,
inputs_to_ivy_arrays,
handle_array_like_without_promotion,
handle_array_function,
to_native_arrays_and_back,
)
from ivy.utils.exceptions import handle_exceptions
# log_poisson_loss
@handle_exceptions
@handle_nestable
@inputs_to_ivy_arrays
@handle_array_function
def log_poisson_loss(
true: Union[ivy.Array, ivy.NativeArray],
pred: Union[ivy.Array, ivy.NativeArray],
/,
*,
compute_full_loss: bool = False,
axis: int = -1,
reduction: str = "none",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the log-likelihood loss between the prediction and the target
under the assumption that the target has a Poisson distribution. Caveat: By
default, this is not the exact loss, but the loss minus a constant term
[log(z!)]. That has no effect for optimization, but does not play well with
relative loss comparisons. To compute an approximation of the log factorial
term, specify ``compute_full_loss=True`` to enable Stirling's
Approximation.
Parameters
----------
true
input array containing true labels.
pred
input array containing Predicted labels.
compute_full_loss
whether to compute the full loss. If false, a constant term is dropped
in favor of more efficient optimization. Default: ``False``.
axis
the axis along which to compute the log-likelihood loss. If axis is ``-1``,
the log-likelihood loss will be computed along the last dimension.
Default: ``-1``.
reduction
``'none'``: No reduction will be applied to the output.
``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The binary log-likelihood loss between the given distributions.
Examples
--------
>>> x = ivy.array([0, 0, 1, 0])
>>> y = ivy.array([0.25, 0.25, 0.25, 0.25])
>>> print(ivy.log_poisson_loss(x, y))
ivy.array([1.28402555, 1.28402555, 1.03402555, 1.28402555])
>>> z = ivy.array([0.1, 0.1, 0.7, 0.1])
>>> print(ivy.log_poisson_loss(x, z, reduction='mean'))
ivy.array(1.1573164)
"""
try:
assert true.shape == pred.shape
except AssertionError as e:
raise ValueError(
"`pred` and `true` must have the same shape, received "
f"({pred.shape} vs {true.shape})."
) from e
loss = ivy.exp(pred) - pred * true
if compute_full_loss:
stirling_approx = (
(true * ivy.log(true)) - true + (0.5 * ivy.log(2 * ivy.pi * true))
)
cond = ivy.logical_and(true >= 0.0, true <= 1.0)
loss += ivy.where(cond, ivy.zeros_like(loss), stirling_approx)
if reduction == "sum":
return ivy.sum(loss, axis=axis, out=out)
elif reduction == "mean":
return ivy.mean(loss, axis=axis, out=out)
else:
return ivy.inplace_update(out, loss) if out is not None else loss
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def l1_loss(
input: Union[ivy.Array, ivy.NativeArray],
target: Union[ivy.Array, ivy.NativeArray],
/,
*,
reduction: str = "mean",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""
Compute L1 loss (Mean Absolute Error - MAE) between the input and target values.
Parameters
----------
input : Union[ivy.Array, ivy.NativeArray]
Input array containing input values.
target : Union[ivy.Array, ivy.NativeArray]
Input array containing target values.
reduction : str, optional
Reduction method for the output loss. Options:
"none" (no reduction), "mean" (mean of losses),
"sum" (sum of losses). Default: "mean".
out : Optional[ivy.Array], optional
Optional output array for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ivy.Array
The L1 loss (MAE) between the given input and target values.
Examples
--------
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> y = ivy.array([0.5, 2.5, 2.0])
>>> ivy.l1_loss(x, y)
ivy.array(0.666)
>>> a = ivy.array([[1.0, 2.0], [3.0, 4.0]])
>>> b = ivy.array([[0.5, 1.5], [2.5, 3.5]])
>>> ivy.l1_loss(a, b)
ivy.array(0.5)
"""
loss = ivy.abs(target - input)
if reduction == "sum":
return ivy.sum(loss, out=out)
elif reduction == "mean":
return ivy.mean(loss, out=out)
else:
return ivy.inplace_update(out, loss) if out is not None else loss
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def huber_loss(
true: Union[ivy.Array, ivy.NativeArray],
pred: Union[ivy.Array, ivy.NativeArray],
/,
*,
delta: float = 1.0,
reduction: str = "mean",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the Huber loss (smooth L1 loss) between true and predicted
values.
Parameters
----------
true: array_like
The true (ground truth) values.
pred : array_like
The predicted values by the model.
delta : float, optional
The threshold parameter that determines the point where the loss transitions from
squared error to absolute error. Default is 1.0.
reduction : str, optional
The type of reduction to apply to the loss. Possible values are "mean" (default)
and "sum".
out : array_like, optional
Optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret : array_like
The Huber loss between the true and predicted values.
Examples
--------
>>> true = ivy.array([2, 4, 7, 1])
>>> pred = ivy.array([2.5, 3.5, 8, 0.8])
>>> huber_loss(true, pred, delta=1.0)
ivy.array([0.125, 0.125, 0.5 , 0.125])
>>> huber_loss(true, pred, delta=2.0)
ivy.array([0.125, 0.125, 0.5 , 0.2 ])
>>> huber_loss(true, pred, delta=0.5)
ivy.array([0.25 , 0.25 , 0. , 0.125])
"""
abs_diff = ivy.abs(true - pred)
quadratic_loss = 0.5 * (abs_diff**2)
linear_loss = delta * (abs_diff - 0.5 * delta)
loss = ivy.where(abs_diff <= delta, quadratic_loss, linear_loss)
if reduction == "sum":
return ivy.sum(loss, out=out)
elif reduction == "mean":
return ivy.mean(loss, out=out)
else:
return ivy.inplace_update(out, loss) if out is not None else loss
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def smooth_l1_loss(
input: Union[ivy.Array, ivy.NativeArray],
target: Union[ivy.Array, ivy.NativeArray],
/,
*,
beta: float = 1.0,
reduction: str = "mean",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the smooth L1 loss between two input tensors.
Parameters
----------
input : array_like
First input tensor.
target : array_like
Second input tensor.
beta : float, optional
The smooth parameter. Default is 1.0.
reduction : str, optional
Specifies the type of reduction to apply to the output.
Should be one of 'none', 'sum', or 'mean'. Default is 'mean'.
out : array, optional
Optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret : array
The smooth_l1_loss between the two input tensors.
Examples
--------
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> y = ivy.array([2.5, 1.8, 3.2])
>>> ivy.smooth_l1_loss(x, y, beta=1.0)
ivy.array(0.3467)
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> y = ivy.array([6.0, 2.0, 3.0])
>>> ivy.smooth_l1_loss(x, y, beta=1.0)
ivy.array(1.5)
>>> input = ivy.array([2.0, 3.0, 5.0, 7.0])
>>> target = ivy.array([2.5, 3.5, 5.5, 6.5])
>>> loss = ivy.smooth_l1_loss(input, target, beta=1.5, reduction='sum')
ivy.array(0.5)
>>> input = ivy.array([0.8, 1.2, 2.5, 3.7])
>>> target = ivy.array([0.9, 1.0, 2.3, 3.6])
>>> loss = ivy.smooth_l1_loss(input, target, beta=0.5, reduction='none')
ivy.array([0.0133, 0.0250, 0.0056, 0.0025])
>>> input = ivy.array([2.0, 3.0, 5.0, 7.0])
>>> target = ivy.array([2.5, 3.5, 5.5, 6.5])
>>> loss = ivy.smooth_l1_loss(input, target, beta=0.2, reduction='mean')
ivy.array(0.025)
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([1.5, 2.2, 3.7])
>>> y = ivy.native_array([2.1, 1.9, 3.5])
>>> print(ivy.smooth_l1_loss(x, y, beta=0.5))
ivy.array(0.0675)
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.0, 2.0, 3.0]))
>>> y = ivy.Container(a=ivy.array([2.5, 1.8, 3.2]))
>>> print(ivy.smooth_l1_loss(x, y, beta=1.0))
{
a: ivy.array(0.3467)
}
With a mix of :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> y = ivy.native_array([6.0, 2.0, 3.0])
>>> print(ivy.smooth_l1_loss(x, y, beta=0.5))
ivy.array(1.5)
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> y = ivy.Container(a=ivy.array([6.0, 2.0, 3.0]))
>>> print(ivy.smooth_l1_loss(x, y, beta=1.0))
{
a: ivy.array(1.5)
}
Instance Method Examples
~~~~~~~~~~~~~~~~~~~~~~~~
With :class:`ivy.Array` input:
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> y = ivy.array([2.5, 1.8, 3.2])
>>> print(x.smooth_l1_loss(y, beta=1.0))
ivy.array(0.3467)
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.0, 2.0, 3.0]))
>>> y = ivy.Container(a=ivy.array([2.5, 1.8, 3.2]))
>>> print(x.smooth_l1_loss(y, beta=1.0))
{
a: ivy.array(0.3467)
}
"""
if beta < 1e-5:
# if beta == 0, will result in nan gradients when
# the chain rule is applied due to pytorch implementation details
# (the False branch "0.5 * n ** 2 / 0" has an incoming gradient of
# zeros, rather than "no gradient"). To avoid this issue, we define
# small values of beta to be exactly l1 loss.
loss = ivy.abs(input - target)
else:
n = ivy.abs(input - target)
cond = n < beta
loss = ivy.where(cond, 0.5 * n**2 / beta, n - 0.5 * beta)
if reduction == "mean":
return ivy.mean(loss, out=out)
elif reduction == "sum":
return ivy.sum(loss, out=out)
elif reduction == "none":
return ivy.inplace_update(out, loss) if out is not None else loss
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def soft_margin_loss(
input: Union[ivy.Array, ivy.NativeArray],
target: Union[ivy.Array, ivy.NativeArray],
/,
*,
reduction: str = "mean",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the soft-margin hinge loss between predicted scores and true
binary labels.
Parameters
----------
input : array_like
True binary labels, of shape (batch_size,).
target : array_like
Predicted scores, of shape (batch_size,).
reduction : {'mean', 'sum', 'none'}, optional
Type of reduction to apply to the output. Default is 'mean'.
out : array_like, optional
Optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret : array
The soft-margin hinge loss between the predicted scores
and true binary labels.
Examples
--------
>>> input = ivy.array([1, 0, 1, 0])
>>> target = ivy.array([0.8, 0.2, -0.6, 1.5])
>>> ivy.soft_margin_loss(input, target)
ivy.array(0.6987)
>>> input = ivy.array([1, 1, 0, 0])
>>> target = ivy.array([0.8, 0.7, 0.2, 0.1])
>>> ivy.soft_margin_loss(input, target, reduction='sum')
ivy.array(2.1606)
>>> input = ivy.array([1, 1, 0, 0])
>>> target = ivy.array([0.8, 0.7, 0.2, 0.1])
>>> ivy.soft_margin_loss(input, target, reduction='none')
ivy.array([0.3711, 0.4032, 0.6931, 0.6931])
"""
loss = ivy.sum(ivy.log1p(ivy.exp(-input * target))) / input.size
if reduction == "sum":
return ivy.sum(loss, out=out)
elif reduction == "mean":
return ivy.mean(loss, out=out)
else:
return ivy.inplace_update(out, loss) if out is not None else loss
@handle_exceptions
@handle_nestable
@inputs_to_ivy_arrays
@handle_array_function
def kl_div(
input: Union[ivy.Array, ivy.NativeArray],
target: Union[ivy.Array, ivy.NativeArray],
/,
*,
reduction: str = "mean",
log_target=False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the Kullback-Leibler divergence loss between two input tensors
(conventionally, probability distributions).
Parameters
----------
input : array_like
Tensor of arbitrary shape in log-probabilities
target : array_like
Tensor of the same shape as input. See log_target for
the target’s interpretation
reduction : {'mean', 'sum', 'batchmean', 'none'}, optional
Type of reduction to apply to the output. Default is 'mean'.
log_target : bool
A flag indicating whether target is passed in the log space.
It is recommended to pass certain distributions (like softmax)
in the log space to avoid numerical issues caused by explicit log.
Default: False
Returns
-------
ret : array
The Kullback-Leibler divergence loss between the two input tensors.
Examples
--------
>>> input = ivy.array([[0.2, 0.8], [0.5, 0.5]])
>>> target = ivy.array([[0.6, 0.4], [0.3, 0.7]])
>>> ivy.kl_div(input, target)
ivy.array(-0.555969)
>>> input = ivy.array([[0.2, 0.8], [0.5, 0.5]])
>>> target = ivy.array([[0.6, 0.4], [0.3, 0.7]])
>>> ivy.kl_div(input, target, reduction='sum')
ivy.array(-2.223876)
>>> input = ivy.array([[0.2, 0.8], [0.5, 0.5]])
>>> target = ivy.array([[0.6, 0.4], [0.3, 0.7]])
>>> ivy.kl_div(input, target, reduction='batchmean')
ivy.array(-1.111938)
>>> input = ivy.array([[0.2, 0.8], [0.5, 0.5]])
>>> target = ivy.array([[0.6, 0.4], [0.3, 0.7]])
>>> ivy.kl_div(input, target, reduction='none')
ivy.array([[-0.42649534, -0.68651628],
[-0.51119184, -0.59967244]])
"""
if not log_target: # default
loss_pointwise = target * (ivy.log(target) - input)
else:
loss_pointwise = ivy.exp(target) * (target - input)
if reduction == "mean": # default
loss = ivy.mean(loss_pointwise)
elif reduction == "batchmean": # mathematically correct
loss = ivy.sum(loss_pointwise) / input.shape[0]
elif reduction == "sum":
loss = ivy.sum(loss_pointwise)
else: # reduction == "none"
loss = loss_pointwise
return ivy.inplace_update(out, loss) if out is not None else loss
kl_div.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_out_argument",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
def poisson_nll_loss(
input: Union[ivy.Array, ivy.NativeArray],
target: Union[ivy.Array, ivy.NativeArray],
*,
log_input: bool = True,
full: bool = False,
eps: float = 1e-8,
reduction: str = "mean",
) -> ivy.Array:
r"""Compute the Poisson Negative Log Likelihood Loss.
This function calculates the negative log likelihood loss
between the `input` and `target` under the assumption that
the target follows a Poisson distribution. By default, the loss
is not the exact loss, but the loss minus a constant term [log(z!)].
This omission does not affect optimization but can be significant for
relative loss comparisons. Stirling's approximation is used to
approximate the log factorial term when `full` is set to True.
Parameters
----------
input
Expectation of the underlying Poisson distribution.
target
Random sample from the Poisson distribution described by the input.
log_input
If `True`, the loss is computed as
:math:`exp(input) - target * input`. If `False`, the loss is computed as
:math:`input - target * log(input + eps)`. Default is `True`.
full
Whether to compute the full loss, i.e., to add the Stirling approximation term
:math:`target * log(target) - target + 0.5 * log(2 * pi * target)`.
Default is `False`.
eps
Small value to prevent evaluation of `log(0)` when `log_input` is `False`.
Default is 1e-8.
reduction
Specifies the reduction applied to the output.
Options are 'none', 'mean', or 'sum'.
'none': no reduction will be applied.
'mean': the output will be averaged.
'sum': the output will be summed.
Default is 'mean'.
Returns
-------
ret
An array of the same shape as `input` representing
the Poisson Negative Log Likelihood Loss.
Raises
------
ValueError
If the `input` and `target` tensors do not have the same shape.
Examples
--------
>>> input_tensor = ivy.array([1, 2, 3, 4], dtype=ivy.float64)
>>> target_tensor = ivy.array([2, 2, 2, 2], dtype=ivy.float64)
>>> loss = ivy.poisson_nll_loss(input_tensor, target_tensor, log_input=False)
>>> print(loss)
ivy.array(0.91097307)
"""
return ivy.current_backend().poisson_nll_loss(
input,
target,
log_input=log_input,
full=full,
eps=eps,
reduction=reduction,
)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
def hinge_embedding_loss(
input: Union[ivy.Array, ivy.NativeArray],
target: Union[ivy.Array, ivy.NativeArray],
*,
margin: float = 1.0,
reduction: str = "mean",
) -> ivy.Array:
r"""Measures loss from input `x` and label `y` with values 1 or -1. It
evaluates if two inputs are similar or not, often used for embedding or
semi-supervised learning.
Loss for the `n`-th sample:
.. math::
l_n = \begin{cases}
x_n, & \text{if}\; y_n = 1,\\
\max \{0, margin - x_n\}, & \text{if}\; y_n = -1,
\end{cases}
Total loss:
.. math::
\ell(x, y) = \begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
where :math:`L = \{l_1,\dots,l_N\}^\top` .
Parameters
----------
input
Input tensor with dtype float.
The shape is [N, \*], where N is batch size and `\*` represents
any number of additional dimensions.
target
Label tensor containing 1 or -1 with dtype float32 or float64.
Its shape matches that of the input.
margin
Sets the hyperparameter margin. Determines the necessary input size
for hinge_embedding_loss calculations when label is -1. Inputs smaller
than the margin are minimized with hinge_embedding_loss.
Default is 1.0.
reduction
Specifies how to aggregate the loss across the batch. Options are:
- ``'none'``: Returns the unreduced loss.
- ``'mean'``: Returns the mean loss.
- ``'sum'``: Returns the summed loss.
Default is ``'mean'``.
Shape
-----
- Input: :math:`(*)` where :math:`*` means, any number of dimensions. \
The sum operation operates over all the elements.
- Target: :math:`(*)`, same shape as the input
- Output: scalar. If :attr:`reduction` is ``'none'``,
then same shape as the input
Returns
-------
ret
Hinge embedding loss calculated from the input and label,
shaped based on the reduction method.
Examples
--------
>>> input_tensor = ivy.array([1, 2, 3, 4], dtype=ivy.float64)
>>> target_tensor = ivy.array([1, 1, 1, 1], dtype=ivy.float64)
>>> loss = ivy.hinge_embedding_loss(input_tensor, target_tensor, reduction="none")
>>> loss
ivy.array([1., 2., 3., 4.])
>>> input_tensor = ivy.array([21, 22], dtype=ivy.float32)
>>> target_tensor = ivy.array([-1, 1], dtype=ivy.float32)
>>> loss = ivy.hinge_embedding_loss(input_tensor,target_tensor,
... margin=2.0, reduction="sum")
>>> loss
ivy.array(22.)
"""
return ivy.current_backend().hinge_embedding_loss(
input,
target,
margin=margin,
reduction=reduction,
)
| ivy/ivy/functional/ivy/experimental/losses.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/losses.py",
"repo_id": "ivy",
"token_count": 9104
} | 48 |
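Below is a short hedged check of l1_loss against its definition, mirroring the docstring example in the row above; the numpy backend is an assumption.

import ivy

ivy.set_backend("numpy")  # assumed backend
x = ivy.array([1.0, 2.0, 3.0])
y = ivy.array([0.5, 2.5, 2.0])
# |x - y| = [0.5, 0.5, 1.0], so the mean is 0.666... and the sum is 2.0
print(ivy.l1_loss(x, y))                   # default "mean" reduction
print(ivy.l1_loss(x, y, reduction="sum"))  # "sum" reduction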
"""Collection of Ivy loss functions."""
# local
import ivy
from typing import Optional, Union
from ivy.func_wrapper import (
handle_array_function,
handle_nestable,
handle_array_like_without_promotion,
inputs_to_ivy_arrays,
)
from ivy.utils.exceptions import handle_exceptions
# Helpers #
# ------- #
def _reduce_loss(red, loss, axis, out):
if red == "sum":
return ivy.negative(ivy.sum(loss, axis=axis), out=out)
elif red == "mean":
return ivy.negative(ivy.mean(loss, axis=axis), out=out)
else:
return ivy.negative(loss, out=out)
# Extra #
# ------#
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def cross_entropy(
true: Union[ivy.Array, ivy.NativeArray],
pred: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: int = -1,
epsilon: float = 1e-7,
reduction: str = "mean",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute cross-entropy between predicted and true discrete distributions.
Parameters
----------
true
input array containing true labels.
pred
input array containing the predicted labels.
axis
the axis along which to compute the cross-entropy. If axis is ``-1``,
the cross-entropy will be computed along the last dimension. Default: ``-1``.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when calculating
the loss. If epsilon is ``0``, no smoothing will be applied. Default: ``1e-7``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The cross-entropy loss between the given distributions
Examples
--------
>>> x = ivy.array([0, 0, 1, 0])
>>> y = ivy.array([0.25, 0.25, 0.25, 0.25])
>>> print(ivy.cross_entropy(x, y))
ivy.array(0.34657359)
>>> z = ivy.array([0.1, 0.1, 0.7, 0.1])
>>> print(ivy.cross_entropy(x, z))
ivy.array(0.08916873)
"""
ivy.utils.assertions.check_elem_in_list(reduction, ["none", "sum", "mean"])
pred = ivy.clip(pred, epsilon, 1 - epsilon)
log_pred = ivy.log(pred)
return _reduce_loss(reduction, log_pred * true, axis, out)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def binary_cross_entropy(
true: Union[ivy.Array, ivy.NativeArray],
pred: Union[ivy.Array, ivy.NativeArray],
/,
*,
from_logits: bool = False,
epsilon: float = 0.0,
reduction: str = "mean",
pos_weight: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
axis: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the binary cross entropy loss.
Parameters
----------
true
input array containing true labels.
pred
input array containing Predicted labels.
from_logits
Whether `pred` is expected to be a logits tensor. By
default, we assume that `pred` encodes a probability distribution.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when calculating the
loss. If epsilon is ``0``, no smoothing will be applied. Default: ``0``.
reduction
``'none'``: No reduction will be applied to the output.
``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'mean'``.
pos_weight
a weight for positive examples. Must be an array with length equal to the number
of classes.
axis
Axis along which to compute crossentropy.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The binary cross entropy between the given distributions.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0, 1, 0, 0])
>>> y = ivy.array([0.2, 0.8, 0.3, 0.8])
>>> z = ivy.binary_cross_entropy(x, y)
>>> print(z)
ivy.array(0.60309976)
>>> x = ivy.array([[0, 1, 1, 0]])
>>> y = ivy.array([[2.6, 6.2, 3.7, 5.3]])
>>> z = ivy.binary_cross_entropy(x, y, reduction='mean')
>>> print(z)
ivy.array(7.6666193)
>>> x = ivy.array([[0, 1, 1, 0]])
>>> y = ivy.array([[2.6, 6.2, 3.7, 5.3]])
>>> pos_weight = ivy.array([1, 2, 3, 4])
>>> z = ivy.binary_cross_entropy(x, y, pos_weight=pos_weight, from_logits=True)
ivy.array(2.01348412)
>>> x = ivy.array([[0, 1, 1, 0]])
>>> y = ivy.array([[2.6, 6.2, 3.7, 5.3]])
>>> pos_weight = ivy.array([1, 2, 3, 4])
>>> z = ivy.binary_cross_entropy(x, y, pos_weight=pos_weight, from_logits=True, reduction='sum', axis=1)
>>> print(z)
ivy.array([8.05393649])
>>> x = ivy.array([[0, 1, 1, 0]])
>>> y = ivy.array([[2.6, 6.2, 3.7, 5.3]])
>>> z = ivy.binary_cross_entropy(x, y, reduction='none', epsilon=0.5)
>>> print(z)
ivy.array([[11.49992943, 3.83330965, 3.83330965, 11.49992943]])
>>> x = ivy.array([[0, 1, 0, 0]])
>>> y = ivy.array([[0.6, 0.2, 0.7, 0.3]])
>>> z = ivy.binary_cross_entropy(x, y, epsilon=1e-3)
>>> print(z)
ivy.array(1.02136981)
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([0, 1, 0, 1])
>>> y = ivy.native_array([0.2, 0.7, 0.2, 0.6])
>>> z = ivy.binary_cross_entropy(x, y)
>>> print(z)
ivy.array(0.32844672)
With a mix of :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x = ivy.array([0, 0, 1, 1])
>>> y = ivy.native_array([0.1, 0.2, 0.8, 0.6])
>>> z = ivy.binary_cross_entropy(x, y)
>>> print(z)
ivy.array(0.26561815)
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 0, 0]),b=ivy.array([0, 0, 1]))
>>> y = ivy.Container(a=ivy.array([0.6, 0.2, 0.3]),b=ivy.array([0.8, 0.2, 0.2]))
>>> z = ivy.binary_cross_entropy(x, y)
>>> print(z)
{
a: ivy.array(0.36354783),
b: ivy.array(1.14733934)
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.array([1 , 1, 0])
>>> y = ivy.Container(a=ivy.array([0.7, 0.8, 0.2]))
>>> z = ivy.binary_cross_entropy(x, y)
>>> print(z)
{
a: ivy.array(0.26765382)
}
Instance Method Examples
~~~~~~~~~~~~~~~~~~~~~~~~
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([1, 0, 0, 0])
>>> y = ivy.array([0.8, 0.2, 0.2, 0.2])
>>> z = ivy.binary_cross_entropy(x, y)
>>> print(z)
ivy.array(0.22314337)
""" # noqa: E501
ivy.utils.assertions.check_elem_in_list(reduction, ["none", "sum", "mean"])
if not (0.0 <= epsilon <= 1.0):
raise ValueError("epsilon should be a float in [0, 1]")
if not from_logits and pos_weight is not None:
raise ValueError("pos_weight is only allowed when from_logits is set to True")
true = true.astype(pred.dtype)
epsilon = ivy.asarray(epsilon, dtype=pred.dtype)
true = true * (1.0 - epsilon) + 0.5 * epsilon
if from_logits:
if pos_weight is not None:
num_classes = pred.shape[0] if len(pred.shape) == 1 else pred.shape[1]
if pos_weight.shape[0] != num_classes:
raise ValueError(
"pos_weight must have the same size as the number of classes in"
" pred at non-singleton dimension 1"
)
epsilon_ = 1e-7
pred = ivy.sigmoid(pred)
pred = ivy.clip(pred, epsilon_, 1 - epsilon_)
loss = -(
true * -ivy.log(pred) * pos_weight + (1 - true) * -ivy.log(1 - pred)
)
else:
zeros = ivy.zeros_like(pred, dtype=pred.dtype)
cond = pred >= zeros
relu_logits = ivy.where(cond, pred, zeros)
neg_abs_logits = ivy.where(cond, -pred, pred)
loss = (
ivy.add(relu_logits - pred * true, ivy.log1p(ivy.exp(neg_abs_logits)))
* -1
)
else:
epsilon_ = 1e-7
pred = ivy.clip(pred, epsilon_, 1 - epsilon_)
loss = true * ivy.log(pred + epsilon_) + (1 - true) * ivy.log(
1 - pred + epsilon_
)
return _reduce_loss(reduction, loss, axis, out)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def sparse_cross_entropy(
true: Union[ivy.Array, ivy.NativeArray],
pred: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: int = -1,
epsilon: float = 1e-7,
reduction: str = "mean",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute sparse cross entropy between logits and labels.
Parameters
----------
true
input array containing the true labels as logits.
pred
input array containing the predicted labels as logits.
axis
the axis along which to compute the cross-entropy. If axis is ``-1``, the
cross-entropy will be computed along the last dimension. Default: ``-1``.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when calculating the
loss. If epsilon is ``0``, no smoothing will be applied. Default: ``1e-7``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The sparse cross-entropy loss between the given distributions
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([2])
>>> y = ivy.array([0.1, 0.1, 0.7, 0.1])
>>> print(ivy.sparse_cross_entropy(x, y))
ivy.array([0.08916873])
>>> x = ivy.array([3])
>>> y = ivy.array([0.1, 0.1, 0.7, 0.1])
>>> print(ivy.cross_entropy(x, y))
ivy.array(5.44832274)
>>> x = ivy.array([2,3])
>>> y = ivy.array([0.1, 0.1])
>>> print(ivy.cross_entropy(x, y))
ivy.array(5.75646281)
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([4])
>>> y = ivy.native_array([0.1, 0.2, 0.1, 0.1, 0.5])
>>> print(ivy.sparse_cross_entropy(x, y))
ivy.array([0.13862944])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([4]))
>>> y = ivy.Container(a=ivy.array([0.1, 0.2, 0.1, 0.1, 0.5]))
>>> print(ivy.sparse_cross_entropy(x, y))
{
a: ivy.array([0.13862944])
}
With a mix of :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x = ivy.array([0])
>>> y = ivy.native_array([0.1, 0.2, 0.6, 0.1])
>>> print(ivy.sparse_cross_entropy(x,y))
ivy.array([0.57564628])
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.array([0])
>>> y = ivy.Container(a=ivy.array([0.1, 0.2, 0.6, 0.1]))
>>> print(ivy.sparse_cross_entropy(x,y))
{
a: ivy.array([0.57564628])
}
Instance Method Examples
~~~~~~~~~~~~~~~~~~~~~~~~
With :class:`ivy.Array` input:
>>> x = ivy.array([2])
>>> y = ivy.array([0.1, 0.1, 0.7, 0.1])
>>> print(x.sparse_cross_entropy(y))
ivy.array([0.08916873])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([2]))
>>> y = ivy.Container(a=ivy.array([0.1, 0.1, 0.7, 0.1]))
>>> print(x.sparse_cross_entropy(y))
{
a: ivy.array([0.08916873])
}
"""
ivy.utils.assertions.check_elem_in_list(reduction, ["none", "sum", "mean"])
true = ivy.one_hot(true, pred.shape[axis])
return ivy.cross_entropy(
true, pred, axis=axis, epsilon=epsilon, reduction=reduction, out=out
)
| ivy/ivy/functional/ivy/losses.py/0 | {
"file_path": "ivy/ivy/functional/ivy/losses.py",
"repo_id": "ivy",
"token_count": 5359
} | 49 |
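Below is a hedged sketch of the relationship between cross_entropy and sparse_cross_entropy in the row above: the sparse variant one-hot encodes its labels and delegates to cross_entropy. The numpy backend is an assumption.

import ivy

ivy.set_backend("numpy")  # assumed backend
labels = ivy.array([2])
probs = ivy.array([0.1, 0.1, 0.7, 0.1])
dense = ivy.cross_entropy(ivy.one_hot(labels, 4), probs)
sparse = ivy.sparse_cross_entropy(labels, probs)
print(dense, sparse)  # both should be close to ivy.array([0.08916873])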
"""Collection of Ivy neural network layers as stateful classes."""
# flake8: noqa
# local
import ivy
from ivy.func_wrapper import handle_nestable
from ivy.stateful.initializers import GlorotUniform, Zeros
from ivy.stateful.module import Module
# ToDo: update docstrings and typehints according to ivy\layers
# Linear #
# -------#
class Linear(Module):
def __init__(
self,
input_channels,
output_channels,
/,
*,
weight_initializer=GlorotUniform(),
bias_initializer=Zeros(),
with_bias=True,
device=None,
v=None,
dtype=None,
):
"""Linear layer, also referred to as dense or fully connected. The
layer receives tensors with input_channels last dimension and returns a
new tensor with output_channels last dimension, following matrix
multiplication with the weight matrix and addition with the bias
vector.
Parameters
----------
input_channels
Number of input channels for the layer.
output_channels
Number of output channels for the layer.
weight_initializer
Initializer for the weights. Default is GlorotUniform.
bias_initializer
Initializer for the bias. Default is Zeros.
with_bias
Whether or not to include a bias term, default is ``True``.
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
v
the variables for the linear layer, as a container, constructed internally
by default.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
self._input_channels = input_channels
self._output_channels = output_channels
self._w_shape = (output_channels, input_channels)
self._b_shape = (output_channels,)
self._w_init = weight_initializer
self._b_init = bias_initializer
self._with_bias = with_bias
Module.__init__(self, device=device, v=v, dtype=dtype)
def _create_variables(self, device=None, dtype=None):
"""Create internal variables for the layer.
Parameters
----------
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
device = ivy.default(device, self.device)
dtype = ivy.default(dtype, self.dtype)
v = {
"w": self._w_init.create_variables(
self._w_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
)
}
if self._with_bias:
v = dict(
**v,
b=self._b_init.create_variables(
self._b_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
),
)
return v
def _forward(self, x):
"""Perform forward pass of the Linear layer.
Parameters
----------
x
Inputs to process *[batch_shape, in]*.
Returns
-------
ret
The outputs following the linear operation and bias addition
*[batch_shape, out]*
"""
return ivy.linear(x, self.v.w, bias=self.v.b if self._with_bias else None)
def _extra_repr(self) -> str:
return (
f"in_features={self._input_channels}, out_features={self._output_channels},"
f" with_bias={self._with_bias is True}"
)
# Dropout #
# --------#
class Dropout(Module):
def __init__(
self,
prob,
scale: bool = True,
dtype=None,
training: bool = True,
):
"""Dropout layer. The layer randomly zeroes some of the elements of the
input tensor with probability p using samples from a Bernoulli
distribution.
Parameters
----------
prob
The probability of zeroing out each array element.
scale
Whether to scale the output by 1/(1-prob), default is ``True``.
dtype
the desired data type of the internal variables to be created.
Default is ``None``.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
"""
self._prob = prob
self._scale = scale
Module.__init__(self, device=None, v=None, dtype=dtype, training=training)
def _create_variables(self, device, dtype=None):
"""Create internal variables for the layer.
Parameters
----------
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
dtype
the desired data type of the internal variables to be created.
Default is ``None``.
"""
return {}
def _forward(self, inputs, dtype=None):
"""Perform forward pass of the Linear layer.
Parameters
----------
inputs
Inputs to process *[batch_shape, in]*.
dtype
the desired data type of the internal variables to be created.
Default is ``None``.
Returns
-------
ret
The outputs following dropout
*[batch_shape, out]*
"""
return ivy.dropout(
inputs, self._prob, scale=self._scale, training=self.training, dtype=dtype
)
def _extra_repr(self) -> str:
s = "prob={prob}"
if not self._scale:
s += ", scale={scale}"
return s.format(prob=self._prob, scale=self._scale)
# Attention #
# ----------#
class MultiHeadAttention(Module):
def __init__(
self,
embed_dim=None,
/,
*,
key_dim=None,
value_dim=None,
num_heads=8,
head_dim=None,
dropout_rate=0.0,
use_proj_bias=True,
attention_axes=None,
scale=None,
device=None,
v=None,
build_mode="on_init",
dtype=None,
training=True,
):
"""Multi Head Attention layer.
Parameters
----------
embed_dim
The expected feature size in the input and output.
key_dim
The input feature size for key. If None, assumed equal to `embed_dim`.
Default None.
value_dim
The input feature size for value. If None, assumed equal to `embed_dim`.
Default None.
num_heads:
Number of parallel attention heads. Note that ``embed_dim`` will be split
across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
Default is 8.
head_dim
Size of each attention head for query and key.
Note that only two out of (``embed_dim``, ``num_heads``, and ``head_dim``) should be provided
Default is None.
dropout_rate
The dropout probability used on attention weights to drop some attention targets. 0 for no dropout.
Default is 0.
use_proj_bias
If specified, adds bias to input / output projection layers.
Default is True.
attention_axes
axes over which the attention is applied. `None` means attention over all axes, but batch, heads, and features.
Default is None.
scale
The value by which to scale the query-key similarity measure.
Default is head_dim^-0.5
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu' etc.
Default is cpu.
v
the variables for the attention layer, as a container,
constructed internally by default.
build_mode
How the Module is built, either on initialization (now),
explicitly by the user by calling
build(), or the first time the __call__ method is run.
Default is on initialization.
dtype
the desired data type of the internal variables to be created if not provided.
Default is ``None``.
training
If True, dropout is used, otherwise dropout is not activated.
"""
# proj
if num_heads and head_dim:
self._inner_dim = num_heads * head_dim
else:
self._inner_dim = embed_dim
self._embed_dim = embed_dim if embed_dim else num_heads * head_dim
self._key_dim = key_dim if key_dim else self._embed_dim
self._value_dim = value_dim if value_dim else self._embed_dim
self._num_heads = num_heads if num_heads else embed_dim // head_dim
self._head_dim = head_dim if head_dim else embed_dim // num_heads
self._dropout_rate = dropout_rate
self._use_proj_bias = use_proj_bias
self._attention_axes = attention_axes
self._scale = ivy.default(scale, self._head_dim**-0.5)
self._qkv_same_embed_dim = (
self._key_dim == self._embed_dim and self._value_dim == self._embed_dim
)
ivy.Module.__init__(
self,
device=device,
v=v,
build_mode=build_mode,
with_partial_v=True,
dtype=dtype,
training=training,
)
def _create_variables(self, device=None, dtype=None):
"""
Parameters
----------
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
device = ivy.default(device, self.device)
dtype = ivy.default(dtype, self.dtype)
v = dict(
out_proj_weights=GlorotUniform().create_variables(
(self._embed_dim, self._inner_dim),
device,
self._embed_dim,
self._inner_dim,
dtype=dtype,
),
)
if self._qkv_same_embed_dim:
v = dict(
**v,
in_proj_weights=GlorotUniform().create_variables(
(self._inner_dim * 3, self._embed_dim),
device,
self._inner_dim * 3,
self._embed_dim,
dtype=dtype,
),
)
else:
v = dict(
**v,
q_proj_weights=GlorotUniform().create_variables(
(self._inner_dim, self._embed_dim),
device,
self._inner_dim,
self._embed_dim,
dtype=dtype,
),
k_proj_weights=GlorotUniform().create_variables(
(self._inner_dim, self._key_dim),
device,
self._inner_dim,
self._key_dim,
dtype=dtype,
),
v_proj_weights=GlorotUniform().create_variables(
(self._inner_dim, self._value_dim),
device,
self._inner_dim,
self._value_dim,
dtype=dtype,
),
)
if self._use_proj_bias:
v = dict(
**v,
in_proj_bias=Zeros().create_variables(
self._inner_dim * 3,
device,
dtype=dtype,
),
out_proj_bias=Zeros().create_variables(
self._embed_dim,
device,
dtype=dtype,
),
)
return v
def _forward(
self,
query,
key=None,
value=None,
/,
*,
attention_mask=None,
is_causal=False,
return_attention_weights=False,
average_attention_weights=True,
):
"""Perform forward pass of the MultiHeadAttention layer.
Parameters
----------
query
query embeddings *[batch_shape,num_queries,query_dim]*.
key
key embeddings *[batch_shape,num_queries,key_dim]*.
value
value embeddings *[batch_shape,num_queries,value_dim]*.
attention_mask
The mask to apply to the query-key values. Default is ``None``.
*[batch_shape,num_queries,num_keys]*.
is_causal
            If True, uses a causal attention mask and ignores the provided attention_mask.
        return_attention_weights
            If True, returns attention_weights alongside the output
            as a tuple (output, attention_weights). Defaults to `False`.
average_attention_weights
If true, indicates that the returned ``attention_weights`` should be averaged across
heads. Otherwise, ``attention_weights`` are provided separately per head. Note that this flag only has an
effect when ``return_attention_weights=True``. Default: ``True`` (i.e. average weights across heads)
Returns
-------
ret
The output following application of multi-head attention.
*[batch_shape,num_queries,out_feat_dim]* if input is batched
            otherwise *[num_queries, out_feat_dim]*.
"""
return ivy.multi_head_attention(
query,
key=key,
value=value,
num_heads=self._num_heads,
scale=self._scale,
attention_mask=attention_mask,
in_proj_weights=(
self.v.in_proj_weights if self._qkv_same_embed_dim else None
),
q_proj_weights=(
None if self._qkv_same_embed_dim else self.v.q_proj_weights
),
k_proj_weights=(
None if self._qkv_same_embed_dim else self.v.k_proj_weights
),
v_proj_weights=(
None if self._qkv_same_embed_dim else self.v.v_proj_weights
),
out_proj_weights=self.v.out_proj_weights,
in_proj_bias=self.v.in_proj_bias if self._use_proj_bias else None,
out_proj_bias=self.v.out_proj_bias if self._use_proj_bias else None,
is_causal=is_causal,
return_attention_weights=return_attention_weights,
average_attention_weights=average_attention_weights,
dropout=self._dropout_rate,
training=self.training,
)
def _extra_repr(self) -> str:
return (
f"embed_dim={self._embed_dim}, key_dim={self._key_dim}, "
f"value_dim={self._value_dim}, num_heads={self.num_heads}, "
f"head_dim={self._head_dim}, dropout_rate={self.dropout_rate}, "
f"use_proj_bias={self._use_proj_bias}, "
f"attention_axes={self._attention_axes}, scale={self._scale}"
)
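# Illustrative usage sketch for the MultiHeadAttention layer above (added for
# documentation only; the helper name and shapes are assumptions and are not
# part of the original source):
def _example_multi_head_attention_usage():
    mha = MultiHeadAttention(64, num_heads=8)  # head_dim inferred as 64 // 8
    q = ivy.ones((2, 10, 64))  # *[batch_shape, num_queries, embed_dim]*
    out = mha(q)  # key and value default to the query, i.e. self-attention
    # the per-head (averaged) attention weights can also be requested
    out, weights = mha(q, return_attention_weights=True)
    return out, weights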
# Convolutions #
# -------------#
class Conv1D(Module):
def __init__(
self,
input_channels,
output_channels,
filter_size,
strides,
padding,
/,
*,
weight_initializer=GlorotUniform(),
bias_initializer=Zeros(),
with_bias=True,
data_format="NWC",
dilations=1,
device=None,
v=None,
dtype=None,
):
"""1D convolutional layer.
Parameters
----------
input_channels
Number of input channels for the layer.
output_channels
Number of output channels for the layer.
filter_size
Size of the convolutional filter.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or
list indicating the per-dimension paddings.
weight_initializer
Initializer for the weights. Default is GlorotUniform.
bias_initializer
Initializer for the bias. Default is Zeros.
with_bias
Whether or not to include a bias term, default is ``True``.
data_format
NWC" or "NCW". Defaults to "NWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
v
the variables for each of the conv layer, as a container,
constructed internally by default.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
self._input_channels = input_channels
self._output_channels = output_channels
self._filter_size = filter_size
self._strides = strides
self._padding = padding
self._w_shape = (filter_size, input_channels, output_channels)
self._b_shape = (
(1, 1, output_channels) if data_format == "NWC" else (1, output_channels, 1)
)
self._w_init = weight_initializer
self._b_init = bias_initializer
self._with_bias = with_bias
self._data_format = data_format
self._dilations = dilations
Module.__init__(self, device=device, v=v, dtype=dtype)
def _create_variables(self, device=None, dtype=None):
"""Create internal variables for the layer.
Parameters
----------
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
dtype
the desired data type of the internal variables to be created.
Default is ``None``.
"""
device = ivy.default(device, self.device)
dtype = ivy.default(dtype, self.dtype)
v = {
"w": self._w_init.create_variables(
self._w_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
)
}
if self._with_bias:
v = dict(
**v,
b=self._b_init.create_variables(
self._b_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
),
)
return v
def _forward(self, inputs):
"""Perform forward pass of the Conv1D layer.
Parameters
----------
inputs
Inputs to process *[batch_size,w,d_in]*
Returns
-------
ret
The outputs following the conv1d layer *[batch_size,new_w,d_out]*
"""
return ivy.conv1d(
inputs,
self.v.w,
self._strides,
self._padding,
data_format=self._data_format,
dilations=self._dilations,
) + (self.v.b if self._with_bias else 0)
def _extra_repr(self):
s = (
"{_input_channels}, {_output_channels}, filter_size={_filter_size},"
" strides={_strides}, padding={_padding}"
)
if self._dilations not in [1, (1,)]:
s += ", dilations={_dilations}"
if self._with_bias is not True:
s += ", with_bias=False"
if self._data_format != "NWC":
s += ", data_format={_data_format}"
return s.format(**self.__dict__)
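# Hypothetical usage sketch for Conv1D (illustration only; the helper name and
# shapes are assumptions): a length-100 signal with 3 channels mapped to 8
# output channels using a width-3 filter, unit stride and "SAME" padding.
def _example_conv1d_usage():
    conv = Conv1D(3, 8, 3, 1, "SAME")
    x = ivy.ones((2, 100, 3))  # *[batch_size, w, d_in]* in "NWC" format
    return conv(x)  # expected shape (2, 100, 8)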
class Conv1DTranspose(Module):
def __init__(
self,
input_channels,
output_channels,
filter_size,
strides,
padding,
/,
*,
weight_initializer=GlorotUniform(),
bias_initializer=Zeros(),
with_bias=True,
output_shape=None,
data_format="NWC",
dilations=1,
device=None,
v=None,
dtype=None,
):
"""1D transpose convolutional layer.
Parameters
----------
input_channels
Number of input channels for the layer.
output_channels
Number of output channels for the layer.
filter_size
Size of the convolutional filter.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or
list indicating the per-dimension paddings.
weight_initializer
Initializer for the weights. Default is GlorotUniform.
bias_initializer
Initializer for the bias. Default is Zeros.
with_bias
Whether or not to include a bias term, default is ``True``.
output_shape
Shape of the output (Default value = None)
data_format
NWC" or "NCW". Defaults to "NWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
v
the variables for each of the conv layer, as a container,
constructed internally by default.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
self._input_channels = input_channels
self._output_channels = output_channels
self._filter_size = filter_size
self._strides = strides
self._padding = padding
self._w_shape = (filter_size, output_channels, input_channels)
self._b_shape = (
(1, 1, output_channels) if data_format == "NWC" else (1, output_channels, 1)
)
self._w_init = weight_initializer
self._b_init = bias_initializer
self._with_bias = with_bias
self._output_shape = output_shape
self._data_format = data_format
self._dilations = dilations
Module.__init__(self, device=device, v=v, dtype=dtype)
def _create_variables(self, device, dtype=None):
"""Create internal variables for the layer.
Parameters
----------
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
device = ivy.default(device, self.device)
dtype = ivy.default(dtype, self.dtype)
v = {
"w": self._w_init.create_variables(
self._w_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
)
}
if self._with_bias:
v = dict(
**v,
b=self._b_init.create_variables(
self._b_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
),
)
return v
def _forward(self, inputs):
"""Perform forward pass of the Conv1DTranspose layer.
Parameters
----------
inputs
Inputs to process *[batch_size,w,d_in]*
Returns
-------
ret
            The outputs following the conv1d transpose layer *[batch_size,new_w,d_out]*
"""
return ivy.conv1d_transpose(
inputs,
self.v.w,
self._strides,
self._padding,
output_shape=self._output_shape,
data_format=self._data_format,
dilations=self._dilations,
) + (self.v.b if self._with_bias else 0)
def _extra_repr(self):
s = (
"{_input_channels}, {_output_channels}, filter_size={_filter_size},"
" strides={_strides}, padding={_padding}"
)
if self._dilations not in [1, (1,)]:
s += ", dilations={_dilations}"
if self._with_bias is not True:
s += ", with_bias=False"
if self._output_shape is not None:
s += ", output_shape={_output_shape}"
if self._data_format != "NWC":
s += ", data_format={_data_format}"
return s.format(**self.__dict__)
class Conv2D(Module):
def __init__(
self,
input_channels,
output_channels,
filter_shape,
strides,
padding,
/,
*,
weight_initializer=GlorotUniform(),
bias_initializer=Zeros(),
with_bias=True,
data_format="NHWC",
dilations=1,
device=None,
v=None,
dtype=None,
):
"""2D convolutional layer.
Parameters
----------
input_channels
Number of input channels for the layer.
output_channels
Number of output channels for the layer.
filter_shape
Shape of the convolutional filter.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or
list indicating the per-dimension paddings.
weight_initializer
Initializer for the weights. Default is GlorotUniform.
bias_initializer
Initializer for the bias. Default is Zeros.
with_bias
Whether or not to include a bias term, default is ``True``.
data_format
NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
v
the variables for each of the conv layer, as a container,
constructed internally by default.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
self._input_channels = input_channels
self._output_channels = output_channels
self._filter_shape = filter_shape
self._strides = strides
self._padding = padding
self._w_shape = filter_shape + [input_channels, output_channels]
self._b_shape = (
(1, 1, 1, output_channels)
if data_format == "NHWC"
else (1, output_channels, 1, 1)
)
self._w_init = weight_initializer
self._b_init = bias_initializer
self._with_bias = with_bias
self._data_format = data_format
self._dilations = dilations
Module.__init__(self, device=device, v=v, dtype=dtype)
def _create_variables(self, device, dtype=None):
"""Create internal variables for the layer.
Parameters
----------
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
dtype
the desired data type of the internal variables to be created.
Default is ``None``.
"""
device = ivy.default(device, self.device)
dtype = ivy.default(dtype, self.dtype)
v = {
"w": self._w_init.create_variables(
self._w_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
)
}
if self._with_bias:
v = dict(
**v,
b=self._b_init.create_variables(
self._b_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
),
)
return v
def _forward(self, inputs):
"""Perform forward pass of the Conv2D layer.
Parameters
----------
inputs
Inputs to process *[batch_size,h,w,d_in]*.
Returns
-------
ret
            The outputs following the conv2d layer *[batch_size,new_h,new_w,d_out]*
"""
return ivy.conv2d(
inputs,
self.v.w,
self._strides,
self._padding,
data_format=self._data_format,
dilations=self._dilations,
) + (self.v.b if self._with_bias else 0)
def _extra_repr(self):
s = (
"{_input_channels}, {_output_channels}, filter_shape={_filter_shape},"
" strides={_strides}, padding={_padding}"
)
if self._dilations not in [1, (1, 1)]:
s += ", dilations={_dilations}"
if self._with_bias is not True:
s += ", with_bias=False"
if self._data_format != "NHWC":
s += ", data_format={_data_format}"
return s.format(**self.__dict__)
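# Hypothetical usage sketch for Conv2D (illustration only; the helper name and
# shapes are assumptions). With "SAME" padding and unit stride the spatial
# dimensions are preserved while the channel dimension changes.
def _example_conv2d_usage():
    conv = Conv2D(3, 16, [3, 3], 1, "SAME")
    x = ivy.ones((8, 32, 32, 3))  # *[batch_size, h, w, d_in]* in "NHWC" format
    return conv(x)  # expected shape (8, 32, 32, 16)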
class Conv2DTranspose(Module):
def __init__(
self,
input_channels,
output_channels,
filter_shape,
strides,
padding,
/,
*,
weight_initializer=GlorotUniform(),
bias_initializer=Zeros(),
with_bias=True,
output_shape=None,
data_format="NHWC",
dilations=1,
device=None,
v=None,
dtype=None,
):
"""2D convolutional transpose layer.
Parameters
----------
input_channels
Number of input channels for the layer.
output_channels
Number of output channels for the layer.
filter_shape
Shape of the convolutional filter.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or
list indicating the per-dimension paddings.
weight_initializer
Initializer for the weights. Default is GlorotUniform.
bias_initializer
Initializer for the bias. Default is Zeros.
with_bias
Whether or not to include a bias term, default is ``True``.
output_shape
Shape of the output (Default value = None)
data_format
NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
v
the variables for each of the conv layer, as a container,
constructed internally by default.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
self._input_channels = input_channels
self._output_channels = output_channels
self._filter_shape = filter_shape
self._strides = strides
self._padding = padding
self._w_shape = filter_shape + [output_channels, input_channels]
self._b_shape = (
(1, 1, 1, output_channels)
if data_format == "NHWC"
else (1, output_channels, 1, 1)
)
self._w_init = weight_initializer
self._with_bias = with_bias
self._b_init = bias_initializer
self._output_shape = output_shape
self._data_format = data_format
self._dilations = dilations
Module.__init__(self, device=device, v=v, dtype=dtype)
def _create_variables(self, device, dtype=None):
"""Create internal variables for the layer.
Parameters
----------
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
device = ivy.default(device, self.device)
dtype = ivy.default(dtype, self.dtype)
v = {
"w": self._w_init.create_variables(
self._w_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
)
}
if self._with_bias:
v = dict(
**v,
b=self._b_init.create_variables(
self._b_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
),
)
return v
def _forward(self, inputs):
"""Perform forward pass of the Conv2DTranspose layer.
Parameters
----------
inputs
Inputs to process *[batch_size,h,w,d_in]*.
Returns
-------
ret
            The outputs following the conv2d transpose layer *[batch_size,new_h,new_w,d_out]*
"""
return ivy.conv2d_transpose(
inputs,
self.v.w,
self._strides,
self._padding,
output_shape=self._output_shape,
data_format=self._data_format,
dilations=self._dilations,
) + (self.v.b if self._with_bias else 0)
def _extra_repr(self):
s = (
"{_input_channels}, {_output_channels}, filter_shape={_filter_shape},"
" strides={_strides}, padding={_padding}"
)
if self._dilations not in [1, (1, 1)]:
s += ", dilations={_dilations}"
if self._with_bias is not True:
s += ", with_bias=False"
if self._output_shape is not None:
s += ", output_shape={_output_shape}"
if self._data_format != "NHWC":
s += ", data_format={_data_format}"
return s.format(**self.__dict__)
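# Hypothetical usage sketch for Conv2DTranspose (illustration only; the helper
# name and shapes are assumptions). With stride 2 the transpose convolution
# roughly doubles the spatial dimensions, the usual upsampling use case.
def _example_conv2d_transpose_usage():
    deconv = Conv2DTranspose(16, 3, [3, 3], 2, "SAME")
    x = ivy.ones((8, 32, 32, 16))  # *[batch_size, h, w, d_in]* in "NHWC" format
    return deconv(x)  # expected spatial dimensions of roughly 64 x 64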
class DepthwiseConv2D(Module):
def __init__(
self,
num_channels,
filter_shape,
strides,
padding,
/,
*,
weight_initializer=GlorotUniform(),
bias_initializer=Zeros(),
with_bias=True,
data_format="NHWC",
dilations=1,
device=None,
v=None,
dtype=None,
):
"""Depthwise 2D convolutional layer.
Parameters
----------
num_channels
Number of input channels for the layer.
filter_shape
Shape of the convolutional filter.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or
list indicating the per-dimension paddings.
weight_initializer
Initializer for the weights. Default is GlorotUniform.
bias_initializer
Initializer for the bias. Default is Zeros.
with_bias
Whether or not to include a bias term, default is ``True``.
data_format
NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
v
the variables for each of the conv layer, as a container,
constructed internally by default.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
self._num_channels = num_channels
self._filter_shape = filter_shape
self._strides = strides
self._padding = padding
self._w_shape = filter_shape + [num_channels]
self._b_shape = (
(1, 1, 1, num_channels)
if data_format == "NHWC"
else (1, num_channels, 1, 1)
)
self._w_init = weight_initializer
self._b_init = bias_initializer
self._with_bias = with_bias
self._data_format = data_format
self._dilations = dilations
Module.__init__(self, device=device, v=v, dtype=dtype)
def _create_variables(self, device, dtype):
"""Create internal variables for the layer.
Parameters
----------
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
device = ivy.default(device, self.device)
dtype = ivy.default(dtype, self.dtype)
v = {
"w": self._w_init.create_variables(
self._w_shape,
device,
self._num_channels,
self._num_channels,
dtype=dtype,
)
}
if self._with_bias:
v = dict(
**v,
b=self._b_init.create_variables(
self._b_shape,
device,
self._num_channels,
self._num_channels,
dtype=dtype,
),
)
return v
def _forward(self, inputs):
"""Perform forward pass of the DepthwiseConv2D layer.
Parameters
----------
inputs
Inputs to process *[batch_size,h,w,d_in]*.
Returns
-------
ret
            The outputs following the depthwise conv2d layer *[batch_size,new_h,new_w,d_out]*
"""
return ivy.depthwise_conv2d(
inputs,
self.v.w,
self._strides,
self._padding,
data_format=self._data_format,
dilations=self._dilations,
) + (self.v.b if self._with_bias else 0)
def _extra_repr(self):
s = (
"num_channels={_num_channels}, filter_shape={_filter_shape},"
" strides={_strides}, padding={_padding}"
)
if self._dilations not in [1, (1, 1)]:
s += ", dilations={_dilations}"
if self._with_bias is not True:
s += ", with_bias=False"
if self._data_format != "NHWC":
s += ", data_format={_data_format}"
return s.format(**self.__dict__)
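# Hypothetical usage sketch for DepthwiseConv2D (illustration only; the helper
# name and shapes are assumptions). Each input channel is convolved with its
# own filter, so the number of channels is unchanged.
def _example_depthwise_conv2d_usage():
    conv = DepthwiseConv2D(3, [3, 3], 1, "SAME")
    x = ivy.ones((8, 32, 32, 3))
    return conv(x)  # expected shape (8, 32, 32, 3)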
class Conv3D(Module):
def __init__(
self,
input_channels,
output_channels,
filter_shape,
strides,
padding,
/,
*,
weight_initializer=GlorotUniform(),
bias_initializer=Zeros(),
with_bias=True,
data_format="NDHWC",
dilations=1,
device=None,
v=None,
dtype=None,
):
"""3D convolutional layer.
Parameters
----------
input_channels
Number of input channels for the layer.
output_channels
Number of output channels for the layer.
filter_shape
Shape of the convolutional filter.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or
list indicating the per-dimension paddings.
weight_initializer
Initializer for the weights. Default is GlorotUniform.
bias_initializer
Initializer for the bias. Default is Zeros.
with_bias
Whether or not to include a bias term, default is ``True``.
data_format
NDHWC" or "NCDHW". Defaults to "NDHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
v
the variables for each of the conv layer, as a container,
constructed internally by default.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
self._input_channels = input_channels
self._output_channels = output_channels
self._filter_shape = filter_shape
self._strides = strides
self._padding = padding
self._w_shape = filter_shape + [input_channels, output_channels]
self._b_shape = (
(1, 1, 1, 1, output_channels)
if data_format == "NDHWC"
else (1, output_channels, 1, 1, 1)
)
self._w_init = weight_initializer
self._b_init = bias_initializer
self._with_bias = with_bias
self._data_format = data_format
self._dilations = dilations
Module.__init__(self, device=device, v=v, dtype=dtype)
def _create_variables(self, device, dtype=None):
"""Create internal variables for the layer.
Parameters
----------
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
device = ivy.default(device, self.device)
dtype = ivy.default(dtype, self.dtype)
v = {
"w": self._w_init.create_variables(
self._w_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
)
}
if self._with_bias:
v = dict(
**v,
b=self._b_init.create_variables(
self._b_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
),
)
return v
def _forward(self, inputs):
"""Perform forward pass of the Conv3D layer.
Parameters
----------
inputs
Inputs to process *[batch_size,d,h,w,d_in]*.
Returns
-------
ret
            The outputs following the conv3d layer
*[batch_size,new_d,new_h,new_w,d_out]*
"""
return ivy.conv3d(
inputs,
self.v.w,
self._strides,
self._padding,
data_format=self._data_format,
dilations=self._dilations,
) + (self.v.b if self._with_bias else 0)
def _extra_repr(self):
s = (
"{_input_channels}, {_output_channels}, filter_shape={_filter_shape},"
" strides={_strides}, padding={_padding}"
)
if self._dilations not in [1, (1, 1, 1)]:
s += ", dilations={_dilations}"
if self._with_bias is not True:
s += ", with_bias=False"
if self._data_format != "NDHWC":
s += ", data_format={_data_format}"
return s.format(**self.__dict__)
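# Hypothetical usage sketch for Conv3D (illustration only; the helper name and
# shapes are assumptions), operating on a small volumetric batch.
def _example_conv3d_usage():
    conv = Conv3D(1, 4, [3, 3, 3], 1, "SAME")
    x = ivy.ones((2, 8, 16, 16, 1))  # *[batch_size, d, h, w, d_in]* in "NDHWC"
    return conv(x)  # expected shape (2, 8, 16, 16, 4)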
class Conv3DTranspose(Module):
def __init__(
self,
input_channels,
output_channels,
filter_shape,
strides,
padding,
/,
*,
weight_initializer=GlorotUniform(),
bias_initializer=Zeros(),
with_bias=True,
output_shape=None,
data_format="NDHWC",
dilations=1,
device=None,
v=None,
dtype=None,
):
"""3D convolutional transpose layer.
Parameters
----------
input_channels
Number of input channels for the layer.
output_channels
Number of output channels for the layer.
filter_shape
Shape of the convolutional filter.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or
list indicating the per-dimension paddings.
weight_initializer
Initializer for the weights. Default is GlorotUniform.
bias_initializer
Initializer for the bias. Default is Zeros.
with_bias
Whether or not to include a bias term, default is ``True``.
output_shape
Shape of the output (Default value = None)
data_format
NDHWC" or "NCDHW". Defaults to "NDHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
v
the variables for each of the conv layer, as a container,
constructed internally by default.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
self._input_channels = input_channels
self._output_channels = output_channels
self._filter_shape = filter_shape
self._strides = strides
self._padding = padding
self._w_shape = filter_shape + [output_channels, input_channels]
self._b_shape = (
(1, 1, 1, 1, output_channels)
if data_format == "NDHWC"
else (1, output_channels, 1, 1, 1)
)
self._w_init = weight_initializer
self._b_init = bias_initializer
self._with_bias = with_bias
self._output_shape = output_shape
self._data_format = data_format
self._dilations = dilations
self.dtype = dtype
Module.__init__(self, device=device, v=v, dtype=dtype)
def _create_variables(self, device, dtype=None):
"""Create internal variables for the layer.
Parameters
----------
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
device = ivy.default(device, self.device)
dtype = ivy.default(dtype, self.dtype)
v = {
"w": self._w_init.create_variables(
self._w_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
)
}
if self._with_bias:
v = dict(
**v,
b=self._b_init.create_variables(
self._b_shape,
device,
self._output_channels,
self._input_channels,
dtype=dtype,
),
)
return v
def _forward(self, inputs):
"""Perform forward pass of the Conv3DTranspose layer.
Parameters
----------
inputs
Inputs to process *[batch_size,d,h,w,d_in]*.
Returns
-------
ret
            The outputs following the conv3d transpose layer
*[batch_size,new_d,new_h,new_w,d_out]*
"""
return ivy.conv3d_transpose(
inputs,
self.v.w,
self._strides,
self._padding,
output_shape=self._output_shape,
data_format=self._data_format,
dilations=self._dilations,
) + (self.v.b if self._with_bias else 0)
def _extra_repr(self):
s = (
"{_input_channels}, {_output_channels}, filter_shape={_filter_shape},"
" strides={_strides}, padding={_padding}"
)
if self._dilations not in [1, (1, 1, 1)]:
s += ", dilations={_dilations}"
if self._with_bias is not True:
s += ", with_bias=False"
if self._output_shape is not None:
s += ", output_shape={_output_shape}"
if self._data_format != "NDHWC":
s += ", data_format={_data_format}"
return s.format(**self.__dict__)
# LSTM #
# -----#
class LSTM(Module):
def __init__(
self,
input_channels,
output_channels,
/,
*,
weight_initializer=GlorotUniform(),
num_layers=1,
return_sequence=True,
return_state=True,
device=None,
v=None,
dtype=None,
):
"""LSTM layer, which is a set of stacked lstm cells.
Parameters
----------
input_channels
Number of input channels for the layer
output_channels
Number of output channels for the layer
weight_initializer
Initializer for the weights. Default is GlorotUniform.
num_layers
Number of lstm cells in the lstm layer, default is ``1``.
return_sequence
Whether or not to return the entire output sequence, or
just the latest timestep.
Default is ``True``.
return_state
Whether or not to return the latest hidden and cell states.
Default is ``True``.
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
v
the variables for each of the lstm cells, as a container,
constructed internally by default.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
self._input_channels = input_channels
self._output_channels = output_channels
self._w_init = weight_initializer
self._num_layers = num_layers
self._return_sequence = return_sequence
self._return_state = return_state
Module.__init__(self, device=device, v=v, dtype=dtype)
# Public #
def get_initial_state(self, batch_shape, dtype=None):
"""Get the initial state of the hidden and cell states, if not provided
explicitly.
Parameters
----------
        batch_shape
            Shape of the batch dimensions for the initial hidden and cell states.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
dtype = ivy.default(dtype, self.dtype)
batch_shape = list(batch_shape)
return (
[
ivy.zeros((batch_shape + [self._output_channels]), dtype=dtype)
for i in range(self._num_layers)
],
[
ivy.zeros((batch_shape + [self._output_channels]), dtype=dtype)
for i in range(self._num_layers)
],
)
# Overridden
def _create_variables(self, device=None, dtype=None):
"""Create internal variables for the layer.
Parameters
----------
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
device = ivy.default(device, self.device)
dtype = ivy.default(dtype, self.dtype)
input_weights = dict(
zip(
[f"layer_{str(i)}" for i in range(self._num_layers)],
[
{
"w": self._w_init.create_variables(
(
(
self._input_channels
if i == 0
else self._output_channels
),
4 * self._output_channels,
),
device,
self._output_channels,
self._input_channels,
dtype=dtype,
)
}
for i in range(self._num_layers)
],
)
)
recurrent_weights = dict(
zip(
[f"layer_{str(i)}" for i in range(self._num_layers)],
[
{
"w": self._w_init.create_variables(
(self._output_channels, 4 * self._output_channels),
device,
self._output_channels,
self._input_channels,
dtype=dtype,
)
}
for i in range(self._num_layers)
],
)
)
return {"input": input_weights, "recurrent": recurrent_weights}
@handle_nestable
def _forward(self, inputs, initial_state=None):
"""Perform forward pass of the LSTM layer.
Parameters
----------
inputs
Inputs to process *[batch_shape, t, in]*.
initial_state
2-tuple of lists of the hidden states h and c for each layer,
each of dimension *[batch_shape,out]*.
Created internally if None. (Default value = None)
Returns
-------
ret
The outputs of the final lstm layer *[batch_shape, t, out]* and the hidden
state tuple of lists, each of dimension *[batch_shape, out]*
"""
if initial_state is None:
initial_state = self.get_initial_state(
inputs.shape[:-2], dtype=inputs.dtype
)
h_n_list = []
c_n_list = []
h_t = inputs
for h_0, c_0, (_, lstm_input_var), (_, lstm_recurrent_var) in zip(
initial_state[0],
initial_state[1],
self.v.input.items(),
self.v.recurrent.items(),
):
h_t, c_n = ivy.lstm_update(
h_t, h_0, c_0, lstm_input_var.w, lstm_recurrent_var.w
)
h_n_list.append(h_t[..., -1, :])
c_n_list.append(c_n)
if not self._return_sequence:
h_t = h_t[..., -1, :]
if not self._return_state:
return h_t
return h_t, (h_n_list, c_n_list)
def _extra_repr(self):
s = "{_input_channels}, {_output_channels}"
if self._num_layers != 1:
s += ", num_layers={_num_layers}"
if self._return_sequence is not True:
s += ", return_sequence={_return_sequence}"
if self._return_state is not True:
s += ", return_state={_return_state}"
return s.format(**self.__dict__)
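# Hypothetical usage sketch for LSTM (illustration only; the helper name and
# shapes are assumptions). With the default flags the layer returns the full
# output sequence plus the final hidden and cell states of every stacked cell.
def _example_lstm_usage():
    lstm = LSTM(64, 128, num_layers=2)
    x = ivy.ones((4, 10, 64))  # *[batch_shape, t, in]*
    out, (h_n, c_n) = lstm(x)  # out: (4, 10, 128), h_n/c_n: lists of (4, 128)
    return out, h_n, c_n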
# Pooling #
# --------#
class MaxPool2D(Module):
def __init__(
self,
kernel_size,
stride,
padding,
/,
*,
data_format="NHWC",
device=None,
v=None,
dtype=None,
):
"""Class for applying Max Pooling over a mini-batch of inputs.
Parameters
----------
kernel_size
The size of the window to take a max over.
stride
The stride of the window. Default value: 1
padding
Implicit zero padding to be added on both sides.
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
"""
self._kernel_size = kernel_size
self._stride = stride
self._padding = padding
self._data_format = data_format
Module.__init__(self, device=device, dtype=dtype)
def _forward(self, inputs):
"""Forward pass of the layer.
Parameters
----------
x
The input to the layer.
Returns
-------
The output of the layer.
"""
return ivy.max_pool2d(
inputs,
self._kernel_size,
self._stride,
self._padding,
data_format=self._data_format,
)
def _extra_repr(self):
s = "kernel_size={_kernel_size}, stride={_stride}, padding={_padding}"
if self._data_format != "NHWC":
s += ", data_format={_data_format}"
return s.format(**self.__dict__)
class AvgPool2D(Module):
def __init__(
self,
kernel_size,
stride,
padding,
/,
*,
data_format="NHWC",
device=None,
v=None,
dtype=None,
):
"""Class for applying Average Pooling over a mini-batch of inputs.
Parameters
----------
kernel_size
            The size of the window to take an average over.
stride
The stride of the window. Default value: 1
padding
Implicit zero padding to be added on both sides.
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
"""
self._kernel_size = kernel_size
self._stride = stride
self._padding = padding
self._data_format = data_format
Module.__init__(self, device=device, dtype=dtype)
def _forward(self, inputs):
"""Forward pass of the layer.
Parameters
----------
x
The input to the layer.
Returns
-------
The output of the layer.
"""
return ivy.avg_pool2d(
inputs,
self._kernel_size,
self._stride,
self._padding,
data_format=self._data_format,
)
def _extra_repr(self):
s = "kernel_size={_kernel_size}, stride={_stride}, padding={_padding}"
if self._data_format != "NHWC":
s += ", data_format={_data_format}"
return s.format(**self.__dict__)
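# Hypothetical usage sketch for the 2D pooling layers above (illustration only;
# the helper name and shapes are assumptions). A 2x2 window with stride 2
# halves the spatial dimensions.
def _example_pool2d_usage():
    max_pool = MaxPool2D(2, 2, "VALID")
    avg_pool = AvgPool2D(2, 2, "VALID")
    x = ivy.ones((8, 32, 32, 3))
    return max_pool(x), avg_pool(x)  # each expected to have shape (8, 16, 16, 3)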
class MaxPool1D(Module):
def __init__(
self,
kernel_size,
stride,
padding,
/,
*,
data_format="NWC",
device=None,
v=None,
dtype=None,
):
"""Class for applying Max Pooling over a mini-batch of inputs.
Parameters
----------
kernel_size
The size of the window to take a max over.
stride
The stride of the window. Default value: 1
padding
Implicit zero padding to be added on both sides.
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
"""
self._kernel_size = kernel_size
self._stride = stride
self._padding = padding
self._data_format = data_format
Module.__init__(self, device=device, dtype=dtype)
def _forward(self, inputs):
"""Forward pass of the layer.
Parameters
----------
x
The input to the layer.
Returns
-------
The output of the layer.
"""
return ivy.max_pool1d(
inputs,
self._kernel_size,
self._stride,
self._padding,
data_format=self._data_format,
)
def _extra_repr(self):
s = "kernel_size={_kernel_size}, stride={_stride}, padding={_padding}"
if self._data_format != "NHWC":
s += ", data_format={_data_format}"
return s.format(**self.__dict__)
class MaxPool3D(Module):
def __init__(
self,
kernel_size,
stride,
padding,
/,
*,
data_format="NDHWC",
device=None,
dtype=None,
):
"""Class for applying 3D Max Pooling over 5D inputs.
Parameters
----------
kernel_size
The size of the window to take a max over.
stride
The stride of the window.
padding
Implicit zero padding to be added on both sides.
"""
self._kernel_size = kernel_size
self._stride = stride
self._padding = padding
self._data_format = data_format
Module.__init__(self, device=device, dtype=dtype)
def _forward(self, x):
"""Forward pass of the layer.
Parameters
----------
x
The input array to the layer.
Returns
-------
The output of the layer.
"""
return ivy.max_pool3d(
x,
self._kernel_size,
self._stride,
self._padding,
data_format=self._data_format,
)
def _extra_repr(self):
s = "kernel_size={_kernel_size}, stride={_stride}, padding={_padding}"
if self._data_format != "NDHWC":
s += ", data_format={_data_format}"
return s.format(**self.__dict__)
class AvgPool3D(Module):
def __init__(
self,
kernel_size,
strides,
padding,
/,
*,
data_format="NDHWC",
count_include_pad=False,
ceil_mode=False,
divisor_override=None,
):
"""Class for applying Average Pooling over a mini-batch of inputs.
Parameters
----------
kernel_size
            The size of the window to take an average over.
        strides
            The stride of the window.
padding
Implicit zero padding to be added on both sides.
data_format
NDHWC" or "NCDHW". Defaults to "NDHWC".
count_include_pad
Whether to include padding in the averaging calculation.
ceil_mode
Whether to use ceil or floor for creating the output shape.
divisor_override
If specified, it will be used as divisor,
otherwise kernel_size will be used. # noqa: E501
"""
self._kernel_size = kernel_size
self._stride = strides
self._padding = padding
self._data_format = data_format
self._count_include_pad = count_include_pad
self._ceil_mode = ceil_mode
self._divisor_override = divisor_override
Module.__init__(self)
def _forward(self, x):
"""Forward pass of the layer.
Parameters
----------
x
The input array to the layer.
Returns
-------
The output array of the layer.
"""
return ivy.avg_pool3d(
x,
self._kernel_size,
self._stride,
self._padding,
data_format=self._data_format,
count_include_pad=self._count_include_pad,
ceil_mode=self._ceil_mode,
divisor_override=self._divisor_override,
)
def _extra_repr(self):
s = "kernel_size={_kernel_size}, stride={_stride}, padding={_padding}"
if self._data_format != "NDHWC":
s += ", data_format={_data_format}"
if self._count_include_pad is not False:
s += ", count_include_pad={_count_include_pad}"
if self._ceil_mode is not False:
s += ", ceil_mode={_ceil_mode}"
        if self._divisor_override is not None:
s += ", divisor_override={_divisor_override}"
return s.format(**self.__dict__)
class AdaptiveAvgPool2d(Module):
def __init__(
self,
output_size,
/,
*,
data_format="NHWC",
device=None,
dtype=None,
):
"""Class for applying a 2D adaptive average pooling over mini-batch of
inputs.
Parameters
----------
output_size
the target output size of the image.
data_format
NHWC" or "NCHW". Defaults to "NHWC".
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
"""
self._output_size = output_size
self._data_format = data_format
Module.__init__(self, device=device, dtype=dtype)
def _forward(self, x):
"""Forward pass of the layer.
Parameters
----------
x
The input array to the layer.
Returns
-------
The output array of the layer.
"""
# TODO: test again once adaptive_avg_pool2d is
# implemented for the missing backends.
return ivy.adaptive_avg_pool2d(
x, self._output_size, data_format=self._data_format
)
def _extra_repr(self):
return f"output_size={self._output_size}"
class AdaptiveAvgPool1d(Module):
def __init__(
self,
output_size,
device=None,
dtype=None,
):
# TODO: add data_format param
"""Class for applying a 1D adaptive average pooling over mini-batch of
inputs.
Parameters
----------
output_size
An integer or tuple/list of a single integer
specifying new size of output channels.
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
"""
self._output_size = output_size
Module.__init__(self, device=device, dtype=dtype)
def _forward(self, x):
"""Forward pass of the layer.
Parameters
----------
x
The input array to the layer.
Returns
-------
The output array of the layer.
"""
        # TODO: test again once adaptive_avg_pool1d is
        # implemented for the missing backends.
return ivy.adaptive_avg_pool1d(
x,
self._output_size,
)
def _extra_repr(self):
return f"output_size={self._output_size}"
class FFT(Module):
def __init__(
self,
dim,
/,
*,
norm="backward",
n=None,
out=None,
device=None,
dtype=None,
):
"""Class for applying FFT to input.
Parameters
----------
dim : int
Dimension along which to take the FFT.
norm : str
Normalization mode. Default: 'backward'
n : int
Size of the FFT. Default: None
        out : optional
            Optional output array for writing the result to. Default: None
"""
self._dim = dim
self._norm = norm
self._n = n
self._out = out
Module.__init__(self, device=device, dtype=dtype)
def _forward(self, inputs):
"""Forward pass of the layer.
Parameters
----------
inputs : array
Input array to take the FFT of.
Returns
-------
array
The output array of the layer.
"""
return ivy.fft(
inputs,
self._dim,
norm=self._norm,
n=self._n,
out=self._out,
)
def _extra_repr(self):
s = "dim={_dim}"
if self._norm != "backward":
s += ", norm={_norm}"
        if self._n is not None:
s += ", n={_n}"
return s.format(**self.__dict__)
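# Hypothetical usage sketch for the FFT layer (illustration only; the helper
# name and shapes are assumptions). The transform is taken along the chosen
# dimension and is expected to return a complex-valued array.
def _example_fft_usage():
    fft_layer = FFT(-1)  # FFT along the last dimension, "backward" normalization
    x = ivy.ones((2, 64))
    return fft_layer(x)  # complex output with the same shape as the input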
class AvgPool1D(Module):
def __init__(
self,
kernel_size,
stride,
padding,
/,
*,
data_format="NWC",
):
"""Class for applying Average Pooling over a mini-batch of inputs.
Parameters
----------
kernel_size
The size of the window to take an average over.
stride
The stride of the window. Default value: 1
padding
Implicit zero padding to be added on both sides.
data_format
"NCW" or "NWC". Defaults to "NWC".
"""
self._kernel_size = kernel_size
self._stride = stride
self._padding = padding
self._data_format = data_format
Module.__init__(self)
def _forward(self, inputs):
"""Forward pass of the layer.
Parameters
----------
x
The input to the layer.
Returns
-------
The output of the layer.
"""
return ivy.avg_pool1d(
inputs,
self._kernel_size,
self._stride,
self._padding,
data_format=self._data_format,
)
def _extra_repr(self):
s = "kernel_size={_kernel_size}, stride={_stride}, padding={_padding}"
if self._data_format != "NWC":
s += ", data_format={_data_format}"
return s.format(**self.__dict__)
class Dct(Module):
def __init__(
self,
*,
type=2,
n=None,
axis=-1,
norm=None,
device=None,
dtype=None,
):
"""Class for applying the Discrete Cosine Transform over mini-batch of
inputs.
Parameters
----------
x
The input signal.
type
The type of the dct. Must be 1, 2, 3 or 4.
n
The length of the transform. If n is less than the input signal length,
then x is truncated, if n is larger then x is zero-padded.
axis
The axis to compute the DCT along.
norm
The type of normalization to be applied. Must be either None or "ortho".
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
"""
self.type = type
self.n = n
self.axis = axis
self.norm = norm
Module.__init__(self, device=device, dtype=dtype)
def _forward(self, x):
"""Forward pass of the layer.
Parameters
----------
x
The input array to the layer.
Returns
-------
The output array of the layer.
"""
return ivy.dct(
x,
type=self.type,
n=self.n,
axis=self.axis,
norm=self.norm,
)
def _extra_repr(self):
s = "type={type}"
if self.n is not None:
s += ", n={n}"
if self.axis != -1:
s += ", axis={axis}"
if self.norm is not None:
s += ", norm={norm}"
return s.format(**self.__dict__)
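# Hypothetical usage sketch for the Dct layer (illustration only; the helper
# name and shapes are assumptions), applying an orthonormal type-II DCT along
# the last axis.
def _example_dct_usage():
    dct_layer = Dct(type=2, norm="ortho")
    x = ivy.ones((2, 8))
    return dct_layer(x)  # expected to keep the input shape, (2, 8)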
# EMBEDDING #
# ----------#
class Embedding(Module):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx=None,
max_norm=None,
/,
*,
weight_initializer=GlorotUniform(),
device=None,
v=None,
dtype=None,
):
"""Class for embedding indices into a dense representation. The
Embedding layer is a simple lookup table for dense vectors. It's
typically used to store word embeddings and query them using indices.
Parameters
----------
        num_embeddings : int
Number of embeddings.
embedding_dim : int
Dimension of the embeddings.
padding_idx : int
If given, pads the output with zeros whenever it encounters the index.
max_norm : float
If given, each embedding vector with L2 norm larger than max_norm is renormalized to have norm max_norm.
weight_initializer : Initializer
Initializer for the weights.
device : str
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
v : dict
the variables for the embedding layer, as a container, constructed internally
by default.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
self._num_embeddings = num_embeddings
self._embedding_dim = embedding_dim
self._padding_idx = padding_idx
self._max_norm = max_norm
self._weight_initializer = weight_initializer
Module.__init__(self, device=device, v=v, dtype=dtype)
def _create_variables(self, device=None, dtype=None):
"""Create internal variables for the layer.
Parameters
----------
device
device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
dtype
the desired data type of the internal variables to be created if not
provided. Default is ``None``.
"""
device = ivy.default(device, self.device)
dtype = ivy.default(dtype, self.dtype)
v = {
"w": self._weight_initializer.create_variables(
(self._num_embeddings, self._embedding_dim),
device,
self._embedding_dim,
self._num_embeddings,
dtype=dtype,
)
}
return v
def _pad_embd(self, indices, embd):
mask = ivy.expand_dims(indices == self._padding_idx, axis=-1)
mask_val = ivy.array(0.0, dtype=embd.dtype)
return ivy.where(mask, mask_val, embd)
def _forward(self, indices):
"""Forward pass of the layer.
Parameters
----------
indices
The input array to the layer.
Returns
-------
The output array of the layer.
"""
emb = ivy.embedding(self.v.w, indices, max_norm=self._max_norm)
if self._padding_idx is not None:
emb = self._pad_embd(indices, emb)
return emb
def _extra_repr(self):
s = "num_embeddings={_num_embeddings}, embedding_dim={_embedding_dim}"
if self._padding_idx is not None:
s += ", padding_idx={_padding_idx}"
if self._max_norm is not None:
s += ", max_norm={_max_norm}"
return s.format(**self.__dict__)
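# Hypothetical usage sketch for the Embedding layer (illustration only; the
# helper name and values are assumptions). Rows looked up at ``padding_idx``
# are zeroed out in the returned embeddings.
def _example_embedding_usage():
    emb = Embedding(10, 4, 0)  # 10 embeddings of size 4, padding_idx=0
    indices = ivy.array([[1, 2, 0]])
    return emb(indices)  # expected shape (1, 3, 4), with the last row all zeros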
class Identity(Module):
def __init__(self):
"""Identity layer. The layer is argument insensitive and returns the
input argument as output when called.
It's typically used as a placeholder when no operation is to be
performed. It doesn't have any learnable parameter.
"""
Module.__init__(self)
def _forward(self, x):
"""Forward pass of the layer.
Parameters
----------
x
The input array.
Returns
-------
The input array as it is.
"""
return x
class IFFT(Module):
def __init__(
self,
dim,
/,
*,
norm="backward",
n=None,
out=None,
device=None,
dtype=None,
):
"""Class for applying IFFT to input.
Parameters
----------
dim : int
Dimension along which to take the IFFT.
norm : str
Optional argument indicating the normalization mode. Possible Values : "backward", "ortho" or "forward".
"backward" indicates no normalization.
"ortho" indicates normalization by 1/sqrt(n).
"forward" indicates normalization by 1/n.
Default: "backward"
n : int
Optional argument indicating the sequence length, if given, the input
would be padded with zero or truncated to length n before performing IFFT.
            Should be an integer greater than 1. Default: None
        out : optional
            Optional output array for writing the result to. Default: None
"""
self._dim = dim
self._norm = norm
self._n = n
self._out = out
Module.__init__(self, device=device, dtype=dtype)
def _forward(self, inputs):
"""Forward pass of the layer.
Parameters
----------
inputs : array
Input array to take the IFFT of.
Returns
-------
The output array of the layer.
"""
return ivy.ifft(
inputs,
self._dim,
norm=self._norm,
n=self._n,
out=self._out,
)
def _extra_repr(self):
s = "dim={_dim}"
if self._norm != "backward":
s += ", norm={_norm}"
        if self._n is not None:
s += ", n={_n}"
return s.format(**self.__dict__)
| ivy/ivy/stateful/layers.py/0 | {
"file_path": "ivy/ivy/stateful/layers.py",
"repo_id": "ivy",
"token_count": 38413
} | 50 |
# Helper functions for einsum_path, this file has been adapted from
# `numpy core einsumfunc.py file` here
# https://github.com/numpy/numpy/blob/v1.26.0/numpy/core/einsumfunc.py
from itertools import combinations
from ivy.utils.einsum_parser import possibly_convert_to_numpy, convert_interleaved_input
einsum_symbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
einsum_symbols_set = set(einsum_symbols)
def flop_count(idx_contraction, inner, num_terms, size_dictionary):
"""Compute the number of FLOPS in the contraction.
Parameters
----------
idx_contraction : iterable
The indices involved in the contraction
inner : bool
Does this contraction require an inner product?
num_terms : int
The number of terms in a contraction
size_dictionary : dict
The size of each of the indices in idx_contraction
Returns
-------
flop_count : int
The total number of FLOPS required for the contraction.
Examples
--------
>>> flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
30
>>> flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
60
"""
overall_size = compute_size_by_dict(idx_contraction, size_dictionary)
op_factor = max(1, num_terms - 1)
if inner:
op_factor += 1
return overall_size * op_factor
def compute_size_by_dict(indices, idx_dict):
"""Compute the product of the elements in indices based on the dictionary
idx_dict.
Parameters
----------
indices : iterable
Indices to base the product on.
idx_dict : dictionary
Dictionary of index sizes
Returns
-------
ret : int
The resulting product.
Examples
--------
>>> compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
90
"""
ret = 1
for i in indices:
ret *= idx_dict[i]
return ret
def find_contraction(positions, input_sets, output_set):
"""Find the contraction for a given set of input and output sets.
Parameters
----------
positions : iterable
Integer positions of terms used in the contraction.
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
Returns
-------
new_result : set
The indices of the resulting contraction
remaining : list
List of sets that have not been contracted, the new set is appended to
the end of this list
idx_removed : set
Indices removed from the entire contraction
idx_contraction : set
The indices used in the current contraction
Examples
--------
# A simple dot product test case
>>> pos = (0, 1)
>>> isets = [set('ab'), set('bc')]
>>> oset = set('ac')
>>> find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
# A more complex case with additional terms in the contraction
>>> pos = (0, 2)
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('ac')
>>> find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
"""
idx_contract = set()
idx_remain = output_set.copy()
remaining = []
for ind, value in enumerate(input_sets):
if ind in positions:
idx_contract |= value
else:
remaining.append(value)
idx_remain |= value
new_result = idx_remain & idx_contract
idx_removed = idx_contract - new_result
remaining.append(new_result)
return (new_result, remaining, idx_removed, idx_contract)
def optimal_path(input_sets, output_set, idx_dict, memory_limit):
"""Compute all possible pair contractions, sieves the results based on
``memory_limit`` and returns the lowest cost path. This algorithm scales
factorial with respect to the elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The optimal contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> optimal_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
full_results = [(0, [], input_sets)]
for iteration in range(len(input_sets) - 1):
iter_results = []
# Compute all unique pairs
for curr in full_results:
cost, positions, remaining = curr
for con in combinations(range(len(input_sets) - iteration), 2):
# Find the contraction
cont = find_contraction(con, remaining, output_set)
new_result, new_input_sets, idx_removed, idx_contract = cont
# Sieve the results based on memory_limit
new_size = compute_size_by_dict(new_result, idx_dict)
if new_size > memory_limit:
continue
# Build (total_cost, positions, indices_remaining)
total_cost = cost + flop_count(
idx_contract, idx_removed, len(con), idx_dict
)
new_pos = positions + [con]
iter_results.append((total_cost, new_pos, new_input_sets))
# Update combinatorial list, if we did not find anything return best
# path + remaining contractions
if iter_results:
full_results = iter_results
else:
path = min(full_results, key=lambda x: x[0])[1]
path += [tuple(range(len(input_sets) - iteration))]
return path
# If we have not found anything return single einsum contraction
if len(full_results) == 0:
return [tuple(range(len(input_sets)))]
path = min(full_results, key=lambda x: x[0])[1]
return path
def parse_possible_contraction(
positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost
):
"""Compute the cost (removed size + flops) and resultant indices for
performing the contraction specified by ``positions``.
Parameters
----------
positions : tuple of int
The locations of the proposed tensors to contract.
input_sets : list of sets
The indices found on each tensors.
output_set : set
The output indices of the expression.
idx_dict : dict
Mapping of each index to its size.
memory_limit : int
The total allowed size for an intermediary tensor.
path_cost : int
The contraction cost so far.
naive_cost : int
The cost of the unoptimized expression.
Returns
-------
cost : (int, int)
A tuple containing the size of any indices removed, and the flop cost.
positions : tuple of int
The locations of the proposed tensors to contract.
new_input_sets : list of sets
The resulting new list of indices if this proposed contraction is performed.
"""
# Find the contraction
contract = find_contraction(positions, input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
# Sieve the results based on memory_limit
new_size = compute_size_by_dict(idx_result, idx_dict)
if new_size > memory_limit:
return None
# Build sort tuple
old_sizes = (compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
removed_size = sum(old_sizes) - new_size
# NB: removed_size used to be just the size of any removed indices i.e.:
# helpers.compute_size_by_dict(idx_removed, idx_dict)
cost = flop_count(idx_contract, idx_removed, len(positions), idx_dict)
sort = (-removed_size, cost)
# Sieve based on total cost as well
if (path_cost + cost) > naive_cost:
return None
# Add contraction to possible choices
return [sort, positions, new_input_sets]
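# Worked example for ``parse_possible_contraction`` (added for illustration and
# not part of the original source; the values were computed by hand from the
# doctest operands used elsewhere in this file and should be read as a sketch
# rather than a verified doctest):
#
# >>> isets = [set('abd'), set('ac'), set('bdc')]
# >>> idx_sizes = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
# >>> parse_possible_contraction((0, 2), isets, set(), idx_sizes, 5000, 0, 72)
# [(-29, 48), (0, 2), [{'a', 'c'}, {'a', 'c'}]]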
def update_other_results(results, best):
"""Update the positions and provisional input_sets of ``results`` based on
performing the contraction result ``best``. Remove any involving the
tensors contracted.
Parameters
----------
results : list
        List of contraction results produced by ``parse_possible_contraction``.
best : list
The best contraction of ``results`` i.e. the one that will be performed.
Returns
-------
mod_results : list
The list of modified results, updated with outcome of ``best`` contraction.
"""
best_con = best[1]
bx, by = best_con
mod_results = []
for cost, (x, y), con_sets in results:
# Ignore results involving tensors just contracted
if x in best_con or y in best_con:
continue
# Update the input_sets
del con_sets[by - int(by > x) - int(by > y)]
del con_sets[bx - int(bx > x) - int(bx > y)]
con_sets.insert(-1, best[2][-1])
# Update the position indices
mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
mod_results.append((cost, mod_con, con_sets))
return mod_results
def greedy_path(input_sets, output_set, idx_dict, memory_limit):
"""Find the path by contracting the best pair until the input list is
exhausted. The best pair is found by minimizing the tuple
``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
matrix multiplication or inner product operations, then Hadamard like
operations, and finally outer operations. Outer products are limited by
``memory_limit``. This algorithm scales cubically with respect to the
number of elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The greedy contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> greedy_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
# Handle trivial cases that leaked through
if len(input_sets) == 1:
return [(0,)]
elif len(input_sets) == 2:
return [(0, 1)]
# Build up a naive cost
contract = find_contraction(range(len(input_sets)), input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
naive_cost = flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
# Initially iterate over all pairs
comb_iter = combinations(range(len(input_sets)), 2)
known_contractions = []
path_cost = 0
path = []
for iteration in range(len(input_sets) - 1):
# Iterate over all pairs on first step, only previously found
# pairs on subsequent steps
for positions in comb_iter:
# Always initially ignore outer products
if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
continue
result = parse_possible_contraction(
positions,
input_sets,
output_set,
idx_dict,
memory_limit,
path_cost,
naive_cost,
)
if result is not None:
known_contractions.append(result)
        # If we do not have an inner contraction, rescan pairs including outer products
if len(known_contractions) == 0:
# Then check the outer products
for positions in combinations(range(len(input_sets)), 2):
result = parse_possible_contraction(
positions,
input_sets,
output_set,
idx_dict,
memory_limit,
path_cost,
naive_cost,
)
if result is not None:
known_contractions.append(result)
# If we still did not find any remaining contractions,
# default back to einsum like behavior
if len(known_contractions) == 0:
path.append(tuple(range(len(input_sets))))
break
# Sort based on first index
best = min(known_contractions, key=lambda x: x[0])
# Now propagate as many unused contractions as possible to next iteration
known_contractions = update_other_results(known_contractions, best)
# Next iteration only compute contractions with the new tensor
# All other contractions have been accounted for
input_sets = best[2]
new_tensor_pos = len(input_sets) - 1
comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
# Update path and total cost
path.append(best[1])
path_cost += best[0][1]
return path
def can_dot(inputs, result, idx_removed):
    """Check if we can use a BLAS (np.tensordot) call and whether it is
    beneficial to do so.
    Parameters
    ----------
    inputs : list of str
        Specifies the subscripts for summation.
    result : str
        Resulting summation.
    idx_removed : set
        Indices that are removed in the summation
    Returns
    -------
    type : bool
        Returns True if BLAS should and can be used, else False
    Notes
    -----
    If the operation is BLAS level 1 or 2 and the data is not already aligned,
    we default back to einsum, as the memory movement to copy is more
    costly than the operation itself.
    Examples
    --------
    # Standard GEMM operation
    >>> can_dot(['ij', 'jk'], 'ik', set('j'))
    True
    # Can use the standard BLAS, but requires odd data movement
    >>> can_dot(['ijj', 'jk'], 'ik', set('j'))
    False
    # DDOT where the memory is not aligned
    >>> can_dot(['ijk', 'ikj'], '', set('ijk'))
    False
    """
    # All `dot` calls remove indices
    if len(idx_removed) == 0:
        return False
    # BLAS can only handle two operands
    if len(inputs) != 2:
        return False
    input_left, input_right = inputs
    for c in set(input_left + input_right):
        # can't deal with repeated indices on same input or more than 2 total
        nl, nr = input_left.count(c), input_right.count(c)
        if (nl > 1) or (nr > 1) or (nl + nr > 2):
            return False
        # can't do implicit summation or dimension collapse e.g.
        #     "ab,bc->c"    (implicitly sum over 'a')
        #     "ab,ca->ca"   (take diagonal of 'a')
        if nl + nr - 1 == int(c in result):
            return False
    # Build a few temporaries
    set_left = set(input_left)
    set_right = set(input_right)
    keep_left = set_left - idx_removed
    keep_right = set_right - idx_removed
    rs = len(idx_removed)
    # At this point we are a DOT, GEMV, or GEMM operation
    # Handle inner products
    # DDOT with aligned data
    if input_left == input_right:
        return True
    # DDOT without aligned data (better to use einsum)
    if set_left == set_right:
        return False
    # Handle the 4 possible (aligned) GEMV or GEMM cases
    # GEMM or GEMV no transpose
    if input_left[-rs:] == input_right[:rs]:
        return True
    # GEMM or GEMV transpose both
    if input_left[:rs] == input_right[-rs:]:
        return True
    # GEMM or GEMV transpose right
    if input_left[-rs:] == input_right[-rs:]:
        return True
    # GEMM or GEMV transpose left
    if input_left[:rs] == input_right[:rs]:
        return True
    # Einsum is faster than GEMV if we have to copy data
    if not keep_left or not keep_right:
        return False
    # We are a matrix-matrix product, but we need to copy data
    return True
def parse_einsum_input(operands, subscripts=None):
    """Reproduction of einsum C-side parsing in Python.
    Returns
    -------
    input_strings : str
        Parsed input strings
    output_string : str
        Parsed output string
    operands : list of array_like
        The operands to use in the numpy contraction
    Examples
    --------
    The operand list is simplified to reduce printing:
    >>> np.random.seed(123)
    >>> a = np.random.rand(4, 4)
    >>> b = np.random.rand(4, 4, 4)
    >>> parse_einsum_input(('...a,...a->...', a, b))
    ('za,xza', 'xz', [a, b]) # may vary
    >>> parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
    ('za,xza', 'xz', [a, b]) # may vary
    """
    if len(operands) == 0:
        raise ValueError("No input operands")
    if subscripts:
        subscripts = subscripts.replace(" ", "")
        operands = [possibly_convert_to_numpy(x) for x in operands]
    elif isinstance(operands[0], str):
        subscripts = operands[0].replace(" ", "")
        operands = [possibly_convert_to_numpy(x) for x in operands[1:]]
    else:
        subscripts, operands = convert_interleaved_input(operands)
    # Check for proper "->"
    if ("-" in subscripts) or (">" in subscripts):
        invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
        if invalid or (subscripts.count("->") != 1):
            raise ValueError("Subscripts can only contain one '->'.")
    # Parse ellipses
    if "." in subscripts:
        used = subscripts.replace(".", "").replace(",", "").replace("->", "")
        unused = list(einsum_symbols_set - set(used))
        ellipse_inds = "".join(unused)
        longest = 0
        if "->" in subscripts:
            input_tmp, output_sub = subscripts.split("->")
            split_subscripts = input_tmp.split(",")
            out_sub = True
        else:
            split_subscripts = subscripts.split(",")
            out_sub = False
        for num, sub in enumerate(split_subscripts):
            if "." in sub:
                if (sub.count(".") != 3) or (sub.count("...") != 1):
                    raise ValueError("Invalid Ellipses.")
                # Take into account numerical values
                if operands[num].shape == ():
                    ellipse_count = 0
                else:
                    ellipse_count = max(operands[num].ndim, 1)
                    ellipse_count -= len(sub) - 3
                if ellipse_count > longest:
                    longest = ellipse_count
                if ellipse_count < 0:
                    raise ValueError("Ellipses lengths do not match.")
                elif ellipse_count == 0:
                    split_subscripts[num] = sub.replace("...", "")
                else:
                    rep_inds = ellipse_inds[-ellipse_count:]
                    split_subscripts[num] = sub.replace("...", rep_inds)
        subscripts = ",".join(split_subscripts)
        if longest == 0:
            out_ellipse = ""
        else:
            out_ellipse = ellipse_inds[-longest:]
        if out_sub:
            subscripts += "->" + output_sub.replace("...", out_ellipse)
        else:
            # Special care for outputless ellipses
            output_subscript = ""
            tmp_subscripts = subscripts.replace(",", "")
            for s in sorted(set(tmp_subscripts)):
                if s not in (einsum_symbols):
                    raise ValueError(f"Character {s} is not a valid symbol.")
                if tmp_subscripts.count(s) == 1:
                    output_subscript += s
            normal_inds = "".join(sorted(set(output_subscript) - set(out_ellipse)))
            subscripts += f"->{out_ellipse}{normal_inds}"
    # Build output string if it does not exist
    if "->" in subscripts:
        input_subscripts, output_subscript = subscripts.split("->")
    else:
        input_subscripts = subscripts
        # Build output subscripts
        tmp_subscripts = subscripts.replace(",", "")
        output_subscript = ""
        for s in sorted(set(tmp_subscripts)):
            if s not in einsum_symbols:
                raise ValueError(f"Character {s} is not a valid symbol.")
            if tmp_subscripts.count(s) == 1:
                output_subscript += s
    # Make sure output subscripts are in the input
    for char in output_subscript:
        if char not in input_subscripts:
            raise ValueError(f"Output character {char} did not appear in the input")
    # Make sure the number of operands is equivalent to the number of terms
    if len(input_subscripts.split(",")) != len(operands):
        raise ValueError(
            "Number of einsum subscripts must be equal to the number of operands."
        )
    return (input_subscripts, output_subscript, operands)
| ivy/ivy/utils/einsum_path_helpers.py/0 | {
"file_path": "ivy/ivy/utils/einsum_path_helpers.py",
"repo_id": "ivy",
"token_count": 8877
} | 51 |
# general
import json
import os
import pytest
import importlib
import inspect
import functools
from typing import List, Optional
from hypothesis import given, strategies as st, example
# local
import ivy.functional.frontends.numpy as np_frontend
from .hypothesis_helpers import number_helpers as nh
from .globals import TestData
from . import test_parameter_flags as pf
from . import test_globals as t_globals
from .pipeline_helper import BackendHandler
from ivy_tests.test_ivy.helpers.test_parameter_flags import (
DynamicFlag,
BuiltInstanceStrategy,
BuiltAsVariableStrategy,
BuiltNativeArrayStrategy,
BuiltGradientStrategy,
BuiltContainerStrategy,
BuiltWithOutStrategy,
BuiltWithCopyStrategy,
BuiltInplaceStrategy,
BuiltTraceStrategy,
BuiltFrontendArrayStrategy,
BuiltTranspileStrategy,
BuiltPrecisionModeStrategy,
BuiltCythonWrapperStrategy,
)
from ivy_tests.test_ivy.helpers.structs import FrontendMethodData
from ivy_tests.test_ivy.helpers.available_frameworks import available_frameworks
from ivy_tests.test_ivy.helpers.hypothesis_helpers.dtype_helpers import (
_dtype_kind_keys,
_get_type_dict,
)
from .globals import mod_backend
cmd_line_args = (
"with_out",
"instance_method",
"test_gradients",
"test_trace",
"precision_mode",
)
cmd_line_args_lists = (
"as_variable",
"native_array",
"container",
)
def _get_runtime_flag_value(flag):
return flag.strategy if isinstance(flag, DynamicFlag) else flag
@st.composite
def num_positional_args_method(draw, *, method):
"""Draws an integers randomly from the minimum and maximum number of
positional arguments a given method can take.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
method
callable method
Returns
-------
A strategy that can be used in the @given hypothesis decorator.
"""
total, num_positional_only, num_keyword_only = (0, 0, 0)
for param in inspect.signature(method).parameters.values():
if param.name == "self":
continue
total += 1
if param.kind == param.POSITIONAL_ONLY:
num_positional_only += 1
elif param.kind in [param.KEYWORD_ONLY, param.VAR_KEYWORD]:
num_keyword_only += 1
return draw(
nh.ints(min_value=num_positional_only, max_value=(total - num_keyword_only))
)
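# For example (illustrative only): for a method ``def f(self, a, b, *, c)`` the
# draw above ranges over 0..2, since ``self`` is skipped, no parameter is
# positional-only and ``c`` is keyword-only (total=3, num_keyword_only=1).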
@st.composite
def num_positional_args(draw, *, fn_name: Optional[str] = None):
"""Draws an integers randomly from the minimum and maximum number of
positional arguments a given function can take.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a
given data-set (ex. list).
fn_name
name of the function.
Returns
-------
A strategy that can be used in the @given hypothesis decorator.
Examples
--------
@given(
num_positional_args=num_positional_args(fn_name="floor_divide")
)
@given(
num_positional_args=num_positional_args(fn_name="add")
)
"""
if mod_backend[t_globals.CURRENT_BACKEND]:
proc, input_queue, output_queue = mod_backend[t_globals.CURRENT_BACKEND]
input_queue.put(
("num_positional_args_helper", fn_name, t_globals.CURRENT_BACKEND)
)
num_positional_only, total, num_keyword_only = output_queue.get()
else:
num_positional_only, total, num_keyword_only = num_positional_args_helper(
fn_name, t_globals.CURRENT_BACKEND
)
return draw(
nh.ints(min_value=num_positional_only, max_value=(total - num_keyword_only))
)
def num_positional_args_helper(fn_name, backend):
num_positional_only = 0
num_keyword_only = 0
total = 0
fn = None
with BackendHandler.update_backend(backend) as ivy_backend:
ivy_backend.utils.dynamic_import.import_module(fn_name.rpartition(".")[0])
for i, fn_name_key in enumerate(fn_name.split(".")):
if i == 0:
fn = ivy_backend.__dict__[fn_name_key]
else:
fn = fn.__dict__[fn_name_key]
for param in inspect.signature(fn).parameters.values():
if param.name == "self":
continue
total += 1
if param.kind == param.POSITIONAL_ONLY:
num_positional_only += 1
elif param.kind in [param.KEYWORD_ONLY, param.VAR_KEYWORD]:
num_keyword_only += 1
return num_positional_only, total, num_keyword_only
# Decorators helpers
def _import_fn(fn_tree: str):
"""Import a function from function tree string.
Parameters
----------
fn_tree
Full function tree without "ivy" root
example: "functional.backends.jax.creation.arange".
Returns
-------
Returns fn_name, imported module, callable function
"""
split_index = fn_tree.rfind(".")
fn_name = fn_tree[split_index + 1 :]
module_to_import = fn_tree[:split_index]
mod = importlib.import_module(module_to_import)
try:
callable_fn = mod.__dict__[fn_name]
except KeyError:
raise ImportError(
f"Error: The function '{fn_name}' could not be found within the module"
f" '{module_to_import}'.\nPlease double-check the function name and its"
" associated path.\nIf this function is a new feature you'd like to see,"
" we'd love to hear from you! You can contribute to our project. For more"
" details, please"
" visit:\nhttps://lets-unify.ai/ivy/contributing/open_tasks.html\n"
)
return callable_fn, fn_name, module_to_import
def _get_method_supported_devices_dtypes_helper(
method_name: str, class_module: str, class_name: str, backend_str: str
):
# helper to delegate backend related
# computation outside the main function
# so as to ease multiprocessing
with BackendHandler.update_backend(backend_str) as backend:
_fn = getattr(class_module.__dict__[class_name], method_name)
devices_and_dtypes = backend.function_supported_devices_and_dtypes(_fn)
organized_dtypes = {}
for device in devices_and_dtypes.keys():
organized_dtypes[device] = _partition_dtypes_into_kinds(
backend_str, devices_and_dtypes[device]
)
return organized_dtypes
def _get_method_supported_devices_dtypes(
method_name: str, class_module: str, class_name: str
):
"""Get supported devices and data types for a method in Ivy API.
Parameters
----------
method_name
Name of the method in the class
class_module
Name of the class module
class_name
Name of the class
Returns
-------
Returns a dictionary containing supported device types and its supported data types
for the method
"""
supported_device_dtypes = {}
for backend_str in available_frameworks:
if mod_backend[backend_str]:
# we gotta do this using multiprocessing
proc, input_queue, output_queue = mod_backend[backend_str]
input_queue.put(
(
"method supported dtypes",
method_name,
class_module.__name__,
class_name,
backend_str,
)
)
supported_device_dtypes[backend_str] = output_queue.get()
else:
supported_device_dtypes[backend_str] = (
_get_method_supported_devices_dtypes_helper(
method_name, class_module, class_name, backend_str
)
)
return supported_device_dtypes
def _get_supported_devices_dtypes_helper(
backend_str: str, fn_module: str, fn_name: str
):
# helper function so as to ease multiprocessing
with BackendHandler.update_backend(backend_str) as backend:
_tmp_mod = importlib.import_module(fn_module) # TODO use dynamic import?
_fn = _tmp_mod.__dict__[fn_name]
devices_and_dtypes = backend.function_supported_devices_and_dtypes(_fn)
try:
# Issue with bfloat16 and tensorflow
if "bfloat16" in devices_and_dtypes["gpu"]:
tmp = list(devices_and_dtypes["gpu"])
tmp.remove("bfloat16")
devices_and_dtypes["gpu"] = tuple(tmp)
except KeyError:
pass
organized_dtypes = {}
for device in devices_and_dtypes.keys():
organized_dtypes[device] = _partition_dtypes_into_kinds(
backend_str, devices_and_dtypes[device]
)
return organized_dtypes
def _get_supported_devices_dtypes(fn_name: str, fn_module: str):
"""Get supported devices and data types for a function in Ivy API.
Parameters
----------
fn_name
Name of the function
fn_module
Full import path of the function module
Returns
-------
Returns a dictionary containing supported device types and its supported data types
for the function
"""
supported_device_dtypes = {}
# This is for getting a private function from numpy frontend where we have
# a ufunc object as we can't refer to them as functions
if fn_module == "ivy.functional.frontends.numpy":
fn_module_ = np_frontend
if isinstance(getattr(fn_module_, fn_name), fn_module_.ufunc):
fn_name = f"_{fn_name}"
for backend_str in available_frameworks:
if mod_backend[backend_str]:
# we know we need to use multiprocessing
# to get the devices and dtypes
proc, input_queue, output_queue = mod_backend[backend_str]
input_queue.put(("supported dtypes", fn_module, fn_name, backend_str))
supported_device_dtypes[backend_str] = output_queue.get()
else:
supported_device_dtypes[backend_str] = _get_supported_devices_dtypes_helper(
backend_str, fn_module, fn_name
)
return supported_device_dtypes
def _partition_dtypes_into_kinds(framework: str, dtypes):
partitioned_dtypes = {}
for kind in _dtype_kind_keys:
partitioned_dtypes[kind] = set(_get_type_dict(framework, kind)).intersection(
dtypes
)
return partitioned_dtypes
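# Illustrative example (the exact kind keys come from ``_dtype_kind_keys``):
# partitioning ("float32", "int64") would give e.g. a "float" bucket containing
# {"float32"} and an "integer" bucket containing {"int64"}, while broader kinds
# such as "numeric" would contain both.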
# Decorators
def handle_test(
*,
fn_tree: Optional[str] = None,
ground_truth_backend: str = "tensorflow",
number_positional_args=None,
test_instance_method=BuiltInstanceStrategy,
test_with_out=BuiltWithOutStrategy,
test_with_copy=BuiltWithCopyStrategy,
test_gradients=BuiltGradientStrategy,
test_trace=BuiltTraceStrategy,
transpile=BuiltTranspileStrategy,
precision_mode=BuiltPrecisionModeStrategy,
as_variable_flags=BuiltAsVariableStrategy,
native_array_flags=BuiltNativeArrayStrategy,
container_flags=BuiltContainerStrategy,
test_cython_wrapper=BuiltCythonWrapperStrategy,
**_given_kwargs,
):
"""Test wrapper for Ivy functions.
The wrapper sets the required test globals and creates test flags strategies.
Parameters
----------
fn_tree
Full function import path
ground_truth_backend
The framework to assert test results are equal to
number_positional_args
A search strategy for determining the number of positional arguments to be
passed to the function
test_instance_method
A search strategy that generates a boolean to test instance methods
test_with_out
A search strategy that generates a boolean to test the function with an `out`
parameter
test_with_copy
        A search strategy that generates a boolean to test the function with a `copy`
parameter
test_gradients
A search strategy that generates a boolean to test the function with arrays as
gradients
test_trace
A search strategy that generates a boolean to trace and test the
function
precision_mode
A search strategy that generates a boolean to switch between two different
precision modes supported by numpy and (torch, jax) and test the function
as_variable_flags
A search strategy that generates a list of boolean flags for array inputs to be
passed as a Variable array
native_array_flags
A search strategy that generates a list of boolean flags for array inputs to be
passed as a native array
container_flags
A search strategy that generates a list of boolean flags for array inputs to be
passed as a Container
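    Examples
    --------
    A minimal usage sketch; the function name and strategy below are
    illustrative placeholders rather than part of this module:
    @handle_test(
        fn_tree="functional.ivy.abs",
        dtype_and_x=helpers.dtype_and_values(
            available_dtypes=helpers.get_dtypes("numeric")
        ),
    )
    def test_abs(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
        ...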
"""
is_fn_tree_provided = fn_tree is not None
if is_fn_tree_provided:
fn_tree = f"ivy.{fn_tree}"
is_hypothesis_test = len(_given_kwargs) != 0
possible_arguments = {}
if is_hypothesis_test and is_fn_tree_provided:
# Use the default strategy
if number_positional_args is None:
number_positional_args = num_positional_args(fn_name=fn_tree)
# Generate the test flags strategy
possible_arguments["test_flags"] = pf.function_flags(
ground_truth_backend=st.just(ground_truth_backend),
num_positional_args=number_positional_args,
instance_method=_get_runtime_flag_value(test_instance_method),
with_out=_get_runtime_flag_value(test_with_out),
with_copy=_get_runtime_flag_value(test_with_copy),
test_gradients=_get_runtime_flag_value(test_gradients),
test_trace=_get_runtime_flag_value(test_trace),
transpile=_get_runtime_flag_value(transpile),
as_variable=_get_runtime_flag_value(as_variable_flags),
native_arrays=_get_runtime_flag_value(native_array_flags),
container_flags=_get_runtime_flag_value(container_flags),
precision_mode=_get_runtime_flag_value(precision_mode),
test_cython_wrapper=_get_runtime_flag_value(test_cython_wrapper),
)
def test_wrapper(test_fn):
if is_fn_tree_provided:
callable_fn, fn_name, fn_mod = _import_fn(fn_tree)
supported_device_dtypes = _get_supported_devices_dtypes(fn_name, fn_mod)
possible_arguments["fn_name"] = st.just(fn_name)
# If a test is not a Hypothesis test, we only set the test global data
if is_hypothesis_test:
param_names = inspect.signature(test_fn).parameters.keys()
# Check if these arguments are being asked for
filtered_args = set(param_names).intersection(possible_arguments.keys())
for key in filtered_args:
_given_kwargs[key] = possible_arguments[key]
# Wrap the test with the @given decorator
hypothesis_test_fn = given(**_given_kwargs)(test_fn)
@functools.wraps(hypothesis_test_fn)
def wrapped_test(*args, **kwargs):
try:
hypothesis_test_fn(*args, **kwargs)
except Exception as e:
                    # String matching is used instead of the actual exception type, as
                    # the exception object under with_backend differs from global Ivy's
if e.__class__.__qualname__ == "IvyNotImplementedException":
pytest.skip("Function not implemented in backend.")
else:
raise e
else:
wrapped_test = test_fn
# Set the test data to be used by test helpers
if is_fn_tree_provided:
wrapped_test.test_data = TestData(
test_fn=wrapped_test,
fn_tree=fn_tree,
fn_name=fn_name,
supported_device_dtypes=supported_device_dtypes,
)
wrapped_test._ivy_test = True
wrapped_test.ground_truth_backend = ground_truth_backend
return wrapped_test
return test_wrapper
def handle_frontend_test(
*,
fn_tree: str,
gt_fn_tree: Optional[str] = None,
aliases: Optional[List[str]] = None,
number_positional_args=None,
test_with_out=BuiltWithOutStrategy,
test_with_copy=BuiltWithCopyStrategy,
test_inplace=BuiltInplaceStrategy,
as_variable_flags=BuiltAsVariableStrategy,
native_array_flags=BuiltNativeArrayStrategy,
test_trace=BuiltTraceStrategy,
generate_frontend_arrays=BuiltFrontendArrayStrategy,
transpile=BuiltTranspileStrategy,
precision_mode=BuiltPrecisionModeStrategy,
**_given_kwargs,
):
"""Test wrapper for Ivy frontend functions.
The wrapper sets the required test globals and creates test flags strategies.
Parameters
----------
fn_tree
Full function import path
gt_fn_tree
Full function import path for the ground truth function, by default will be
the same as fn_tree
number_positional_args
A search strategy for determining the number of positional arguments to be
passed to the function
test_inplace
A search strategy that generates a boolean to test the method with `inplace`
update
test_with_out
A search strategy that generates a boolean to test the function with an `out`
parameter
test_with_copy
        A search strategy that generates a boolean to test the function with a `copy`
parameter
precision_mode
A search strategy that generates a boolean to switch between two different
precision modes supported by numpy and (torch, jax) and test the function
as_variable_flags
A search strategy that generates a list of boolean flags for array inputs to be
passed as a Variable array
native_array_flags
A search strategy that generates a list of boolean flags for array inputs to be
passed as a native array
test_trace
A search strategy that generates a boolean to trace and test the
function
generate_frontend_arrays
A search strategy that generates a list of boolean flags for array inputs to
be frontend array
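    Examples
    --------
    A minimal usage sketch; the frontend function and strategy below are
    illustrative placeholders:
    @handle_frontend_test(
        fn_tree="numpy.add",
        dtype_and_x=helpers.dtype_and_values(
            available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2
        ),
    )
    def test_numpy_add(
        *, dtype_and_x, frontend, test_flags, fn_tree, backend_fw, on_device
    ):
        ...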
"""
fn_tree = f"ivy.functional.frontends.{fn_tree}"
if aliases is not None:
for i in range(len(aliases)):
aliases[i] = f"ivy.functional.frontends.{aliases[i]}"
is_hypothesis_test = len(_given_kwargs) != 0
if is_hypothesis_test:
# Use the default strategy
if number_positional_args is None:
number_positional_args = num_positional_args(fn_name=fn_tree)
# Generate the test flags strategy
test_flags = pf.frontend_function_flags(
num_positional_args=number_positional_args,
with_out=_get_runtime_flag_value(test_with_out),
with_copy=_get_runtime_flag_value(test_with_copy),
inplace=_get_runtime_flag_value(test_inplace),
as_variable=_get_runtime_flag_value(as_variable_flags),
native_arrays=_get_runtime_flag_value(native_array_flags),
test_trace=_get_runtime_flag_value(test_trace),
generate_frontend_arrays=_get_runtime_flag_value(generate_frontend_arrays),
transpile=_get_runtime_flag_value(transpile),
precision_mode=_get_runtime_flag_value(precision_mode),
)
def test_wrapper(test_fn):
callable_fn, fn_name, fn_mod = _import_fn(fn_tree)
supported_device_dtypes = _get_supported_devices_dtypes(fn_name, fn_mod)
# If a test is not a Hypothesis test, we only set the test global data
if is_hypothesis_test:
param_names = inspect.signature(test_fn).parameters.keys()
# Check if these arguments are being asked for
possible_arguments = {
"test_flags": test_flags,
"fn_tree": (
st.sampled_from([fn_tree] + aliases)
if aliases is not None
else st.just(fn_tree)
),
"gt_fn_tree": st.just(gt_fn_tree),
}
filtered_args = set(param_names).intersection(possible_arguments.keys())
for key in filtered_args:
# extend Hypothesis given kwargs with our strategies
_given_kwargs[key] = possible_arguments[key]
# Wrap the test with the @given decorator
hypothesis_test_fn = given(**_given_kwargs)(test_fn)
@functools.wraps(hypothesis_test_fn)
def wrapped_test(*args, **kwargs):
try:
hypothesis_test_fn(*args, **kwargs)
except Exception as e:
                    # String matching is used instead of the actual exception type, as
                    # the exception object under with_backend differs from global Ivy's
if e.__class__.__qualname__ == "IvyNotImplementedException":
pytest.skip("Function not implemented in backend.")
else:
raise e
else:
wrapped_test = test_fn
wrapped_test.test_data = TestData(
test_fn=wrapped_test,
fn_tree=fn_tree,
fn_name=fn_name,
supported_device_dtypes=supported_device_dtypes,
)
return wrapped_test
return test_wrapper
def _import_method(method_tree: str):
split_index = method_tree.rfind(".")
class_tree, method_name = method_tree[:split_index], method_tree[split_index + 1 :]
split_index = class_tree.rfind(".")
mod_to_import, class_name = class_tree[:split_index], class_tree[split_index + 1 :]
_mod = importlib.import_module(mod_to_import)
_class = _mod.__getattribute__(class_name)
_method = getattr(_class, method_name)
return _method, method_name, _class, class_name, _mod
def handle_method(
*,
init_tree: str = "",
method_tree: Optional[str] = None,
ground_truth_backend: str = "tensorflow",
test_gradients=BuiltGradientStrategy,
test_trace=BuiltTraceStrategy,
precision_mode=BuiltPrecisionModeStrategy,
init_num_positional_args=None,
init_native_arrays=BuiltNativeArrayStrategy,
init_as_variable_flags=BuiltAsVariableStrategy,
method_num_positional_args=None,
method_native_arrays=BuiltNativeArrayStrategy,
method_as_variable_flags=BuiltAsVariableStrategy,
method_container_flags=BuiltContainerStrategy,
**_given_kwargs,
):
"""Test wrapper for Ivy methods.
The wrapper sets the required test globals and creates test flags strategies.
Parameters
----------
method_tree
Full method import path
ground_truth_backend
The framework to assert test results are equal to
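    Examples
    --------
    A minimal usage sketch; the trees and strategy below are illustrative
    placeholders:
    @handle_method(
        init_tree="functional.ivy.array",
        method_tree="Array.abs",
        dtype_and_x=helpers.dtype_and_values(
            available_dtypes=helpers.get_dtypes("numeric")
        ),
    )
    def test_array_abs(
        *, dtype_and_x, init_flags, method_flags, class_name, method_name,
        ground_truth_backend, on_device
    ):
        ...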
"""
# need to fill up the docstring
is_method_tree_provided = method_tree is not None
if is_method_tree_provided:
method_tree = f"ivy.{method_tree}"
is_hypothesis_test = len(_given_kwargs) != 0
possible_arguments = {
"ground_truth_backend": st.just(ground_truth_backend),
"test_gradients": _get_runtime_flag_value(test_gradients),
"test_trace": _get_runtime_flag_value(test_trace),
"precision_mode": _get_runtime_flag_value(precision_mode),
}
if is_hypothesis_test and is_method_tree_provided:
callable_method, method_name, _, class_name, method_mod = _import_method(
method_tree
)
if init_num_positional_args is None:
init_num_positional_args = num_positional_args(fn_name=init_tree)
possible_arguments["init_flags"] = pf.init_method_flags(
num_positional_args=init_num_positional_args,
as_variable=_get_runtime_flag_value(init_as_variable_flags),
native_arrays=_get_runtime_flag_value(init_native_arrays),
precision_mode=_get_runtime_flag_value(precision_mode),
)
if method_num_positional_args is None:
method_num_positional_args = num_positional_args_method(
method=callable_method
)
possible_arguments["method_flags"] = pf.method_flags(
num_positional_args=method_num_positional_args,
as_variable=_get_runtime_flag_value(method_as_variable_flags),
native_arrays=_get_runtime_flag_value(method_native_arrays),
container_flags=_get_runtime_flag_value(method_container_flags),
precision_mode=_get_runtime_flag_value(precision_mode),
)
def test_wrapper(test_fn):
if is_method_tree_provided:
supported_device_dtypes = _get_method_supported_devices_dtypes(
method_name, method_mod, class_name
)
possible_arguments["class_name"] = st.just(class_name)
possible_arguments["method_name"] = st.just(method_name)
if is_hypothesis_test:
param_names = inspect.signature(test_fn).parameters.keys()
filtered_args = set(param_names).intersection(possible_arguments.keys())
for key in filtered_args:
# extend Hypothesis given kwargs with our strategies
_given_kwargs[key] = possible_arguments[key]
hypothesis_test_fn = given(**_given_kwargs)(test_fn)
@functools.wraps(hypothesis_test_fn)
def wrapped_test(*args, **kwargs):
try:
hypothesis_test_fn(*args, **kwargs)
except Exception as e:
                    # String matching is used instead of the actual exception type, as
                    # the exception object under with_backend differs from global Ivy's
if e.__class__.__qualname__ == "IvyNotImplementedException":
pytest.skip("Function not implemented in backend.")
else:
raise e
else:
wrapped_test = test_fn
wrapped_test.test_data = TestData(
test_fn=wrapped_test,
fn_tree=method_tree,
fn_name=method_name,
supported_device_dtypes=supported_device_dtypes,
is_method=True,
)
wrapped_test.ground_truth_backend = ground_truth_backend
wrapped_test._ivy_test = True
return wrapped_test
return test_wrapper
def handle_frontend_method(
*,
class_tree: str,
init_tree: str,
method_name: str,
init_num_positional_args=None,
init_native_arrays=BuiltNativeArrayStrategy,
init_as_variable_flags=BuiltAsVariableStrategy,
test_trace=BuiltTraceStrategy,
precision_mode=BuiltPrecisionModeStrategy,
method_num_positional_args=None,
method_native_arrays=BuiltNativeArrayStrategy,
method_as_variable_flags=BuiltAsVariableStrategy,
test_inplace=BuiltInplaceStrategy,
generate_frontend_arrays=BuiltFrontendArrayStrategy,
**_given_kwargs,
):
"""Test wrapper for Ivy frontends methods.
The wrapper sets the required test globals and creates
test flags strategies.
Parameters
----------
class_tree
Full class import path
init_tree
Full import path for the function used to create the class
method_name
Name of the method
init_num_positional_args
A search strategy that generates a number of positional arguments
to be passed during instantiation of the class
init_native_arrays
A search strategy that generates a boolean to test the method with native
arrays
init_as_variable_flags
A search strategy that generates a list of boolean flags for array inputs to be
passed as a Variable array
    test_trace
        A search strategy that generates a boolean to trace and test the
        function
precision_mode
A search strategy that generates a boolean to switch between two different
precision modes supported by numpy and (torch, jax) and test the function
method_num_positional_args
A search strategy that generates a number of positional arguments
to be passed during call of the class method
method_native_arrays
A search strategy that generates a boolean to test the method with native
arrays
method_as_variable_flags
A search strategy that generates a list of boolean flags for array inputs to be
passed as a Variable array
test_inplace
A search strategy that generates a boolean to test the method with `inplace`
update
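    Examples
    --------
    A minimal usage sketch; the class, init function and method below are
    illustrative placeholders:
    @handle_frontend_method(
        class_tree="ivy.functional.frontends.torch.Tensor",
        init_tree="torch.tensor",
        method_name="abs",
        dtype_and_x=helpers.dtype_and_values(
            available_dtypes=helpers.get_dtypes("numeric")
        ),
    )
    def test_torch_tensor_abs(
        *, dtype_and_x, frontend_method_data, init_flags, method_flags,
        frontend, backend_fw, on_device
    ):
        ...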
"""
split_index = init_tree.rfind(".")
framework_init_module = init_tree[:split_index]
ivy_init_module = f"ivy.functional.frontends.{init_tree[:split_index]}"
init_name = init_tree[split_index + 1 :]
init_tree = f"ivy.functional.frontends.{init_tree}"
is_hypothesis_test = len(_given_kwargs) != 0
split_index = class_tree.rfind(".")
class_module_path, class_name = (
class_tree[:split_index],
class_tree[split_index + 1 :],
)
class_module = importlib.import_module(class_module_path)
method_class = getattr(class_module, class_name)
if is_hypothesis_test:
callable_method = getattr(method_class, method_name)
if init_num_positional_args is None:
init_num_positional_args = num_positional_args(fn_name=init_tree)
if method_num_positional_args is None:
method_num_positional_args = num_positional_args_method(
method=callable_method
)
def test_wrapper(test_fn):
supported_device_dtypes = _get_method_supported_devices_dtypes(
method_name, class_module, class_name
)
if is_hypothesis_test:
param_names = inspect.signature(test_fn).parameters.keys()
init_flags = pf.frontend_init_flags(
num_positional_args=init_num_positional_args,
as_variable=_get_runtime_flag_value(init_as_variable_flags),
native_arrays=_get_runtime_flag_value(init_native_arrays),
)
method_flags = pf.frontend_method_flags(
num_positional_args=method_num_positional_args,
inplace=_get_runtime_flag_value(test_inplace),
as_variable=_get_runtime_flag_value(method_as_variable_flags),
native_arrays=_get_runtime_flag_value(method_native_arrays),
test_trace=_get_runtime_flag_value(test_trace),
precision_mode=_get_runtime_flag_value(precision_mode),
generate_frontend_arrays=_get_runtime_flag_value(
generate_frontend_arrays
),
)
ivy_init_modules = str(ivy_init_module)
framework_init_modules = str(framework_init_module)
frontend_helper_data = FrontendMethodData(
ivy_init_module=ivy_init_modules,
framework_init_module=framework_init_modules,
init_name=init_name,
method_name=method_name,
)
possible_arguments = {
"init_flags": init_flags,
"method_flags": method_flags,
"frontend_method_data": st.just(frontend_helper_data),
}
filtered_args = set(param_names).intersection(possible_arguments.keys())
for key in filtered_args:
# extend Hypothesis given kwargs with our strategies
_given_kwargs[key] = possible_arguments[key]
hypothesis_test_fn = given(**_given_kwargs)(test_fn)
@functools.wraps(hypothesis_test_fn)
def wrapped_test(*args, **kwargs):
try:
hypothesis_test_fn(*args, **kwargs)
except Exception as e:
                    # String matching is used instead of the actual exception type, as
                    # the exception object under with_backend differs from global Ivy's
if e.__class__.__qualname__ == "IvyNotImplementedException":
pytest.skip("Function not implemented in backend.")
else:
raise e
else:
wrapped_test = test_fn
wrapped_test.test_data = TestData(
test_fn=wrapped_test,
fn_tree=f"{init_tree}.{method_name}",
fn_name=method_name,
supported_device_dtypes=supported_device_dtypes,
is_method=[method_name, class_tree, split_index],
)
return wrapped_test
return test_wrapper
@st.composite
def seed(draw):
return draw(st.integers(min_value=0, max_value=2**8 - 1))
def _create_transpile_report(
data: dict, backend: str, file_name: str, is_backend: bool = False
):
backend_specific_data = ["nodes", "time", "args", "kwargs"]
# json report exists already
if os.path.isfile(file_name):
with open(file_name, "r") as outfile:
# Load the file's existing data
file_data = json.load(outfile)
if file_data["nodes"].get(backend, 0) > data["nodes"]:
return
        # update the keys that are backend specific
for key in backend_specific_data:
file_data[key][backend] = data[key]
if not is_backend:
# not backend specific
for key in ["ivy_nodes", "fw_time"]:
file_data[key] = data[key]
json_object = json.dumps(file_data, indent=6)
with open(file_name, "w") as outfile:
outfile.write(json_object)
return
# create new json report
for key in backend_specific_data:
data[key] = {backend: data[key]}
json_object = json.dumps(data, indent=6)
with open(file_name, "w") as outfile:
outfile.write(json_object)
def handle_example(
*,
test_example: bool = False,
test_frontend_example: bool = False,
test_method_example: bool = False,
test_frontend_method_example: bool = False,
**given_kwargs,
):
if test_example:
test_flags = given_kwargs.get("test_flags", {})
flags = pf.FunctionTestFlags(
ground_truth_backend=test_flags.get("ground_truth_backend", "numpy"),
num_positional_args=test_flags.get("num_positional_args", 0),
instance_method=test_flags.get("instance_method", False),
with_out=test_flags.get("with_out", False),
with_copy=test_flags.get("with_copy", False),
test_gradients=test_flags.get("test_gradients", False),
test_trace=test_flags.get("test_trace", False),
transpile=test_flags.get("transpile", False),
as_variable=test_flags.get("as_variable", [False]),
native_arrays=test_flags.get("native_arrays", [False]),
container=test_flags.get("container", [False]),
precision_mode=test_flags.get("precision_mode", False),
test_cython_wrapper=test_flags.get("test_cython_wrapper", False),
)
given_kwargs["test_flags"] = flags
elif test_frontend_example:
test_flags = given_kwargs.get("test_flags", {})
flags = pf.FrontendFunctionTestFlags(
num_positional_args=test_flags.get("num_positional_args", 0),
with_out=test_flags.get("with_out", False),
with_copy=test_flags.get("with_copy", False),
inplace=test_flags.get("inplace", False),
as_variable=test_flags.get("as_variable", [False]),
native_arrays=test_flags.get("native_arrays", [False]),
test_trace=test_flags.get("test_trace", False),
generate_frontend_arrays=test_flags.get("generate_frontend_arrays", False),
transpile=test_flags.get("transpile", False),
precision_mode=test_flags.get("precision_mode", False),
)
given_kwargs["test_flags"] = flags
elif test_method_example:
method_flags = given_kwargs.get("method_flags", {})
init_flags = given_kwargs.get("init_flags", {})
flags_1 = pf.MethodTestFlags(
num_positional_args=method_flags.get("num_positional_args", 0),
as_variable=method_flags.get("as_variable", [False]),
native_arrays=method_flags.get("native_arrays", [False]),
container_flags=method_flags.get("container", [False]),
precision_mode=method_flags.get("precision_mode", False),
)
flags_2 = pf.InitMethodTestFlags(
num_positional_args=init_flags.get("num_positional_args", 0),
as_variable=init_flags.get("as_variable", [False]),
native_arrays=init_flags.get("native_arrays", [False]),
precision_mode=init_flags.get("precision_mode", False),
)
given_kwargs["method_flags"] = flags_1
given_kwargs["init_flags"] = flags_2
elif test_frontend_method_example:
method_flags = given_kwargs.get("method_flags", {})
init_flags = given_kwargs.get("init_flags", {})
flags_1 = pf.FrontendMethodTestFlags(
num_positional_args=method_flags.get("num_positional_args", 0),
as_variable=method_flags.get("as_variable", [False]),
native_arrays=method_flags.get("native_arrays", [False]),
precision_mode=method_flags.get("precision_mode", False),
inplace=method_flags.get("inplace", False),
test_trace=method_flags.get("test_trace", False),
generate_frontend_arrays=method_flags.get(
"generate_frontend_arrays", False
),
)
flags_2 = pf.FrontendInitTestFlags(
num_positional_args=init_flags.get("num_positional_args", 0),
as_variable=init_flags.get("as_variable", [False]),
native_arrays=init_flags.get("native_arrays", [False]),
)
given_kwargs["method_flags"] = flags_1
given_kwargs["init_flags"] = flags_2
def test_wrapper(test_fn):
hypothesis_test_fn = example(**given_kwargs)(test_fn)
@functools.wraps(hypothesis_test_fn)
def wrapped_test(*args, **kwargs):
try:
hypothesis_test_fn(*args, **kwargs)
except Exception as e:
                # String matching is used instead of the actual exception type, as
                # the exception object under with_backend differs from global Ivy's
if e.__class__.__qualname__ == "IvyNotImplementedException":
pytest.skip("Function not implemented in backend.")
else:
raise e
return wrapped_test
return test_wrapper
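# A minimal usage sketch of ``handle_example`` (all values are illustrative):
# it is stacked on top of one of the handle_* decorators to pin an explicit
# example alongside the Hypothesis-generated ones, e.g.
#     @handle_example(
#         test_frontend_example=True,
#         dtype_and_x=(["float32"], [np.array([1.0, 2.0])]),
#         fn_tree="ivy.functional.frontends.numpy.add",
#     )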
| ivy/ivy_tests/test_ivy/helpers/testing_helpers.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/helpers/testing_helpers.py",
"repo_id": "ivy",
"token_count": 16654
} | 52 |
# import jax
from ivy_tests.test_ivy.test_frontends import NativeClass
jax_classes_to_ivy_classes = {}
def convjax(argument):
"""Convert NativeClass in argument to ivy frontend counterpart for jax."""
if isinstance(argument, NativeClass):
return jax_classes_to_ivy_classes.get(argument._native_class)
return argument
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/__init__.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/__init__.py",
"repo_id": "ivy",
"token_count": 118
} | 53 |
from hypothesis import strategies as st
from ivy_tests.test_ivy.test_frontends.test_numpy.test_creation_routines.test_from_shape_or_value import ( # noqa : E501
_input_fill_and_dtype,
)
from ivy_tests.test_ivy.test_functional.test_core.test_creation import (
_get_dtype_buffer_count_offset,
)
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _get_dtype_and_range(draw):
dim = draw(helpers.ints(min_value=2, max_value=5))
dtype = draw(helpers.get_dtypes("float", index=1, full=False))
start = draw(
helpers.array_values(
dtype=dtype[0],
shape=(dim,),
min_value=-50,
max_value=0,
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
)
)
stop = draw(
helpers.array_values(
dtype=dtype[0],
shape=(dim,),
min_value=1,
max_value=50,
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
)
)
return dtype * 2, start, stop
# --- Main --- #
# ------------ #
# arange
@handle_frontend_test(
fn_tree="jax.numpy.arange",
start=st.integers(min_value=-100, max_value=100),
stop=st.integers(min_value=-100, max_value=100) | st.none(),
step=st.integers(min_value=-100, max_value=100).filter(lambda x: x != 0),
dtype=helpers.get_dtypes("numeric", full=False),
test_with_out=st.just(False),
)
def test_jax_arange(
*,
start,
stop,
step,
dtype,
on_device,
fn_tree,
test_flags,
frontend,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
start=start,
stop=stop,
step=step,
dtype=dtype[0],
)
@handle_frontend_test(
fn_tree="jax.numpy.array",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=st.integers(min_value=1, max_value=10),
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
shared_dtype=True,
),
as_list=st.booleans(),
copy=st.booleans(),
ndmin=helpers.ints(min_value=0, max_value=9),
test_with_out=st.just(True),
test_with_copy=st.just(True),
)
def test_jax_array(
*,
dtype_and_x,
as_list,
copy,
ndmin,
on_device,
fn_tree,
test_flags,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
if as_list:
if isinstance(x, list) and "complex" not in input_dtype[0]:
x = [list(i) if len(i.shape) > 0 else [float(i)] for i in x]
else:
x = list(x)
else:
if len(x) == 1:
x = x[0]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
object=x,
dtype=input_dtype[0],
copy=copy,
order="K",
ndmin=ndmin,
)
# asarray
@handle_frontend_test(
fn_tree="jax.numpy.asarray",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
test_with_out=st.just(False),
)
def test_jax_asarray(
dtype_and_a,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a,
dtype=dtype[0],
)
# bool_
@handle_frontend_test(
fn_tree="jax.numpy.bool_",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("bool")),
)
def test_jax_bool_(
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.numpy.cdouble",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("complex")
),
)
def test_jax_cdouble(
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.numpy.compress",
dtype_arr_ax=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=10,
max_dim_size=100,
valid_axis=True,
force_int_axis=True,
),
condition=helpers.array_values(
dtype=helpers.get_dtypes("bool"),
shape=helpers.get_shape(
min_num_dims=1, max_num_dims=1, min_dim_size=1, max_dim_size=5
),
),
)
def test_jax_compress(
dtype_arr_ax,
condition,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, arr, ax = dtype_arr_ax
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
condition=condition,
a=arr[0],
axis=ax,
)
# copy
@handle_frontend_test(
fn_tree="jax.numpy.copy",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_jax_copy(
dtype_and_a,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a[0],
)
@handle_frontend_test(
fn_tree="jax.numpy.csingle",
aliases=["jax.numpy.complex64"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
)
def test_jax_csingle(
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# double
@handle_frontend_test(
fn_tree="jax.numpy.double",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
)
def test_jax_double(
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# empty
@handle_frontend_test(
fn_tree="jax.numpy.empty",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_jax_empty(
shape,
dtype,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
shape=shape,
dtype=dtype[0],
)
# empty_like
@handle_frontend_test(
fn_tree="jax.numpy.empty_like",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
shape=helpers.get_shape(
allow_none=True,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_jax_empty_like(
dtype_and_x,
shape,
dtype,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
prototype=x[0],
dtype=dtype[0],
shape=shape,
)
# eye
@handle_frontend_test(
fn_tree="jax.numpy.eye",
n=helpers.ints(min_value=3, max_value=10),
m=st.none() | helpers.ints(min_value=3, max_value=10),
k=helpers.ints(min_value=-2, max_value=2),
dtypes=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_jax_eye(
*,
n,
m,
k,
dtypes,
on_device,
fn_tree,
test_flags,
frontend,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
N=n,
M=m,
k=k,
dtype=dtypes[0],
)
# from_dlpack
@handle_frontend_test(
fn_tree="jax.numpy.from_dlpack",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
)
def test_jax_from_dlpack(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
x=x[0],
backend_to_test=backend_fw,
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
)
@handle_frontend_test(
fn_tree="jax.numpy.frombuffer",
dtype_buffer_count_offset=_get_dtype_buffer_count_offset(),
test_with_out=st.just(False),
)
def test_jax_frombuffer(
*,
dtype_buffer_count_offset,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, buffer, count, offset = dtype_buffer_count_offset
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
buffer=buffer,
dtype=input_dtype[0],
count=count,
offset=offset,
)
# full
@handle_frontend_test(
fn_tree="jax.numpy.full",
shape=helpers.get_shape(
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
input_fill_dtype=_input_fill_and_dtype(),
test_with_out=st.just(False),
)
def test_jax_full(
shape,
input_fill_dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, _, fill_value, dtype = input_fill_dtype
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
shape=shape,
fill_value=fill_value,
dtype=dtype,
)
# full_like
@handle_frontend_test(
fn_tree="jax.numpy.full_like",
input_fill_dtype=_input_fill_and_dtype(),
test_with_out=st.just(False),
)
def test_jax_full_like(
input_fill_dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, fill_value, dtype = input_fill_dtype
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
fill_value=fill_value,
dtype=dtype,
)
@handle_frontend_test(
fn_tree="jax.numpy.geomspace",
dtype_start_stop=_get_dtype_and_range(),
num=helpers.ints(min_value=5, max_value=50),
endpoint=st.booleans(),
test_with_out=st.just(False),
)
def test_jax_geomspace(
dtype_start_stop,
num,
endpoint,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, start, stop = dtype_start_stop
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-1,
start=start,
stop=stop,
num=num,
endpoint=endpoint,
dtype=input_dtypes[0],
)
# hstack
@handle_frontend_test(
fn_tree="jax.numpy.hstack",
dtype_and_tup=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shared_dtype=True,
num_arrays=st.integers(min_value=2, max_value=2),
shape=helpers.get_shape(
min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=5
),
),
test_with_out=st.just(False),
)
def test_jax_hstack(
dtype_and_tup,
test_flags,
frontend,
backend_fw,
fn_tree,
):
input_dtype, x = dtype_and_tup
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
tup=x,
)
# identity
@handle_frontend_test(
fn_tree="jax.numpy.identity",
n=helpers.ints(min_value=3, max_value=10),
dtypes=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_jax_identity(
*,
n,
dtypes,
on_device,
fn_tree,
test_flags,
frontend,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
n=n,
dtype=dtypes[0],
)
@handle_frontend_test(
fn_tree="jax.numpy.in1d",
dtype_and_a=helpers.dtype_and_values(min_num_dims=1, max_num_dims=1),
dtype_and_b=helpers.dtype_and_values(min_num_dims=1, max_num_dims=1),
assume_unique=st.booleans(),
invert=st.booleans(),
)
def test_jax_in1d(
*,
dtype_and_a,
dtype_and_b,
assume_unique,
invert,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype_a, a = dtype_and_a
input_dtype_b, b = dtype_and_b
helpers.test_frontend_function(
input_dtypes=input_dtype_a + input_dtype_b,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
ar1=a[0],
ar2=b[0],
assume_unique=assume_unique,
invert=invert,
)
@handle_frontend_test(
fn_tree="jax.numpy.iterable",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_jax_iterable(
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y=x[0],
)
# linspace
@handle_frontend_test(
fn_tree="jax.numpy.linspace",
dtype_start_stop=_get_dtype_and_range(),
num=helpers.ints(min_value=2, max_value=5),
axis=helpers.ints(min_value=-1, max_value=0),
test_with_out=st.just(False),
)
def test_jax_linspace(
dtype_start_stop,
num,
axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, start, stop = dtype_start_stop
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
start=start,
stop=stop,
num=num,
endpoint=True,
retstep=False,
dtype=input_dtypes[0],
axis=axis,
atol=1e-05,
rtol=1e-05,
)
# logspace
@handle_frontend_test(
fn_tree="jax.numpy.logspace",
dtype_start_stop=_get_dtype_and_range(),
num=helpers.ints(min_value=5, max_value=50),
base=helpers.ints(min_value=2, max_value=10),
axis=helpers.ints(min_value=-1, max_value=0),
test_with_out=st.just(False),
)
def test_jax_logspace(
dtype_start_stop,
num,
base,
axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, start, stop = dtype_start_stop
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
start=start,
stop=stop,
num=num,
endpoint=True,
base=base,
dtype=input_dtypes[0],
axis=axis,
)
# meshgrid
@handle_frontend_test(
fn_tree="jax.numpy.meshgrid",
dtype_and_arrays=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=st.integers(min_value=1, max_value=5),
min_num_dims=1,
max_num_dims=1,
shared_dtype=True,
),
sparse=st.booleans(),
indexing=st.sampled_from(["xy", "ij"]),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_jax_meshgrid(
dtype_and_arrays,
sparse,
indexing,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, arrays = dtype_and_arrays
kw = {}
i = 0
for x_ in arrays:
kw[f"x{i}"] = x_
i += 1
test_flags.num_positional_args = len(arrays)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**kw,
sparse=sparse,
indexing=indexing,
)
# ndim
@handle_frontend_test(
fn_tree="jax.numpy.ndim",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
)
def test_jax_ndim(
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
)
# ones
@handle_frontend_test(
fn_tree="jax.numpy.ones",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_jax_ones(
shape,
dtype,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
shape=shape,
dtype=dtype[0],
)
# ones_like
@handle_frontend_test(
fn_tree="jax.numpy.ones_like",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
shape=helpers.get_shape(
allow_none=True,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_jax_ones_like(
dtype_and_x,
shape,
dtype,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
dtype=dtype[0],
shape=shape,
)
@handle_frontend_test(
fn_tree="jax.numpy.setdiff1d",
dtype_and_a=helpers.dtype_and_values(
min_num_dims=1,
max_num_dims=1,
available_dtypes=helpers.get_dtypes("valid"),
),
dtype_and_b=helpers.dtype_and_values(
min_num_dims=1,
max_num_dims=1,
available_dtypes=helpers.get_dtypes("valid"),
),
use_size=st.booleans(),
size=st.integers(min_value=1, max_value=100),
assume_unique=st.booleans(),
)
def test_jax_setdiff1d(
*,
dtype_and_a,
dtype_and_b,
use_size,
size,
assume_unique,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype_a, a = dtype_and_a
input_dtype_b, b = dtype_and_b
if not use_size:
size = None
helpers.test_frontend_function(
input_dtypes=input_dtype_a + input_dtype_b,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
ar1=a[0],
ar2=b[0],
assume_unique=assume_unique,
size=size,
)
# single
@handle_frontend_test(
fn_tree="jax.numpy.single",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
)
def test_jax_single(
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.numpy.size",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
abs_smallest_val=0,
num_arrays=1,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=100,
valid_axis=True,
allow_neg_axes=True,
force_int_axis=True,
),
)
def test_jax_size(
dtype_x_axis,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
)
# triu
@handle_frontend_test(
fn_tree="jax.numpy.triu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
k=helpers.ints(min_value=-10, max_value=10),
test_with_out=st.just(False),
)
def test_jax_triu(
dtype_and_x,
k,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=x[0],
k=k,
)
# vander
@handle_frontend_test(
fn_tree="jax.numpy.vander",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=st.tuples(
st.integers(min_value=1, max_value=5),
),
),
N=st.integers(min_value=0, max_value=5),
increasing=st.booleans(),
)
def test_jax_vander(
*,
dtype_and_x,
N,
increasing,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
N=N,
increasing=increasing,
)
# zeros
@handle_frontend_test(
fn_tree="jax.numpy.zeros",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtypes=helpers.get_dtypes("numeric", full=False),
test_with_out=st.just(False),
)
def test_jax_zeros(
*,
dtypes,
shape,
on_device,
fn_tree,
test_flags,
frontend,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
shape=shape,
dtype=dtypes[0],
)
# zeros_like
@handle_frontend_test(
fn_tree="jax.numpy.zeros_like",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
shape=helpers.get_shape(
allow_none=True,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_jax_zeros_like(
dtype_and_x,
dtype,
shape,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
dtype=dtype[0],
shape=shape,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_creation.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_creation.py",
"repo_id": "ivy",
"token_count": 14229
} | 54 |
# global
from hypothesis import strategies as st
import pytest
import math
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_nn.test_layers import (
_assume_tf_dilation_gt_1,
)
import ivy
# --- Helpers --- #
# --------------- #
def _calculate_same_padding(kernel_size, stride, shape):
padding = tuple(
max(
0,
math.ceil(((shape[i] - 1) * stride[i] + kernel_size[i] - shape[i]) / 2),
)
for i in range(len(kernel_size))
)
if all(kernel_size[i] / 2 >= padding[i] for i in range(len(kernel_size))):
if _is_same_padding(padding, stride, kernel_size, shape):
return padding
return (0, 0)
def _is_same_padding(padding, stride, kernel_size, input_shape):
output_shape = tuple(
(input_shape[i] + 2 * padding[i] - kernel_size[i]) // stride[i] + 1
for i in range(len(padding))
)
return all(
output_shape[i] == math.ceil(input_shape[i] / stride[i])
for i in range(len(padding))
)
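# Illustrative values for the two helpers above (worked by hand, not exhaustive):
# _calculate_same_padding((3,), (1,), (7,)) returns (1,), since one cell of padding
# per side keeps the output length at math.ceil(7 / 1) == 7, and correspondingly
# _is_same_padding((1,), (1,), (3,), (7,)) evaluates to True.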
def _scale_factor_strategy():
return st.one_of(
st.floats(min_value=0.1, max_value=2.0),
st.tuples(st.floats(min_value=0.1, max_value=2.0)),
st.lists(st.floats(min_value=0.1, max_value=2.0), min_size=3, max_size=3),
)
def _size_and_scale_factor_strategy():
return st.one_of(
st.tuples(_size_strategy(), st.just(None)),
st.tuples(st.just(None), _scale_factor_strategy()),
st.tuples(_size_strategy(), _scale_factor_strategy()),
)
def _size_strategy():
return st.one_of(
st.integers(min_value=1, max_value=10),
st.tuples(st.integers(min_value=1, max_value=10)),
st.lists(st.integers(min_value=1, max_value=10), min_size=3, max_size=3),
)
@st.composite
def _x_and_filters(draw, dim: int = 2):
if not isinstance(dim, int):
dim = draw(dim)
strides = draw(
st.one_of(
st.lists(
st.integers(min_value=1, max_value=3),
min_size=dim,
max_size=dim,
),
st.integers(min_value=1, max_value=3),
)
)
pad_mode = draw(st.sampled_from(["valid", "same", "pad"]))
padding = draw(
st.one_of(
st.integers(min_value=1, max_value=3),
st.lists(st.integers(min_value=1, max_value=2), min_size=dim, max_size=dim),
)
)
batch_size = draw(st.integers(1, 5))
filter_shape = draw(
helpers.get_shape(
min_num_dims=dim, max_num_dims=dim, min_dim_size=1, max_dim_size=5
)
)
dtype = draw(helpers.get_dtypes("float", full=False))
input_channels = draw(st.integers(1, 3))
output_channels = draw(st.integers(1, 3))
group_list = [i for i in range(1, 3)]
group_list = list(filter(lambda x: (input_channels % x == 0), group_list))
fc = draw(st.sampled_from(group_list))
dilations = draw(
st.one_of(
st.lists(
st.integers(min_value=1, max_value=3),
min_size=dim,
max_size=dim,
),
st.integers(min_value=1, max_value=3),
)
)
full_dilations = [dilations] * dim if isinstance(dilations, int) else dilations
x_dim = []
for i in range(dim):
min_x = filter_shape[i] + (filter_shape[i] - 1) * (full_dilations[i] - 1)
x_dim.append(draw(st.integers(min_x, 15)))
x_dim = tuple(x_dim)
output_channels = output_channels * fc
filter_shape = (output_channels, input_channels // fc) + filter_shape
x_shape = (batch_size, input_channels) + x_dim
vals = draw(
helpers.array_values(
dtype=dtype[0],
shape=x_shape,
min_value=0.0,
max_value=1.0,
)
)
filters = draw(
helpers.array_values(
dtype=dtype[0],
shape=filter_shape,
min_value=0.0,
max_value=1.0,
)
)
bias = draw(
helpers.array_values(
dtype=dtype[0],
shape=(output_channels,),
min_value=0.0,
max_value=1.0,
)
)
return dtype, vals, filters, bias, dilations, strides, padding, fc, pad_mode
# --- Main --- #
# ------------ #
# adaptive_avg_pool2d
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.adaptive_avg_pool2d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=4,
max_num_dims=4,
min_dim_size=1,
max_value=100,
min_value=-100,
),
output_size=st.one_of(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=1, max_value=5),
),
helpers.ints(min_value=1, max_value=5),
),
)
def test_mindspore_adaptive_avg_pool2d(
*,
dtype_and_x,
output_size,
test_flags,
frontend,
backend_fw,
on_device,
fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
on_device=on_device,
fn_tree=fn_tree,
x=x[0],
output_size=output_size,
)
# avg_pool2d
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.avg_pool2d",
dtype_x_k_s=helpers.arrays_for_pooling(
min_dims=4,
max_dims=4,
min_side=1,
max_side=4,
),
pad_mode=st.booleans(),
count_include_pad=st.booleans(),
test_with_out=st.just(False),
)
def test_mindspore_avg_pool2d(
dtype_x_k_s,
count_include_pad,
pad_mode,
*,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x, kernel_size, stride, pad_name = dtype_x_k_s
if len(stride) == 1:
stride = (stride[0], stride[0])
if pad_name == "SAME":
padding = _calculate_same_padding(kernel_size, stride, x[0].shape[2:])
else:
padding = (0, 0)
x[0] = x[0].reshape((x[0].shape[0], x[0].shape[-1], *x[0].shape[1:-1]))
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
kernel_size=kernel_size,
stride=stride,
padding=padding,
pad_mode=pad_mode,
count_include_pad=count_include_pad,
divisor_override=None,
)
# conv1d
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.conv1d",
dtype_vals=_x_and_filters(dim=1),
)
def test_mindspore_conv1d(
*,
dtype_vals,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vals, weight, bias, dilations, strides, padding, fc, pad_mode = dtype_vals
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=vals,
weight=weight,
bias=bias,
stride=strides,
padding=padding,
dilation=dilations,
groups=fc,
pad_mode=pad_mode,
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.conv2d",
dtype_vals=_x_and_filters(dim=2),
)
def test_mindspore_conv2d(
*,
dtype_vals,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vals, weight, bias, dilations, strides, padding, fc, pad_mode = dtype_vals
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=vals,
weight=weight,
bias=bias,
stride=strides,
padding=padding,
dilation=dilations,
groups=fc,
pad_mode=pad_mode,
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.conv3d",
dtype_vals=_x_and_filters(dim=3),
)
def test_mindspore_conv3d(
*,
dtype_vals,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vals, weight, bias, dilations, strides, padding, fc, pad_mode = dtype_vals
# ToDo: Enable gradient tests for dilations > 1 when tensorflow supports it.
_assume_tf_dilation_gt_1(ivy.current_backend_str(), on_device, dilations)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=vals,
weight=weight,
bias=bias,
stride=strides,
padding=padding,
dilation=dilations,
groups=fc,
pad_mode=pad_mode,
)
# dropout2d
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.dropout2d",
d_type_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
shared_dtype=True,
min_value=2,
max_value=5,
min_dim_size=4,
shape=(
st.integers(min_value=2, max_value=10),
4,
st.integers(min_value=12, max_value=64),
st.integers(min_value=12, max_value=64),
),
),
p=st.floats(min_value=0.0, max_value=1.0),
training=st.booleans(),
)
def test_mindspore_dropout2d(
*,
d_type_and_x,
p,
training,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
dtype, x = d_type_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
p=p,
training=training,
)
# dropout3d
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.dropout3d",
d_type_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
shared_dtype=True,
min_value=2,
max_value=5,
min_dim_size=5,
shape=(
st.integers(min_value=2, max_value=10),
st.integers(min_value=12, max_value=64),
st.integers(min_value=12, max_value=64),
st.integers(min_value=12, max_value=64),
),
),
p=st.floats(min_value=0.0, max_value=1.0),
training=st.booleans(),
)
def test_mindspore_dropout3d(
*,
d_type_and_x,
p,
training,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
dtype, x = d_type_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
p=p,
training=training,
)
# FastGelu
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.fast_gelu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_mindspore_fast_gelu(
dtype_and_x,
*,
test_flags,
frontend,
backend_fw,
on_device,
fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
input=x[0],
)
# flatten
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.flatten",
dtype_input_axes=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
valid_axis=True,
min_num_dims=1,
min_axes_size=2,
max_axes_size=2,
),
)
def test_mindspore_flatten(
*,
dtype_input_axes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input, axes = dtype_input_axes
if isinstance(axes, int):
start_dim = axes
end_dim = -1
else:
start_dim = axes[0]
end_dim = axes[1]
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
order="C",
start_dim=start_dim,
end_dim=end_dim,
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.interpolate",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
shared_dtype=True,
),
mode=st.sampled_from(
[
"nearest",
"linear",
"bilinear",
"bicubic",
"trilinear",
"area",
"nearest-exact",
]
),
align_corners=st.booleans(),
recompute_scale_factor=st.booleans(),
size_and_scale_factor=_size_and_scale_factor_strategy(),
)
def test_mindspore_interpolate(
*,
dtype_and_x,
mode,
align_corners,
recompute_scale_factor,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
size_and_scale_factor,
):
dtype, x = dtype_and_x
size, scale_factor = size_and_scale_factor
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
size=size,
scale_factor=scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor,
)
# kl_div
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.kl_div",
p=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
shared_dtype=True,
min_value=2,
max_value=5,
min_dim_size=4,
),
q=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
shared_dtype=True,
min_value=2,
max_value=5,
min_dim_size=4,
),
reduction=st.sampled_from(["none", "sum", "mean"]),
)
def test_mindspore_kl_div(
*,
p,
q,
reduction,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
helpers.test_frontend_function(
input_dtypes=p[0],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
p=p[1],
q=q[1],
reduction=reduction,
)
# log_softmax
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.log_softmax",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
safety_factor_scale="log",
small_abs_safety_factor=20,
),
)
def test_mindspore_log_softmax(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
    backend_fw,
    test_flags,
):
input_dtype, x = dtype_and_x
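    # Minimal call sketch following the pattern of the other tests in this file;
    # the `logits` keyword is an assumption based on MindSpore's log_softmax API.
    helpers.test_frontend_function(
        input_dtypes=input_dtype,
        frontend=frontend,
        backend_to_test=backend_fw,
        test_flags=test_flags,
        fn_tree=fn_tree,
        on_device=on_device,
        logits=x[0],
    )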
# hardswish
# @handle_frontend_test(
# fn_tree="mindspore.ops.function.nn_func.hardswish",
# dtype_and_x=helpers.dtype_and_values(
# available_dtypes=helpers.get_dtypes("valid"),
# ),
# )
# def test_mindspore_hardswish(
# *,
# dtype_and_x,
# on_device,
# fn_tree,
# frontend,
# test_flags,
# ):
# input_dtype, x = dtype_and_x
# helpers.test_frontend_function(
# input_dtypes=input_dtype,
# frontend=frontend,
# test_flags=test_flags,
# fn_tree=fn_tree,
# on_device=on_device,
# x=x[0],
# )
# max_pool3d
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.max_pool3d",
x_k_s_p=helpers.arrays_for_pooling(
min_dims=5,
max_dims=5,
min_side=1,
max_side=4,
only_explicit_padding=True,
return_dilation=True,
data_format="channel_first",
),
test_with_out=st.just(False),
ceil_mode=st.sampled_from([True, False]),
)
def test_mindspore_max_pool3d(
x_k_s_p,
ceil_mode,
*,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtypes, x, kernel_size, stride, padding, dilation = x_k_s_p
padding = (padding[0][0], padding[1][0], padding[2][0])
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
)
# pad
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.pad",
input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
shared_dtype=True,
min_value=2,
max_value=5,
min_dim_size=4,
),
pad_width=st.lists(
st.tuples(
st.integers(min_value=0, max_value=5), st.integers(min_value=0, max_value=5)
)
),
mode=st.sampled_from(["constant", "reflect", "replicate", "circular"]),
constant_values=st.floats(min_value=0.0, max_value=1.0),
)
def test_mindspore_pad(
*,
input,
pad_width,
mode,
constant_values,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
helpers.test_frontend_function(
input_dtypes=input[0],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[1],
pad_width=pad_width,
mode=mode,
constant_values=constant_values,
)
# selu
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.selu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
safety_factor_scale="log",
small_abs_safety_factor=20,
),
)
def test_mindspore_selu(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# softshrink
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.softshrink",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
lambd=helpers.floats(min_value=0, max_value=1, exclude_min=True),
)
def test_mindspore_softshrink(
*,
dtype_and_input,
lambd,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
lambd=lambd,
)
# gumbel_softmax
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.function.nn_func.gumbel_softmax",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
tau=st.floats(min_value=0),
hard=st.booleans(),
dim=st.integers(),
test_with_out=st.just(False),
test_inplace=st.booleans(),
)
def test_torch_gumbel_softmax(
*,
dtype_and_x,
tau,
hard,
dim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
logits=x[0],
tau=tau,
hard=hard,
dim=dim,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_mindspore/test_ops/test_function/test_mindspore_nn_func.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_mindspore/test_ops/test_function/test_mindspore_nn_func.py",
"repo_id": "ivy",
"token_count": 10886
} | 55 |
# global
from hypothesis import strategies as st, settings, assume
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers.testing_helpers import handle_frontend_test
# can_cast
@handle_frontend_test(
fn_tree="numpy.can_cast",
from_=helpers.get_dtypes("valid", full=False),
to=helpers.get_dtypes("valid", full=False),
casting=st.sampled_from(["no", "equiv", "safe", "same_kind", "unsafe"]),
test_with_out=st.just(False),
number_positional_args=st.just(3),
)
@settings(max_examples=200)
def test_numpy_can_cast(
*,
from_,
to,
casting,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=[],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
from_=from_[0],
to=to[0],
casting=casting,
)
@handle_frontend_test(
fn_tree="numpy.min_scalar_type",
x=st.one_of(
helpers.ints(min_value=-256, max_value=256),
st.booleans(),
helpers.floats(min_value=-256, max_value=256),
),
)
@settings(max_examples=200)
def test_numpy_min_scalar_type(
*,
x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
    # skip plain int inputs for the torch backend (torch lacks some uint dtypes)
    if ivy.current_backend_str() == "torch":
        assume(not isinstance(x, int))
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=[],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x,
test_values=False,
)
assert ret._ivy_dtype == frontend_ret[0].name
# promote_types
@handle_frontend_test(
fn_tree="numpy.promote_types",
type1=helpers.get_dtypes("valid", full=False),
type2=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
# there are 100 combinations of dtypes, so run 200 examples to make sure all are tested
@settings(max_examples=200)
def test_numpy_promote_types(
*,
type1,
type2,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=[],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
type1=type1[0],
type2=type2[0],
test_values=False,
)
assert ret._ivy_dtype == frontend_ret[0].name
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_data_type_routines/test_general.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_data_type_routines/test_general.py",
"repo_id": "ivy",
"token_count": 1229
} | 56 |
# global
from hypothesis import strategies as st, assume
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_linalg import (
_get_dtype_and_matrix,
_matrix_rank_helper,
)
from ivy_tests.test_ivy.helpers.hypothesis_helpers.general_helpers import (
matrix_is_stable,
)
# --- Helpers --- #
# --------------- #
# norm
@st.composite
def _norm_helper(draw):
def _matrix_norm_example():
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=helpers.get_shape(min_num_dims=2, max_num_dims=2),
min_num_dims=2,
max_num_dims=2,
min_dim_size=1,
max_dim_size=10,
min_value=-1e4,
max_value=1e4,
large_abs_safety_factor=10,
small_abs_safety_factor=10,
safety_factor_scale="log",
),
)
ord = draw(st.sampled_from(["fro", "nuc"]))
axis = (-2, -1)
check_stable = True
return x_dtype, x, axis, ord, check_stable
def _vector_norm_example():
x_dtype, x, axis = draw(
helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
valid_axis=True,
force_int_axis=True,
min_value=-1e04,
max_value=1e04,
large_abs_safety_factor=10,
small_abs_safety_factor=10,
safety_factor_scale="log",
)
)
ints = draw(helpers.ints(min_value=1, max_value=2))
floats = draw(helpers.floats(min_value=1, max_value=2))
ord = draw(st.sampled_from([ints, floats, float("inf"), float("-inf")]))
check_stable = False
return x_dtype, x, axis, ord, check_stable
is_vec_norm = draw(st.booleans())
if is_vec_norm:
return _vector_norm_example()
return _matrix_norm_example()
# --- Main --- #
# ------------ #
# det
@handle_frontend_test(
fn_tree="numpy.linalg.det",
dtype_and_x=_get_dtype_and_matrix(),
test_with_out=st.just(False),
)
def test_numpy_det(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
a=x[0],
)
# matrix_rank
@handle_frontend_test(
fn_tree="numpy.linalg.matrix_rank",
dtype_x_hermitian_atol_rtol=_matrix_rank_helper(),
test_with_out=st.just(False),
)
def test_numpy_matrix_rank(
dtype_x_hermitian_atol_rtol,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, hermitian, atol, rtol = dtype_x_hermitian_atol_rtol
assume(matrix_is_stable(x, cond_limit=10))
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
A=x,
tol=atol,
hermitian=hermitian,
)
@handle_frontend_test(
fn_tree="numpy.linalg.norm",
norm_values=_norm_helper(),
keepdims=st.booleans(),
test_with_out=st.just(False),
)
def test_numpy_norm(
norm_values,
keepdims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis, ord, check_stable = norm_values
if check_stable:
assume(matrix_is_stable(x[0], cond_limit=10))
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
ord=ord,
axis=axis,
keepdims=keepdims,
)
# slogdet
@handle_frontend_test(
fn_tree="numpy.linalg.slogdet",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
max_value=5,
min_value=2,
shape=st.tuples(
st.shared(st.integers(1, 5), key="sq"),
st.shared(st.integers(1, 5), key="sq"),
),
num_arrays=1,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_numpy_slogdet(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
assume(matrix_is_stable(x[0]))
ret, ret_gt = helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
test_values=False,
)
for ret_f, ret_gtt in zip(ret, ret_gt):
frontend_ret = ret_f
frontend_ret_gt = ret_gtt
ret_flattened = helpers.flatten_and_to_np(ret=frontend_ret, backend=backend_fw)
ret_gt_flattened = helpers.flatten_and_to_np(
ret=frontend_ret_gt, backend=frontend
)
helpers.value_test(
ret_np_flat=ret_flattened,
ret_np_from_gt_flat=ret_gt_flattened,
rtol=1e-1,
atol=1e-1,
backend=backend_fw,
ground_truth_backend=frontend,
)
@handle_frontend_test(
fn_tree="numpy.linalg.trace",
gt_fn_tree="numpy.trace",
dtype_and_x_axes=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
valid_axis=True,
min_axes_size=2,
max_axes_size=2,
min_num_dims=2,
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
),
test_with_out=st.just(False),
offset=st.integers(min_value=-4, max_value=4),
)
def test_numpy_trace(
dtype_and_x_axes,
offset,
frontend,
test_flags,
fn_tree,
gt_fn_tree,
backend_fw,
on_device,
):
dtype, x, axes = dtype_and_x_axes
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
gt_fn_tree=gt_fn_tree,
on_device=on_device,
rtol=1e-2,
a=x[0],
offset=offset,
axis1=axes[0],
axis2=axes[1],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_norms_and_other_numbers.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_norms_and_other_numbers.py",
"repo_id": "ivy",
"token_count": 3610
} | 57 |
# global
from hypothesis import assume
# local
import ivy
from hypothesis import strategies as st
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# append
@handle_frontend_test(
fn_tree="numpy.append",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shape=helpers.get_shape(
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
shared_dtype=True,
valid_axis=True,
allow_neg_axes=True,
force_int_axis=True,
),
test_with_out=st.just(False),
)
def test_numpy_append(
dtype_values_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, values, axis = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
arr=values[0],
values=values[1],
axis=axis,
)
@handle_frontend_test(
fn_tree="numpy.trim_zeros",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), min_num_dims=1, max_num_dims=1
),
trim=st.sampled_from(["f", "b", "fb"]),
)
def test_numpy_trim_zeros(
frontend,
on_device,
*,
dtype_and_x,
trim,
fn_tree,
test_flags,
backend_fw,
):
input_dtypes, x = dtype_and_x
if ivy.current_backend_str() == "paddle":
assume(input_dtypes[0] not in ["float16"])
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
filt=x[0],
trim=trim,
)
# unique
@handle_frontend_test(
fn_tree="numpy.unique",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
force_int_axis=True,
valid_axis=True,
),
return_index=st.booleans(),
return_inverse=st.booleans(),
return_counts=st.booleans(),
none_axis=st.booleans(),
test_with_out=st.just(False),
)
def test_numpy_unique(
*,
dtype_x_axis,
return_index,
return_inverse,
return_counts,
none_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs, axis = dtype_x_axis
if none_axis:
axis = None
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
array=xs[0],
return_index=return_index,
return_inverse=return_inverse,
return_counts=return_counts,
axis=axis,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_adding_and_removing_elements.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_adding_and_removing_elements.py",
"repo_id": "ivy",
"token_count": 1493
} | 58 |
# global
from hypothesis import strategies as st
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# angle
@handle_frontend_test(
fn_tree="numpy.angle",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("complex")
),
deg=st.booleans(),
)
def test_numpy_angle(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
deg,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
z=x[0],
deg=deg,
)
# conj
@handle_frontend_test(
fn_tree="numpy.conj",
aliases=["numpy.conjugate"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="conj"
),
)
def test_numpy_conj(
on_device,
frontend,
*,
dtype_and_x,
fn_tree,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# imag
@handle_frontend_test(
fn_tree="numpy.imag",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
test_with_out=st.just(False),
)
def test_numpy_imag(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
val=x[0],
)
# real
@handle_frontend_test(
fn_tree="numpy.real",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
test_with_out=st.just(False),
)
def test_numpy_real(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
val=x[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_handling_complex_numbers.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_handling_complex_numbers.py",
"repo_id": "ivy",
"token_count": 1342
} | 59 |
import pytest
@pytest.fixture(scope="session")
def frontend():
return "paddle"
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/conftest.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/conftest.py",
"repo_id": "ivy",
"token_count": 31
} | 60 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers.testing_helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_nn.test_norms import (
_generate_data_layer_norm,
)
# layer_norm
@handle_frontend_test(
fn_tree="paddle.nn.functional.layer_norm",
values_tuple=_generate_data_layer_norm(
available_dtypes=helpers.get_dtypes("float"),
),
eps=st.floats(min_value=0.01, max_value=0.1),
)
def test_paddle_layer_norm(
*,
values_tuple,
eps,
test_flags,
frontend,
on_device,
backend_fw,
fn_tree,
):
(dtype, x, normalized_shape, scale, offset) = values_tuple
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
on_device=on_device,
fn_tree=fn_tree,
x=x[0],
normalized_shape=normalized_shape,
weight=scale[0],
bias=offset[0],
epsilon=eps,
)
# normalize
@handle_frontend_test(
fn_tree="paddle.nn.functional.normalize",
dtype_and_x_and_axis=helpers.arrays_and_axes(
available_dtypes=helpers.get_dtypes(kind="valid"),
num=1,
return_dtype=True,
force_int_axis=True,
),
p=st.floats(min_value=0.1, max_value=2),
negative_axis=st.booleans(),
)
def test_paddle_normalize(
*,
dtype_and_x_and_axis,
p,
negative_axis,
test_flags,
frontend,
backend_fw,
on_device,
fn_tree,
):
dtype, x, axis = dtype_and_x_and_axis
if axis:
axis = -axis if negative_axis else axis
else:
axis = 0
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
on_device=on_device,
fn_tree=fn_tree,
x=x[0],
p=p,
axis=axis,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_norm.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_norm.py",
"repo_id": "ivy",
"token_count": 977
} | 61 |
# global
from hypothesis import strategies as st
import pytest
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_method
CLASS_TREE = "ivy.functional.frontends.pandas.DataFrame"
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="pandas.DataFrame",
method_name="abs",
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
)
def test_pandas_series_abs(
dtype_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
on_device,
):
# todo add castable dtypes for output
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
backend_to_test=backend_fw,
on_device=on_device,
)
@pytest.mark.xfail(reason="testing pipeline fixes")
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="pandas.DataFrame",
method_name="mean",
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
skipna=st.booleans(),
axis=st.sampled_from([None, 0, 1, "index", "columns"]),
)
def test_pandas_series_mean(
dtype_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
skipna,
axis,
):
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"skipna": skipna, "axis": axis},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
backend_to_test=backend_fw,
)
@pytest.mark.xfail(reason="testing pipeline fixes")
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="pandas.DataFrame",
method_name="sum",
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
skipna=st.booleans(),
axis=st.sampled_from([None, 0, 1, "index", "columns"]),
min_count=st.integers(min_value=0, max_value=5),
)
def test_pandas_series_sum(
dtype_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
skipna,
axis,
min_count,
):
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"skipna": skipna,
"axis": axis,
"min_count": min_count,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
backend_to_test=backend_fw,
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="pandas.DataFrame",
method_name="to_numpy",
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
na_values=st.sampled_from([None, np.nan, np.inf, -np.inf]),
copy=st.booleans(),
)
def test_pandas_series_to_numpy(
dtype_x,
frontend,
na_values,
copy,
frontend_method_data,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"na_values": na_values,
"copy": copy,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
backend_to_test=backend_fw,
on_device=on_device,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_pandas/test_dataframe.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_pandas/test_dataframe.py",
"repo_id": "ivy",
"token_count": 2087
} | 62 |
# global
from hypothesis import given, strategies as st
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import BackendHandler
from ivy.functional.frontends.tensorflow.func_wrapper import (
outputs_to_frontend_arrays,
to_ivy_arrays_and_back,
handle_tf_dtype,
)
from ivy.functional.frontends.tensorflow.tensor import EagerTensor
import ivy.functional.frontends.tensorflow as tf_frontend
import ivy.functional.frontends.numpy as np_frontend
# --- Helpers --- #
# --------------- #
@st.composite
def _dtype_helper(draw):
return draw(
st.sampled_from(
[
draw(helpers.get_dtypes("valid", prune_function=False, full=False))[0],
ivy.as_native_dtype(
draw(helpers.get_dtypes("valid", prune_function=False, full=False))[
0
]
),
draw(
st.sampled_from(list(tf_frontend.tensorflow_enum_to_type.values()))
),
draw(st.sampled_from(list(tf_frontend.tensorflow_enum_to_type.keys()))),
np_frontend.dtype(
draw(helpers.get_dtypes("valid", prune_function=False, full=False))[
0
]
),
draw(st.sampled_from(list(np_frontend.numpy_scalar_to_dtype.keys()))),
]
)
)
def _fn(x=None, dtype=None):
if ivy.exists(dtype):
return dtype
return x
# --- Main --- #
# ------------ #
@given(
dtype=_dtype_helper(),
)
def test_tensorflow_handle_tf_dtype(dtype):
ret_dtype = handle_tf_dtype(_fn)(dtype=dtype)
assert isinstance(ret_dtype, ivy.Dtype)
@given(
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False)
),
)
def test_tensorflow_inputs_to_ivy_arrays(dtype_and_x, backend_fw):
x_dtype, x = dtype_and_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
_import_fn = ivy_backend.utils.dynamic_import.import_module
_import_fn("ivy.functional.frontends.tensorflow.func_wrapper")
_tensor_module = _import_fn("ivy.functional.frontends.tensorflow.tensor")
# check for ivy array
input_ivy = ivy_backend.array(x[0], dtype=x_dtype[0])
output = ivy_backend.inputs_to_ivy_arrays(_fn)(input_ivy)
assert isinstance(output, ivy_backend.Array)
assert input_ivy.dtype == output.dtype
assert ivy_backend.all(input_ivy == output)
# check for native array
input_native = ivy_backend.native_array(input_ivy)
output = ivy_backend.inputs_to_ivy_arrays(_fn)(input_native)
assert isinstance(output, ivy_backend.Array)
assert ivy_backend.as_ivy_dtype(input_native.dtype) == output.dtype
assert ivy_backend.all(input_native == output.data)
# check for frontend array
input_frontend = _tensor_module.EagerTensor(x[0])
output = ivy_backend.inputs_to_ivy_arrays(_fn)(input_frontend)
assert isinstance(output, ivy_backend.Array)
assert input_frontend.dtype.ivy_dtype == output.dtype
assert ivy_backend.all(input_frontend.ivy_array == output)
@given(
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False)
),
)
def test_tensorflow_outputs_to_frontend_arrays(dtype_and_x):
x_dtype, x = dtype_and_x
# check for ivy array
input_ivy = ivy.array(x[0], dtype=x_dtype[0])
output = outputs_to_frontend_arrays(_fn)(input_ivy)
assert isinstance(output, EagerTensor)
assert input_ivy.dtype == output.dtype.ivy_dtype
assert ivy.all(input_ivy == output.ivy_array)
@given(
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False)
),
)
def test_tensorflow_to_ivy_arrays_and_back(dtype_and_x):
x_dtype, x = dtype_and_x
# check for ivy array
input_ivy = ivy.array(x[0], dtype=x_dtype[0])
output = to_ivy_arrays_and_back(_fn)(input_ivy)
assert isinstance(output, EagerTensor)
assert input_ivy.dtype == output.dtype.ivy_dtype
assert ivy.all(input_ivy == output.ivy_array)
# check for native array
input_native = ivy.native_array(input_ivy)
output = to_ivy_arrays_and_back(_fn)(input_native)
assert isinstance(output, EagerTensor)
assert ivy.as_ivy_dtype(input_native.dtype) == output.dtype.ivy_dtype
assert ivy.all(input_native == output.ivy_array.data)
# check for frontend array
input_frontend = EagerTensor(x[0])
output = to_ivy_arrays_and_back(_fn)(input_frontend)
assert isinstance(output, EagerTensor)
assert input_frontend.dtype == output.dtype
assert ivy.all(input_frontend.ivy_array == output.ivy_array)
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_func_wrapper.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_func_wrapper.py",
"repo_id": "ivy",
"token_count": 2269
} | 63 |
# global
import sys
import ivy
from hypothesis import assume, strategies as st
from ivy.functional.frontends.tensorflow.nn import _convolution_broadcast_helper
from ivy_tests.test_ivy.test_frontends.test_tensorflow.test_nn import _x_and_filters
import numpy as np
import math
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.helpers.globals as test_globals
from ivy_tests.test_ivy.helpers import (
handle_frontend_test,
assert_all_close,
BackendHandler,
)
# for data generation
dtype_shared = st.shared(st.sampled_from(helpers.get_dtypes("numeric")), key="dtype")
# --- Helpers --- #
# --------------- #
@st.composite
def _LinSpace_helper(draw):
shape = ()
dtype = draw(st.sampled_from(["float32", "float64"]))
# Param: start
start = draw(
helpers.array_values(
dtype=dtype,
shape=shape,
min_value=-5.0,
max_value=5.0,
),
)
# Param: stop
stop = draw(
helpers.array_values(
dtype=dtype,
shape=shape,
min_value=-4.0,
max_value=10.0,
),
)
return [dtype] * 2, start, stop
# noinspection DuplicatedCode
@st.composite
def _arrays_idx_n_dtypes(draw):
num_dims = draw(st.shared(helpers.ints(min_value=1, max_value=4), key="num_dims"))
num_arrays = draw(
st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays")
)
common_shape = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=3),
size=num_dims - 1,
)
)
unique_idx = draw(helpers.ints(min_value=0, max_value=num_dims - 1))
unique_dims = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=3),
size=num_arrays,
)
)
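    # e.g. with common_shape == [2, 3] and unique_idx == 1, a drawn ud produces an
    # array of shape [2, ud, 3]; only the concatenation axis differs across arrays.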
xs = []
input_dtypes = draw(
helpers.array_dtypes(
available_dtypes=draw(helpers.get_dtypes("float")), shared_dtype=True
)
)
for ud, dt in zip(unique_dims, input_dtypes):
x = draw(
helpers.array_values(
shape=common_shape[:unique_idx] + [ud] + common_shape[unique_idx:],
dtype=dt,
)
)
xs.append(x)
return xs, input_dtypes, unique_idx
@st.composite
def _dtypes(draw):
return draw(
st.shared(
helpers.list_of_size(
x=st.sampled_from(draw(helpers.get_dtypes("numeric"))),
size=1,
),
key="dtype",
)
)
@st.composite
def _fill_value(draw):
dtype = draw(_dtypes())[0]
with BackendHandler.update_backend(test_globals.CURRENT_BACKEND) as ivy_backend:
if ivy_backend.is_uint_dtype(dtype):
return draw(helpers.ints(min_value=0, max_value=5))
elif ivy_backend.is_int_dtype(dtype):
return draw(helpers.ints(min_value=-5, max_value=5))
return draw(helpers.floats(min_value=-5, max_value=5))
@st.composite
def _get_shared_dtype(draw):
return st.shared(st.sampled_from(draw(helpers.get_dtypes("numeric"))), key="dtype")
@st.composite
def _get_splits(draw, as_list=False):
"""Generate valid splits, either by generating an integer that evenly
divides the axis or a list of splits that sum to the length of the axis
being split."""
shape = draw(st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"))
axis = draw(
st.shared(helpers.get_axis(shape=shape, force_int=True), key="target_axis")
)
@st.composite
def get_int_split(draw):
if shape[axis] == 0:
return 0
factors = []
for i in range(1, shape[axis] + 1):
if shape[axis] % i == 0:
factors.append(i)
return draw(st.sampled_from(factors))
@st.composite
def get_list_split(draw):
num_or_size_splits = []
while sum(num_or_size_splits) < shape[axis]:
split_value = draw(
helpers.ints(
min_value=1,
max_value=shape[axis] - sum(num_or_size_splits),
)
)
num_or_size_splits.append(split_value)
return num_or_size_splits
if as_list:
return draw(get_list_split())
else:
return draw(get_int_split())
# Tile
@st.composite
def _multiple_shape_helper(draw):
input_dtype, input_array, input_shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), ret_shape=True
)
)
input_dims = len(input_shape)
dt_n_multiples = draw(
helpers.dtype_and_values(
available_dtypes=["int32", "int64"],
min_value=0,
max_value=10,
shape=draw(
helpers.get_shape(
min_num_dims=1,
max_num_dims=1,
min_dim_size=input_dims,
max_dim_size=input_dims,
)
),
)
)
return input_dtype, input_array, dt_n_multiples
@st.composite
def _pad_helper(draw, return_constant_values=False):
dtype, input, shape = draw(
helpers.dtype_and_values(
min_num_dims=1,
ret_shape=True,
)
)
ndim = len(shape)
padding_dtype, paddings = draw(
helpers.dtype_and_values(
available_dtypes=["int32", "int64"],
shape=(ndim, 2),
min_value=0,
max_value=10,
)
)
if return_constant_values:
_, constant_values = draw(
helpers.dtype_and_values(
dtype=dtype,
shape=(1,),
)
)
return dtype, input[0], padding_dtype, paddings[0], constant_values[0][0]
return dtype, input[0], padding_dtype, paddings[0]
@st.composite
def _permute_dims_helper(draw):
shape = draw(st.shared(helpers.get_shape(min_num_dims=1), key="shape"))
dims = [x for x in range(len(shape))]
permutation = draw(st.permutations(dims))
return permutation
@st.composite
def _pow_helper_shared_dtype(draw):
dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
)
)
dtype1, dtype2 = dtype
x1, x2 = x
if "int" in dtype2:
x2 = ivy.nested_map(lambda x: abs(x), x2, include_derived={"list": True})
if ivy.is_int_dtype(dtype2):
max_val = ivy.iinfo(dtype2).max
else:
max_val = ivy.finfo(dtype2).max
max_x1 = np.max(np.abs(x1))
if max_x1 in [0, 1]:
max_value = None
else:
max_value = int(math.log(max_val) / math.log(max_x1))
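        # bound the exponent so that max_x1 ** e stays below max_val,
        # i.e. e <= log(max_val) / log(max_x1); otherwise fall back to None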
        if abs(max_value) > abs(max_val) / 40 or max_value < 0:
            max_value = None
return [dtype1, dtype2], [x1, x2]
# Reshape
@st.composite
def _reshape_helper(draw):
# generate a shape s.t len(shape) > 0
shape = draw(helpers.get_shape(min_num_dims=1))
reshape_shape = draw(helpers.reshape_shapes(shape=shape))
dtype = draw(helpers.array_dtypes(num_arrays=1))
x = draw(helpers.array_values(dtype=dtype[0], shape=shape))
return x, dtype, reshape_shape
@st.composite
def _segment_ops_helper(draw):
shape_x = draw(st.integers(min_value=3, max_value=100))
shape_y = draw(st.integers(min_value=3, max_value=100))
max_val = draw(st.integers(min_value=3, max_value=9))
s_dtype = draw(
st.sampled_from(
[
"int32",
"int64",
]
)
)
data_dtype, data = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
shape=(shape_x, shape_y),
min_value=-max_val,
max_value=max_val,
)
)
seg_dtype, segment_ids = draw(
helpers.dtype_and_values(
available_dtypes=[s_dtype],
num_arrays=1,
shape=(shape_x,),
min_value=0,
max_value=max_val,
)
)
return data_dtype + seg_dtype, data, segment_ids, max_val
@st.composite
def _squeeze_helper(draw):
shape = draw(st.shared(helpers.get_shape(), key="value_shape"))
valid_axes = []
for index, axis in enumerate(shape):
if axis == 1:
valid_axes.append(index)
valid_axes.insert(0, None)
axis = draw(st.sampled_from(valid_axes))
return [axis] if axis is not None else axis
@st.composite
def df(draw, data_format):
data_format = draw(data_format)
return data_format
# Reverse
@st.composite
def reverse_helper(draw):
dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=8,
ret_shape=True,
)
)
axis_dtype, axis = draw(
helpers.dtype_and_values(
available_dtypes=["bool"],
min_num_dims=1,
max_num_dims=1,
num_arrays=1,
shape=(len(shape),),
)
)
return dtype, x, axis_dtype, axis
# --- Main --- #
# ------------ #
# Todo: Revise strategies once reimplemented in frontend
# AccumulateNV2
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.AccumulateNV2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
shape=helpers.get_shape(min_num_dims=1),
)
def test_tensorflow_AccumulateNV2(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
shape,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
inputs=x[0],
shape=shape,
)
# Acos
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Acos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Acos( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Acosh
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Acosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Acosh( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Add
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Add",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True
),
test_with_out=st.just(False),
)
def test_tensorflow_Add( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# AddN
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.AddN",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
min_value=-1e04,
max_value=1e04,
),
test_with_out=st.just(False),
)
def test_tensorflow_AddN( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
inputs=x[0],
)
# AddV2
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.AddV2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_AddV2( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# Angle
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Angle",
dtype_and_xs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("complex"),
),
Tout=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_Angle( # NOQA
*,
dtype_and_xs,
Tout,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_xs
if input_dtype[0] == "complex128":
Tout = "float64"
elif input_dtype[0] == "complex64":
Tout = "float32" if Tout else None
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=xs[0],
Tout=Tout,
)
# ApproximateEqual
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.ApproximateEqual",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
large_abs_safety_factor=20,
small_abs_safety_factor=20,
safety_factor_scale="log",
),
tol=st.floats(1e-05, 1e-03),
test_with_out=st.just(False),
)
def test_tensorflow_ApproximateEqual( # NOQA
*,
dtype_and_x,
tol,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
tolerance=tol,
)
# argmax
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.ArgMax",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
allow_inf=False,
),
output_type=st.sampled_from(["int16", "int32", "int64"]),
test_with_out=st.just(False),
)
def test_tensorflow_ArgMax( # NOQA
*,
dtype_x_axis,
output_type,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dimension=axis,
output_type=output_type,
)
# ArgMin
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.ArgMin",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
allow_inf=False,
),
output_type=st.sampled_from(["int32", "int64"]),
test_with_out=st.just(False),
)
def test_tensorflow_ArgMin( # NOQA
*,
dtype_x_axis,
output_type,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dimension=axis,
output_type=output_type,
)
# Asin
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Asin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Asin( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Atan
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Atan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Atan( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Atan2
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Atan2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Atan2( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, xs = dtype_and_x
# Assuming x and y have the same shape
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y=xs[0],
x=xs[1],
)
# Atanh
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Atanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Atanh( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Todo: Revise strategies once reimplemented in frontend
# BandedTriangularSolve
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.BandedTriangularSolve",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
),
test_with_out=st.just(False),
lower=st.booleans(),
adjoint=st.booleans(),
)
def test_tensorflow_BandedTriangularSolve(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
lower,
adjoint,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
matrix=x[0],
rhs=x[1],
lower=lower,
adjoint=adjoint,
)
# Todo: Revise strategies once reimplemented in frontend
# BatchMatMul
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.BatchMatMul",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
),
test_with_out=st.just(False),
adj_x=st.booleans(),
adj_y=st.booleans(),
)
def test_tensorflow_BatchMatMul(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
adj_x,
adj_y,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
adj_x=adj_x,
adj_y=adj_y,
)
# Todo: Revise strategies once reimplemented in frontend
# BatchMatMulV2
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.BatchMatMulV2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
),
test_with_out=st.just(False),
adj_x=st.booleans(),
adj_y=st.booleans(),
)
def test_tensorflow_BatchMatMulV2(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
adj_x,
adj_y,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
adj_x=adj_x,
adj_y=adj_y,
)
# Todo: Revise strategies once reimplemented in frontend
# BatchMatMulV3
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.BatchMatMulV3",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
),
test_with_out=st.just(False),
Tout=st.sampled_from(["float32", "float64"]),
adj_x=st.booleans(),
adj_y=st.booleans(),
)
def test_tensorflow_BatchMatMulV3(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
Tout,
adj_x,
adj_y,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
Tout=Tout,
adj_x=adj_x,
adj_y=adj_y,
)
# BitwiseAnd
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.BitwiseAnd",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_BitwiseAnd( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# BitwiseOr
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.BitwiseOr",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_BitwiseOr( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# BitwiseXor
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.BitwiseXor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_BitwiseXor( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# BroadcastTo
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.BroadcastTo",
array_and_shape=helpers.array_and_broadcastable_shape(_get_shared_dtype()),
test_with_out=st.just(False),
)
def test_tensorflow_BroadcastTo( # NOQA
*,
array_and_shape,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
x, to_shape = array_and_shape
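    # the helper returns a concrete array and a broadcast-target shape, so the dtype comes from the array itself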
helpers.test_frontend_function(
input_dtypes=[x.dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
shape=to_shape,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Ceil",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Ceil( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Cholesky",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=10,
shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
),
test_with_out=st.just(False),
)
def test_tensorflow_Cholesky( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
x = x[0]
x = (
np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
) # make symmetric positive-definite
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
rtol=1e-4,
atol=1e-4,
)
# Todo: Revise strategies once reimplemented in frontend
# Complex
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Complex",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
),
test_with_out=st.just(False),
Tout=st.sampled_from(["complex64", "complex128"]),
)
def test_tensorflow_Complex(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
Tout,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
real=x[0],
imag=x[1],
Tout=Tout,
)
# Concat
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Concat",
xs_n_input_dtypes_n_unique_idx=_arrays_idx_n_dtypes(),
test_with_out=st.just(False),
)
def test_tensorflow_Concat( # NOQA
*,
xs_n_input_dtypes_n_unique_idx,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
xs, input_dtypes, unique_idx = xs_n_input_dtypes_n_unique_idx
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
concat_dim=unique_idx,
values=xs,
)
# ConcatV2
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.ConcatV2",
xs_n_input_dtypes_n_unique_idx=_arrays_idx_n_dtypes(),
test_with_out=st.just(False),
number_positional_args=st.just(0),
)
def test_tensorflow_ConcatV2(
xs_n_input_dtypes_n_unique_idx,
test_flags,
frontend,
backend_fw,
fn_tree,
):
xs, input_dtypes, unique_idx = xs_n_input_dtypes_n_unique_idx
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
values=xs,
axis=unique_idx,
)
# Conj
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Conj",
dtype_and_xs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("complex"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Conj( # NOQA
*,
dtype_and_xs,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_xs
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=xs[0],
)
# Conv2D
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Conv2D",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NHWC"]),
padding=st.sampled_from(["SAME", "VALID", "EXPLICIT"]),
type="2d",
dilation_min=1,
dilation_max=1,
),
test_with_out=st.just(False),
number_positional_args=st.just(0),
)
def test_tensorflow_Conv2D(
*,
x_f_d_df,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x, filters, dilation, data_format, stride, padding = x_f_d_df
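    # locate the channel axis, then broadcast strides and dilations to full per-dim form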
channel_index = data_format.find("C")
stride = _convolution_broadcast_helper(
stride, num_spatial_dims=2, channel_index=channel_index, name="strides"
)
dilation = _convolution_broadcast_helper(
dilation, num_spatial_dims=2, channel_index=channel_index, name="dilations"
)
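    # when the sampled padding is a list of explicit values, pass it via explicit_paddings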
explicit_padding = None
if isinstance(padding, list):
explicit_padding = padding
padding = "EXPLICIT"
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x,
filter=filters,
strides=stride,
padding=padding,
explicit_paddings=explicit_padding,
data_format=data_format,
dilations=dilation,
use_cudnn_on_gpu=True,
)
# Conv3D
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Conv3D",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NDHWC"]),
padding=st.sampled_from(["SAME", "VALID"]),
type="3d",
        # TensorFlow backprop doesn't support dilations greater than 1 on CPU
dilation_min=1,
dilation_max=1,
),
test_with_out=st.just(False),
number_positional_args=st.just(0),
)
def test_tensorflow_Conv3D(
*,
x_f_d_df,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x, filters, dilation, data_format, stride, padding = x_f_d_df
    # Broadcast strides and dilations to the correct dims so the ground-truth
    # backend function runs correctly
stride = _convolution_broadcast_helper(
stride, num_spatial_dims=3, channel_index=4, name="strides"
)
dilation = _convolution_broadcast_helper(
dilation, num_spatial_dims=3, channel_index=4, name="dilations"
)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x,
filter=filters,
strides=stride,
padding=padding,
data_format=data_format,
dilations=dilation,
)
# Cos
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Cos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Cos( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Cosh
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Cosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Cosh(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Cross
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Cross",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=3,
max_dim_size=3,
safety_factor_scale="log",
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Cross( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=xs[0],
b=xs[1],
)
# Cumprod
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Cumprod",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
),
exclusive=st.booleans(),
reverse=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_Cumprod( # NOQA
*,
dtype_x_axis,
exclusive,
reverse,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
exclusive=exclusive,
reverse=reverse,
)
# Cumsum
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Cumsum",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
),
exclusive=st.booleans(),
reverse=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_Cumsum( # NOQA
*,
dtype_x_axis,
exclusive,
reverse,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
atol=1e-02,
x=x[0],
axis=axis,
exclusive=exclusive,
reverse=reverse,
)
# Todo: Revise strategies once reimplemented in frontend
# CumulativeLogsumexp
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.CumulativeLogsumexp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
axis=st.just(0),
test_with_out=st.just(False),
exclusive=st.booleans(),
reverse=st.booleans(),
)
def test_tensorflow_CumulativeLogsumexp(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
axis,
exclusive,
reverse,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
exclusive=exclusive,
reverse=reverse,
)
# Todo: Revise strategies once reimplemented in frontend
# DebugGradientIdentity
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.DebugGradientIdentity",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_DebugGradientIdentity(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Diag",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=[
"float32",
"float64",
"int32",
"int64",
],
min_num_dims=1,
max_num_dims=1,
min_value=-1e30,
max_value=1e30,
),
test_with_out=st.just(False),
)
def test_tensorflow_Diag( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
diagonal=x[0],
)
# Div
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Div",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True
),
test_with_out=st.just(False),
)
def test_tensorflow_Div( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# Elu
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Elu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=-3,
max_value=3,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
name=st.just(None),
test_with_out=st.just(False),
number_positional_args=st.just(0),
)
def test_tensorflow_Elu(
*,
dtype_and_x,
name,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
name=name,
)
# Equal
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Equal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Equal( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# EuclideanNorm
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.EuclideanNorm",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=3,
max_num_dims=5,
min_dim_size=1,
max_dim_size=4,
min_axis=-3,
max_axis=2,
valid_axis=True,
allow_neg_axes=True,
),
keep_dims=st.booleans(),
test_with_out=st.just(False),
number_positional_args=st.just(0),
)
def test_tensorflow_EuclideanNorm(
dtype_values_axis,
keep_dims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, values, axis = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=values[0],
axis=axis,
keep_dims=keep_dims,
)
# Exp
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Exp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Exp( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Expm1
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Expm1",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Expm1( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# FFT
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.FFT",
dtype_and_x=helpers.dtype_and_values(
min_num_dims=1,
min_dim_size=2,
large_abs_safety_factor=15,
small_abs_safety_factor=15,
safety_factor_scale="log",
available_dtypes=helpers.get_dtypes("complex"),
),
test_with_out=st.just(False),
)
def test_tensorflow_FFT( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
rtol=1e-02,
atol=1e-02,
)
# FFT2D
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.FFT2D",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("complex"),
min_value=-1e5,
max_value=1e5,
min_num_dims=2,
max_num_dims=5,
min_dim_size=2,
max_dim_size=5,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
)
def test_tensorflow_FFT2D(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
rtol=1e-02,
atol=1e-02,
)
# FFT3D
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.FFT3D",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("complex"),
min_value=-1e5,
max_value=1e5,
min_num_dims=3,
max_num_dims=5,
min_dim_size=2,
max_dim_size=5,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
)
def test_tensorflow_FFT3D(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
rtol=1e-02,
atol=1e-02,
)
# Fill
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Fill",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
min_dim_size=1,
),
fill_value=_fill_value(),
dtypes=_dtypes(),
test_with_out=st.just(False),
)
def test_tensorflow_Fill( # NOQA
*,
shape,
fill_value,
dtypes,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-05,
dims=shape,
value=fill_value,
)
# Floor
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Floor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Floor( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# FloorDiv
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.FloorDiv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_FloorDiv( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# FloorMod
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.FloorMod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_FloorMod( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# Gather
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Gather",
params_indices_others=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("numeric"),
indices_dtypes=["int32", "int64"],
disable_random_axis=True,
axis_zero=True,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
test_with_out=st.just(False),
)
def test_tensorflow_Gather( # NOQA
*,
params_indices_others,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtypes, params, indices = params_indices_others
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
params=params,
indices=indices,
validate_indices=True,
)
# GatherNd
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.GatherNd",
params_indices_axis_batch_dims=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("valid"),
indices_dtypes=["int32", "int64"],
min_num_dims=3,
max_num_dims=3,
min_dim_size=3,
max_dim_size=3,
axis_zero=True,
disable_random_axis=True,
indices_same_dims=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_GatherNd(
*,
params_indices_axis_batch_dims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, params, indices = params_indices_axis_batch_dims
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
params=params,
indices=indices,
)
# Greater
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Greater",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Greater( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# GreaterEqual
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.GreaterEqual",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_GreaterEqual( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# Identity
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Identity",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Identity( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# IdentityN
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.IdentityN",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
test_with_out=st.just(False),
)
def test_tensorflow_IdentityN( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Igamma",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
abs_smallest_val=1e-5,
min_num_dims=2,
max_num_dims=2,
min_dim_size=3,
max_dim_size=3,
min_value=2,
max_value=100,
allow_nan=False,
),
test_with_out=st.just(False),
)
def test_tensorflow_Igamma(
*,
dtype_and_x,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-04,
a=xs[0],
x=xs[1],
)
# Imag
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Imag",
dtype_and_xs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
send_Tout=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_Imag(
*,
dtype_and_xs,
send_Tout,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, xs = dtype_and_xs
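    # for complex inputs, resolve the sampled boolean send_Tout into the matching real dtype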
if input_dtype[0] == "complex128":
send_Tout = "float64"
elif input_dtype[0] == "complex64":
send_Tout = "float32" if send_Tout else None
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=xs[0],
Tout=send_Tout,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Inv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
test_with_out=st.just(False),
)
def test_tensorflow_Inv( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.InvGrad",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_InvGrad( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y=x[0],
dy=x[1],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Invert",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Invert( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.LeakyRelu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
),
test_with_out=st.just(False),
alpha=helpers.floats(min_value=0, max_value=1),
)
def test_tensorflow_LeakyReLU(
*,
dtype_and_x,
alpha,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
    helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
alpha=alpha,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.LeftShift",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_LeftShift( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# Less
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Less",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Less( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# LessEqual
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.LessEqual",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_LessEqual( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.LinSpace",
dtype_and_params=_LinSpace_helper(),
num=helpers.ints(min_value=2, max_value=10),
)
def test_tensorflow_LinSpace(
*,
dtype_and_params,
num,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, start, stop = dtype_and_params
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
start=start,
stop=stop,
num=num,
on_device=on_device,
)
# Log
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Log",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Log( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Log1p
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Log1p",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_tensorflow_Log1p( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# LogSoftmax
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.LogSoftmax",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_LogSoftmax( # NOQA
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
logits=x[0],
)
# LogicalNot
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.LogicalNot",
dtype_and_x=helpers.dtype_and_values(
dtype=["bool"],
num_arrays=1,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_LogicalNot( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# LogicalOr
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.LogicalOr",
dtype_and_x=helpers.dtype_and_values(
dtype=["bool", "bool"],
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_LogicalOr( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# MatMul
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.MatMul",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=[
"float32",
"float64",
"int32",
"int64",
],
shape=(3, 3),
num_arrays=2,
shared_dtype=True,
large_abs_safety_factor=10,
small_abs_safety_factor=10,
safety_factor_scale="log",
),
transpose_a=st.booleans(),
transpose_b=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_MatMul( # NOQA
*,
dtype_and_x,
transpose_a,
transpose_b,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
a=x[0],
b=x[1],
transpose_a=transpose_a,
transpose_b=transpose_b,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.MatrixDeterminant",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
min_value=-5,
max_value=5,
),
test_with_out=st.just(False),
)
def test_tensorflow_MatrixDeterminant( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.MatrixInverse",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=helpers.ints(min_value=2, max_value=10).map(lambda x: (x, x)),
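        # keep only well-conditioned matrices so the inverse is numerically stable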
).filter(lambda x: np.linalg.cond(x[1][0].tolist()) < 1 / sys.float_info.epsilon),
adjoint=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_MatrixInverse( # NOQA
*,
dtype_x,
adjoint,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
adjoint=adjoint,
rtol=1e-05,
atol=1e-04,
)
# Max
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Max",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
),
keep_dims=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_Max( # NOQA
*,
dtype_x_axis,
keep_dims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
axis=axis,
keep_dims=keep_dims,
)
# MaxPool3D
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.MaxPool3D",
aliases=["tensorflow.nn.max_pool3d"],
data_format=st.sampled_from(["NDHWC", "NCDHW"]),
x_k_s_p=helpers.arrays_for_pooling(min_dims=5, max_dims=5, min_side=1, max_side=5),
test_with_out=st.just(False),
)
def test_tensorflow_MaxPool3D(
*,
x_k_s_p,
data_format,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, ksize, strides, padding = x_k_s_p
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
)
# Maximum
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Maximum",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Maximum( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# Mean
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Mean",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-10,
max_value=3,
),
keep_dims=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_Mean( # NOQA
*,
dtype_x_axis,
keep_dims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
axis=axis,
keep_dims=keep_dims,
rtol=1e-02,
atol=1e-02,
)
# Min
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Min",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
),
keep_dims=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_Min( # NOQA
*,
dtype_x_axis,
keep_dims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
axis=axis,
keep_dims=keep_dims,
)
# Minimum
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Minimum",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Minimum( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# Mod
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Mod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Mod( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Mul",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Mul( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# Neg
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Neg",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=[
"float32",
"float64",
"int8",
"int16",
"int32",
"int64",
],
),
test_with_out=st.just(False),
)
def test_tensorflow_Neg( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# NotEqual
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.NotEqual",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_NotEqual( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.NthElement",
array_indices_axis=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("numeric"),
indices_dtypes=["int32"],
min_num_dims=1,
min_dim_size=1,
disable_random_axis=True,
),
reverse=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_NthElement( # NOQA
*,
array_indices_axis,
reverse,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, n = array_indices_axis
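    # a single sampled index is reused as n for the order statistic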
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
n=n.flatten()[0],
reverse=reverse,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.OnesLike",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
test_with_out=st.just(False),
)
def test_tensorflow_OnesLike( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Pack",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_Pack( # NOQA
dtype_x_axis,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
values=x,
axis=axis,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Pad",
dtype_x_paddings=_pad_helper(),
number_positional_args=st.just(0),
test_with_out=st.just(False),
)
def test_tensorflow_Pad( # NOQA
dtype_x_paddings,
frontend,
test_flags,
fn_tree,
backend_fw,
):
dtype, x, padding_dtype, paddings = dtype_x_paddings
helpers.test_frontend_function(
input_dtypes=dtype + padding_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
input=x,
paddings=paddings,
)
# TODO: Fails with torch backend
# ivy.exceptions.IvyBackendException: torch: constant_pad: constant_pad_nd(): argument
# 'value' (position 3) must be Number, not bfloat16
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.PadV2",
dtype_x_paddings=_pad_helper(return_constant_values=True),
test_with_out=st.just(False),
)
def test_tensorflow_PadV2(
dtype_x_paddings,
frontend,
test_flags,
fn_tree,
backend_fw,
):
dtype, x, padding_dtype, paddings, constant_values = dtype_x_paddings
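    # dtype appears twice since constant_values is expected to share the input's dtype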
helpers.test_frontend_function(
input_dtypes=dtype + padding_dtype + dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
input=x,
paddings=paddings,
constant_values=constant_values,
)
# Pow
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Pow",
dtype_and_x=_pow_helper_shared_dtype(),
test_with_out=st.just(False),
)
def test_tensorflow_Pow( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Prod",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
),
keep_dims=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_Prod( # NOQA
*,
dtype_x_axis,
keep_dims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
axis=axis,
keep_dims=keep_dims,
)
# Todo: Revise strategies once reimplemented in frontend
# Real
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Real",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
Tout=st.sampled_from(["float32", "float64"]),
)
def test_tensorflow_Real(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
Tout,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
Tout=Tout,
)
# RealDiv
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.RealDiv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_tensorflow_RealDiv( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-03,
rtol=1e-03,
x=xs[0],
y=xs[1],
)
# Reciprocal
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Reciprocal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_Reciprocal( # NOQA
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
x=x[0],
)
# Relu
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Relu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_Relu( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Relu6",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_Relu6( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Reshape",
test_with_out=st.just(False),
x_reshape=_reshape_helper(),
)
def test_tensorflow_Reshape( # NOQA
*,
x_reshape,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
x, dtype, shape = x_reshape
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensor=x,
shape=shape,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Reverse",
dtype_x_axis=reverse_helper(),
)
def test_tensorflow_Reverse(
*,
dtype_x_axis,
frontend,
fn_tree,
test_flags,
on_device,
backend_fw,
):
dtype, x, axis_dtype, axis = dtype_x_axis
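    # the dims tensor has its own dtype, so it is appended to the input dtypes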
helpers.test_frontend_function(
input_dtypes=dtype + axis_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
tensor=x[0],
dims=axis[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.RightShift",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
min_value=0,
max_value=8,
),
test_with_out=st.just(False),
)
def test_tensorflow_RightShift( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Round",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Round( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Rsqrt
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Rsqrt",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Rsqrt(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Shape
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Shape",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_Shape( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.ShapeN",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), max_num_dims=4
),
output_dtype=st.sampled_from(["int32", "int64"]),
test_with_out=st.just(False),
)
def test_tensorflow_ShapeN( # NOQA
*,
dtype_and_x,
output_dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input,
out_type=output_dtype,
)
# Sigmoid
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Sigmoid",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_Sigmoid( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Sign
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Sign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
large_abs_safety_factor=5,
small_abs_safety_factor=5,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_tensorflow_Sign( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Sinh
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Sinh",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_Sinh( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Size",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), max_num_dims=4
),
output_dtype=st.sampled_from(["int32", "int64"]),
test_with_out=st.just(False),
)
def test_tensorflow_Size( # NOQA
*, dtype_and_x, frontend, test_flags, backend_fw, fn_tree, on_device, output_dtype
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
out_type=output_dtype,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Softmax",
dtype_values_axis=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_Softmax(
dtype_values_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, values = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
logits=values[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Softplus",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_Softplus( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
)
# Softsign
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Softsign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_Softsign(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
)
# Split
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Split",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
axis=st.shared(
helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
force_int=True,
),
key="target_axis",
),
num_splits=_get_splits(),
)
def test_tensorflow_Split( # NOQA
*,
dtype_and_x,
axis,
num_splits,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, value = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
value=value[0],
axis=axis,
num_split=num_splits,
)
# SplitV
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.SplitV",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
axis=st.shared(
helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
force_int=True,
),
key="target_axis",
),
size_splits=_get_splits(as_list=True),
test_with_out=st.just(False),
)
def test_tensorflow_SplitV( # NOQA
*,
dtype_and_x,
axis,
size_splits,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, value = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
value=value[0],
axis=axis,
size_splits=size_splits,
num_split=len(size_splits),
)
# Sqrt
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Sqrt",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Sqrt( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Square
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Square",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Square( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.SquaredDifference",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_SquaredDifference(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# Squeeze
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Squeeze",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="value_shape"),
),
axis=_squeeze_helper(),
test_with_out=st.just(False),
)
def test_tensorflow_Squeeze( # NOQA
dtype_value,
axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, xs = dtype_value
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=xs[0],
axis=axis,
)
# Sub
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Sub",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True
),
test_with_out=st.just(False),
)
def test_tensorflow_Sub( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Sum",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
),
keep_dims=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_Sum( # NOQA
*,
dtype_x_axis,
keep_dims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
axis=axis,
keep_dims=keep_dims,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Svd",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=0,
max_value=10,
shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
),
full_matrices=st.booleans(),
compute_uv=st.just(True),
)
def test_tensorflow_Svd(
*,
dtype_and_x,
full_matrices,
compute_uv,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
dtype, x = dtype_and_x
x = np.asarray(x[0], dtype=dtype[0])
# make symmetric positive definite beforehand
x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
input=x,
full_matrices=full_matrices,
compute_uv=compute_uv,
)
ret = [ivy.to_numpy(x) for x in ret]
frontend_ret = [np.asarray(x) for x in frontend_ret]
u, s, vh = ret
frontend_s, frontend_u, frontend_vh = frontend_ret
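    # SVD factors are only unique up to sign/ordering, so compare the
    # reconstructions u @ diag(s) @ vh from both backends rather than the raw factors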
assert_all_close(
ret_np=u @ np.diag(s) @ vh,
ret_from_gt_np=frontend_u @ np.diag(frontend_s) @ frontend_vh.T,
rtol=1e-2,
atol=1e-2,
ground_truth_backend=frontend,
backend=backend_fw,
)
# Tan
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Tan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Tan( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# Tanh
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Tanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_Tanh( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# TanhGrad
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.TanhGrad",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True
),
test_with_out=st.just(False),
)
def test_tensorflow_TanhGrad( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y=xs[0],
dy=xs[1],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Tile", all_arguments=_multiple_shape_helper()
)
def test_tensorflow_Tile(
*,
all_arguments,
test_flags,
frontend,
fn_tree,
on_device,
backend_fw,
):
input_dtype, input_matrix, dt_and_multiples = all_arguments
dt_mul, multiples = dt_and_multiples
helpers.test_frontend_function(
input_dtypes=input_dtype + dt_mul,
input=input_matrix[0],
multiples=multiples[0],
test_flags=test_flags,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.TruncateDiv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, shared_dtype=True
),
test_with_out=st.just(False),
)
def test_tensorflow_TruncateDiv( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, xs = dtype_and_x
    # prevent divisors that are too close to zero
assume(not np.any(np.isclose(xs[1], 0)))
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Unpack",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_Unpack( # NOQA
*,
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
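    # raw_ops.Unpack requires num to equal the size of the unpacked axis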
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
value=x[0],
num=x[0].shape[axis],
axis=axis,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.UnsortedSegmentProd",
params=_segment_ops_helper(),
test_with_out=st.just(False),
)
def test_tensorflow_UnsortedSegmentProd(
*,
params,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtypes, data, segment_ids, max_val = params
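    # max_val is the largest drawn segment id, so max_val + 1 segments are needed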
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
data=data[0],
segment_ids=segment_ids[0],
num_segments=max_val + 1,
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Xdivy",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Xdivy( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Xlog1py",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Xlog1py( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Xlogy",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float16", "float32", "float64"],
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Xlogy(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# Zeta
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Zeta",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=1,
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_Zeta(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
q=x[1],
)
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Roll",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
),
shift=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
force_tuple=True,
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
force_tuple=True,
),
)
def test_tensorflow_roll(
*,
dtype_and_values,
shift,
axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
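    # tf.roll expects shift and axis to have matching lengths, so trim or unwrap
    # the drawn values before calling the frontend function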
if isinstance(shift, int) and isinstance(axis, tuple):
axis = axis[0]
if isinstance(shift, tuple) and isinstance(axis, tuple):
if len(shift) != len(axis):
mn = min(len(shift), len(axis))
shift = shift[:mn]
axis = axis[:mn]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
shift=shift,
axis=axis,
)
# Transpose
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.Transpose",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
),
perm=_permute_dims_helper(),
test_with_out=st.just(False),
)
def test_tensorflow_transpose( # NOQA
*,
dtype_and_x,
perm,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
perm=perm,
)
# ZerosLike
@handle_frontend_test(
fn_tree="tensorflow.raw_ops.ZerosLike",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_zeros_like( # NOQA
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py",
"repo_id": "ivy",
"token_count": 56771
} | 64 |
# global
from hypothesis import strategies as st
import importlib
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
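# Draws the probability dtype, batch size, number of samples, replacement flag and a
# strictly positive probability matrix used by the torch.multinomial test below.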
@st.composite
def _pop_size_num_samples_replace_n_probs(draw):
prob_dtype = draw(helpers.get_dtypes("float", full=False))
batch_size = draw(helpers.ints(min_value=1, max_value=5))
replace = draw(st.booleans())
num_samples = draw(helpers.ints(min_value=1, max_value=20))
probs = draw(
helpers.array_values(
dtype=prob_dtype[0],
shape=[batch_size, num_samples],
min_value=1.0013580322265625e-05,
max_value=1.0,
exclude_min=True,
)
)
return prob_dtype, batch_size, num_samples, replace, probs
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="torch.bernoulli",
dtype_and_probs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", full=False),
min_value=0,
max_value=1,
min_num_dims=0,
),
)
def test_torch_bernoulli(
dtype_and_probs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, probs = dtype_and_probs
def call():
return helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
input=probs[0],
)
ret = call()
if not ivy.exists(ret):
return
ret_np, ret_from_np = ret
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw)
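    # outputs are random, so only dtype and shape are compared with the ground truth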
for u, v in zip(ret_np, ret_from_np):
assert u.dtype == v.dtype
assert u.shape == v.shape
@handle_frontend_test(
fn_tree="torch.manual_seed",
seed=st.integers(min_value=0, max_value=2**32 - 1),
)
def test_torch_manual_seed(
*,
seed,
fn_tree,
frontend,
test_flags,
backend_fw,
):
# just test calling the function
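    # fn_tree is prefixed with "ivy.functional.frontends." (25 characters); strip the
    # prefix, import the resulting module and call the function by name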
frontend_fw = importlib.import_module(fn_tree[25 : fn_tree.rfind(".")])
split_index = fn_tree.rfind(".")
_, fn_name = fn_tree[:split_index], fn_tree[split_index + 1 :]
frontend_fw.__dict__[fn_name](seed)
# multinomial
@handle_frontend_test(
fn_tree="torch.multinomial",
everything=_pop_size_num_samples_replace_n_probs(),
)
def test_torch_multinomial(
*,
everything,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
prob_dtype, batch_size, num_samples, replace, probs = everything
def call():
return helpers.test_frontend_function(
input_dtypes=prob_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
input=probs,
num_samples=num_samples,
replacement=replace,
)
ret = call()
if not ivy.exists(ret):
return
ret_np, ret_from_np = ret
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw)
for u, v in zip(ret_np, ret_from_np):
assert u.dtype == v.dtype
assert u.shape == v.shape
@handle_frontend_test(
fn_tree="torch.normal",
dtype_and_mean=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-1000,
max_value=1000,
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
),
dtype_and_std=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=1000,
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
),
)
def test_torch_normal(
*,
dtype_and_mean,
dtype_and_std,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
mean_dtype, mean = dtype_and_mean
_, std = dtype_and_std
def call():
return helpers.test_frontend_function(
input_dtypes=mean_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
mean=mean[0],
std=std[0],
)
ret = call()
if not ivy.exists(ret):
return
ret_np, ret_from_np = ret
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw)
for u, v in zip(ret_np, ret_from_np):
assert u.dtype == v.dtype
assert u.shape == v.shape
@handle_frontend_test(
fn_tree="torch.poisson",
dtype_and_lam=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", full=False),
min_value=0,
max_value=100,
min_num_dims=0,
max_num_dims=10,
min_dim_size=1,
),
)
def test_torch_poisson(
*,
dtype_and_lam,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
lam_dtype, lam = dtype_and_lam
def call():
return helpers.test_frontend_function(
input_dtypes=lam_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
input=lam[0],
)
ret = call()
if not ivy.exists(ret):
return
ret_np, ret_from_np = ret
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw)
for u, v in zip(ret_np, ret_from_np):
assert u.dtype == v.dtype
assert u.shape == v.shape
@handle_frontend_test(
fn_tree="torch.rand",
dtype=helpers.get_dtypes("float", full=False),
size=helpers.get_shape(
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
)
def test_torch_rand(*, dtype, size, frontend, fn_tree, test_flags, backend_fw):
size = {f"size{i}": size[i] for i in range(len(size))}
test_flags.num_positional_args = len(size)
def call():
return helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_values=False,
fn_tree=fn_tree,
test_flags=test_flags,
**size,
)
ret = call()
if not ivy.exists(ret):
return
ret_np, ret_from_np = ret
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw)
for u, v in zip(ret_np, ret_from_np):
assert u.dtype == v.dtype
assert u.shape == v.shape
@handle_frontend_test(
fn_tree="torch.rand_like",
dtype=helpers.get_dtypes("float", full=False),
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=10,
min_dim_size=1,
max_dim_size=10,
),
)
def test_torch_rand_like(
dtype_and_x, dtype, *, frontend, fn_tree, test_flags, backend_fw
):
input_dtype, input = dtype_and_x
def call():
return helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_values=False,
fn_tree=fn_tree,
test_flags=test_flags,
input=input[0],
dtype=dtype[0],
)
ret = call()
if not ivy.exists(ret):
return
ret_np, ret_from_np = ret
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw)
for u, v in zip(ret_np, ret_from_np):
assert u.dtype == v.dtype
assert u.shape == v.shape
# randint
@handle_frontend_test(
fn_tree="torch.randint",
low=helpers.ints(min_value=0, max_value=10),
high=helpers.ints(min_value=11, max_value=20),
size=helpers.get_shape(),
dtype=helpers.get_dtypes("integer"),
)
def test_torch_randint(
*,
low,
high,
size,
dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
):
def call():
return helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_values=False,
fn_tree=fn_tree,
test_flags=test_flags,
low=low,
high=high,
size=size,
)
ret = call()
if not ivy.exists(ret):
return
ret_np, ret_from_np = ret
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw)
for u, v in zip(ret_np, ret_from_np):
assert u.dtype == v.dtype
assert u.shape == v.shape
@handle_frontend_test(
fn_tree="torch.randint_like",
dtype=helpers.get_dtypes("signed_integer", full=False),
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("signed_integer"),
min_num_dims=1,
max_num_dims=10,
min_dim_size=1,
max_dim_size=10,
),
low=helpers.ints(min_value=0, max_value=10),
high=helpers.ints(min_value=11, max_value=20),
)
def test_torch_randint_like(
dtype_and_x, low, high, *, dtype, frontend, fn_tree, test_flags, backend_fw
):
input_dtype, input = dtype_and_x
def call():
return helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_values=False,
fn_tree=fn_tree,
test_flags=test_flags,
input=input[0],
low=low,
high=high,
dtype=dtype[0],
)
ret = call()
if not ivy.exists(ret):
return
ret_np, ret_from_np = ret
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw)
for u, v in zip(ret_np, ret_from_np):
assert u.dtype == v.dtype
assert u.shape == v.shape
@handle_frontend_test(
fn_tree="torch.randn",
dtype=helpers.get_dtypes("float", full=False),
size=helpers.get_shape(
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
)
def test_torch_randn(*, dtype, size, frontend, fn_tree, test_flags, backend_fw):
size = {f"size{i}": size[i] for i in range(len(size))}
test_flags.num_positional_args = len(size)
def call():
return helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_values=False,
fn_tree=fn_tree,
test_flags=test_flags,
**size,
)
ret = call()
if not ivy.exists(ret):
return
ret_np, ret_from_np = ret
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw)
for u, v in zip(ret_np, ret_from_np):
assert u.dtype == v.dtype
assert u.shape == v.shape
@handle_frontend_test(
fn_tree="torch.randn_like",
dtype=helpers.get_dtypes("float", full=False),
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=10,
min_dim_size=1,
max_dim_size=10,
),
)
def test_torch_randn_like(
dtype_and_x, dtype, *, frontend, fn_tree, test_flags, backend_fw
):
input_dtype, input = dtype_and_x
def call():
return helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_values=False,
fn_tree=fn_tree,
test_flags=test_flags,
input=input[0],
dtype=dtype[0],
)
ret = call()
if not ivy.exists(ret):
return
ret_np, ret_from_np = ret
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw)
for u, v in zip(ret_np, ret_from_np):
assert u.dtype == v.dtype
assert u.shape == v.shape
# randperm
@handle_frontend_test(
fn_tree="torch.randperm",
n=st.integers(min_value=0, max_value=10),
dtype=helpers.get_dtypes("integer", full=False),
)
def test_torch_randperm(
*,
n,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
def call():
return helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
n=n,
)
ret = call()
if not ivy.exists(ret):
return
ret_np, ret_from_np = ret
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw)
for u, v in zip(ret_np, ret_from_np):
assert u.dtype == v.dtype
assert u.shape == v.shape
# set_rng_state
@handle_frontend_test(
fn_tree="torch.set_rng_state",
new_state=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=0,
max_value=10,
min_num_dims=1,
max_num_dims=1,
min_dim_size=1,
max_dim_size=1,
),
)
def test_torch_set_rng_state(
*,
new_state,
frontend,
test_flags,
fn_tree,
backend_fw,
):
dtype, new_state = new_state
helpers.test_frontend_function(
backend_to_test=backend_fw,
frontend=frontend,
input_dtypes=dtype,
test_values=False,
fn_tree=fn_tree,
test_flags=test_flags,
new_state=new_state[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_random_sampling.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_random_sampling.py",
"repo_id": "ivy",
"token_count": 7452
} | 65 |
"""Collection of tests for unified device functions."""
# global
import io
import multiprocessing
import os
import re
import shutil
import sys
import warnings
import numpy as np
import psutil
import subprocess
from hypothesis import strategies as st, assume
# nvidia-ml-py (pynvml) is not installed in CPU Dockerfile.
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.helpers.globals as test_globals
from ivy_tests.test_ivy.helpers import handle_test, BackendHandler
try:
import pynvml
except ImportError:
warnings.warn(
"pynvml installation was not found in the environment, functionalities"
" of the Ivy's device module will be limited. Please install pynvml if"
" you wish to use GPUs with Ivy."
)
# --- Helpers --- #
# --------------- #
# Function Splitting #
@st.composite
def _axis(draw):
max_val = draw(st.shared(helpers.ints(), key="num_dims"))
return draw(helpers.ints(min_value=0, max_value=max_val - 1))
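# These compositions are only passed to function_supported/unsupported_devices for
# introspection of their device attributes; they are not meant to be executed.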
def _composition_1(backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
return ivy_backend.relu().argmax()
def _composition_2(backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
return ivy_backend.ceil() or ivy_backend.floor()
def _empty_dir(path, recreate=False):
# Delete the directory if it exists and create it again if recreate is True
if os.path.exists(path):
shutil.rmtree(path)
if recreate:
os.makedirs(path)
def _get_possible_devices():
# Return all the possible usable devices
with BackendHandler.update_backend(test_globals.CURRENT_BACKEND) as ivy_backend:
devices = ["cpu"]
if ivy_backend.gpu_is_available():
for i in range(ivy_backend.num_gpus()):
devices.append("gpu:" + str(i))
# Return a list of ivy devices
return list(map(ivy_backend.Device, devices))
def _ram_array_and_clear_test(metric_fn, device, size=10000000):
    # This function checks that the memory metric increases while a large array
    # exists on the device, and decreases again once the array is deleted
# Measure usage before creating array
before = metric_fn()
# Create an array of floats, by default with 10 million elements (40 MB)
arr = ivy.random_normal(shape=(size,), dtype="float32", device=device)
during = metric_fn()
# Check that the memory usage has increased
assert before < during
# Delete the array
del arr
# Measure the memory usage after the array is deleted
after = metric_fn()
# Check that the memory usage has decreased
assert during > after
# --- Main --- #
# ------------ #
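# Parse the output of `top -bn1` to get the current user-space CPU utilisation,
# used as a cross-check against ivy.dev_util in test_dev_util.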
def get_cpu_percent():
output = str(subprocess.check_output(["top", "-bn1"]))
cpu_percent = float(re.search(r"%Cpu\(s\):\s+([\d.]+)\s+us", output).group(1))
return cpu_percent
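# Query pynvml for a GPU device and return its used memory as a percentage of total.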
def get_gpu_mem_usage(backend, device="gpu:0"):
    handle = backend.functional.ivy.device._get_nvml_gpu_handle(device)
info = pynvml.nvmlDeviceGetMemoryInfo(handle)
return (info.used / info.total) * 100
# as_ivy_dev
@handle_test(
fn_tree="functional.ivy.as_ivy_dev",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_as_ivy_dev(*, dtype_and_x, test_flags, backend_fw):
dtype, x = dtype_and_x
dtype = dtype[0]
x = x[0]
with BackendHandler.update_backend(backend_fw) as ivy_backend:
for device in _get_possible_devices():
x = ivy_backend.array(x, device=device)
if test_flags.as_variable and ivy_backend.is_float_dtype(dtype):
x = ivy_backend.functional.ivy.gradients._variable(x)
native_device = ivy_backend.dev(x, as_native=True)
ret = ivy_backend.as_ivy_dev(native_device)
# Type test
assert isinstance(ret, str)
# Value test
assert ret == device
# as_native_dev
@handle_test(
fn_tree="functional.ivy.as_native_dev",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_as_native_dev(*, dtype_and_x, test_flags, on_device, backend_fw):
dtype, x = dtype_and_x
dtype = dtype[0]
x = x[0]
with BackendHandler.update_backend(backend_fw) as ivy_backend:
for device in _get_possible_devices():
x = ivy_backend.asarray(x, device=on_device)
if test_flags.as_variable:
x = ivy_backend.functional.ivy.gradients._variable(x)
device = ivy_backend.as_native_dev(on_device)
ret = ivy_backend.as_native_dev(ivy_backend.dev(x))
# value test
if backend_fw == "tensorflow":
assert "/" + ":".join(ret[1:].split(":")[-2:]) == "/" + ":".join(
device[1:].split(":")[-2:]
)
elif backend_fw == "torch":
assert ret.type == device.type
elif backend_fw == "paddle":
assert ret._equals(device)
else:
assert ret == device
@handle_test(fn_tree="clear_cached_mem_on_dev")
def test_clear_cached_mem_on_dev(backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
devices = _get_possible_devices()
for device in devices:
# Testing on only GPU since clearing cache mem is relevant
# for only CUDA devices
if "gpu" in device:
arr = ivy_backend.random_normal( # noqa: F841
shape=(10000, 1000), dtype="float32", device=device
)
del arr
                before = get_gpu_mem_usage(ivy_backend, device)
                ivy_backend.clear_cached_mem_on_dev(device)
                after = get_gpu_mem_usage(ivy_backend, device)
assert before > after
# Device Allocation #
# default_device
@handle_test(fn_tree="functional.ivy.default_device")
def test_default_device(backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
# setting and unsetting
orig_len = len(ivy_backend.default_device_stack)
ivy_backend.set_default_device("cpu")
assert len(ivy_backend.default_device_stack) == orig_len + 1
ivy_backend.set_default_device("cpu")
assert len(ivy_backend.default_device_stack) == orig_len + 2
ivy_backend.unset_default_device()
assert len(ivy_backend.default_device_stack) == orig_len + 1
ivy_backend.unset_default_device()
assert len(ivy_backend.default_device_stack) == orig_len
# with
assert len(ivy_backend.default_device_stack) == orig_len
with ivy_backend.DefaultDevice("cpu"):
assert len(ivy_backend.default_device_stack) == orig_len + 1
with ivy_backend.DefaultDevice("cpu"):
assert len(ivy_backend.default_device_stack) == orig_len + 2
assert len(ivy_backend.default_device_stack) == orig_len + 1
assert len(ivy_backend.default_device_stack) == orig_len
# Tests #
# ------#
# Device Queries #
# dev
@handle_test(
fn_tree="functional.ivy.dev",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_dev(*, dtype_and_x, test_flags, backend_fw):
dtype, x = dtype_and_x
dtype = dtype[0]
x = x[0]
with BackendHandler.update_backend(backend_fw) as ivy_backend:
for device in _get_possible_devices():
x = ivy_backend.array(x, device=device)
if test_flags.as_variable and ivy_backend.is_float_dtype(dtype):
x = ivy_backend.functional.ivy.gradients._variable(x)
ret = ivy_backend.dev(x)
# type test
assert isinstance(ret, str)
# value test
assert ret == device
# array instance test
assert x.dev() == device
# container instance test
container_x = ivy_backend.Container({"a": x})
assert container_x.dev() == device
# container static test
assert ivy_backend.Container.static_dev(container_x) == device
@handle_test(fn_tree="dev_util")
def test_dev_util(backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
devices = _get_possible_devices()
for device in devices:
            # The internally called psutil.cpu_percent() has a unique behavior where it
            # returns 0 as usage when run a second time on the same line, so a simple
            # assert psutil.cpu_percent() == ivy.dev_util(device) isn't possible
if "cpu" in device:
assert 100 >= ivy_backend.dev_util(device) >= 0
                # Comparing CPU utilization using top. The two percentages won't be
                # exactly equal, but the absolute difference should stay below a
                # safe threshold
assert abs(get_cpu_percent() - ivy_backend.dev_util(device)) < 10
elif "gpu" in device:
handle = ivy_backend.functional.ivy.device._get_nvml_gpu_handle(device)
assert (
ivy_backend.dev_util(device)
== pynvml.nvmlDeviceGetUtilizationRates(handle).gpu
)
# function_supported_devices
@handle_test(
fn_tree="functional.ivy.function_supported_devices",
func=st.sampled_from([_composition_1, _composition_2]),
expected=st.just(["cpu"]),
)
def test_function_supported_devices(
*,
func,
expected,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
res = ivy_backend.function_supported_devices(func)
exp = set(expected)
assert sorted(exp) == sorted(res)
# function_unsupported_devices
@handle_test(
fn_tree="functional.ivy.function_supported_devices",
func=st.sampled_from([_composition_1, _composition_2]),
expected=st.just(["gpu", "tpu"]),
)
def test_function_unsupported_devices(
*,
func,
expected,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
res = ivy_backend.function_unsupported_devices(func)
exp = set(expected)
assert sorted(exp) == sorted(res)
@handle_test(
fn_tree="functional.ivy.get_all_ivy_arrays_on_dev",
num=helpers.ints(min_value=0, max_value=5),
)
def test_get_all_ivy_arrays_on_dev(
*,
num,
on_device,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
arrays = [ivy_backend.array(np.random.uniform(size=2)) for _ in range(num)]
arr_ids_on_dev = [
id(a) for a in ivy_backend.get_all_ivy_arrays_on_dev(on_device).values()
]
for a in arrays:
assert id(a) in arr_ids_on_dev
@handle_test(fn_tree="gpu_is_available")
def test_gpu_is_available(backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
        # If a GPU is available but NVML cannot be initialised, the test fails
if ivy_backend.gpu_is_available():
try:
pynvml.nvmlInit()
except pynvml.NVMLError:
assert False
# handle_soft_device_variable
@handle_test(
fn_tree="functional.ivy.handle_soft_device_variable",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
),
)
def test_handle_soft_device_variable(*, dtype_and_x, backend_fw):
dtype, x = dtype_and_x
dtype = dtype[0]
with BackendHandler.update_backend(backend_fw) as ivy_backend:
x = ivy_backend.to_device(x[0], "cpu")
def fn(x, y):
return ivy_backend.add(x, y)
for device in _get_possible_devices():
ivy_backend.set_default_device(device)
out = ivy_backend.handle_soft_device_variable(x, fn=fn, y=x)
# check if device shifting is successful
assert out.device == ivy_backend.default_device()
@handle_test(fn_tree="num_cpu_cores")
def test_num_cpu_cores(backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
        # also check against the multiprocessing module, since ivy uses psutil as its basis.
p_cpu_cores = psutil.cpu_count()
m_cpu_cores = multiprocessing.cpu_count()
assert isinstance(ivy_backend.num_cpu_cores(), int)
assert ivy_backend.num_cpu_cores() == p_cpu_cores
assert ivy_backend.num_cpu_cores() == m_cpu_cores
@handle_test(
fn_tree="functional.ivy.num_ivy_arrays_on_dev",
num=helpers.ints(min_value=0, max_value=5),
)
def test_num_ivy_arrays_on_dev(
*,
num,
on_device,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
arrays = [
ivy_backend.array(np.random.uniform(size=2).tolist(), device=on_device)
for _ in range(num)
]
assert ivy_backend.num_ivy_arrays_on_dev(on_device) == num
for item in arrays:
del item
@handle_test(fn_tree="percent_used_mem_on_dev")
def test_percent_used_mem_on_dev(backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
devices = _get_possible_devices()
for device in devices:
used = ivy_backend.percent_used_mem_on_dev(ivy_backend.Device(device))
assert 0 <= used <= 100
            # Same as test_used_mem_on_dev, but using percent of total memory as the
            # metric function
_ram_array_and_clear_test(
lambda: ivy_backend.percent_used_mem_on_dev(
device, process_specific=True
),
device=device,
)
@handle_test(
fn_tree="functional.ivy.print_all_ivy_arrays_on_dev",
num=helpers.ints(min_value=0, max_value=2),
attr_only=st.booleans(),
)
def test_print_all_ivy_arrays_on_dev(
*,
num,
attr_only,
on_device,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
arr = [ivy_backend.array(np.random.uniform(size=2)) for _ in range(num)]
# Flush to avoid artifact
sys.stdout.flush()
# temporarily redirect output to a buffer
captured_output = io.StringIO()
sys.stdout = captured_output
ivy_backend.print_all_ivy_arrays_on_dev(device=on_device, attr_only=attr_only)
# Flush again to make sure all data is printed
sys.stdout.flush()
written = captured_output.getvalue().splitlines()
# restore stdout
sys.stdout = sys.__stdout__
        # Should have written the same number of lines as the number of arrays on the device
assert len(written) == num
if attr_only:
            # Check that the printed attributes are in the format
            # (ivy.Shape(dim,...), dtype)
regex = r"^\(ivy.Shape\((\d+,(\d,\d*)*)\), \'\w*\'\)$"
else:
            # Check that the printed arrays are in the format ivy.array([...])
regex = r"^ivy\.array\(\[.*\]\)$"
# Clear the array from device
for item in arr:
del item
# Apply the regex search
assert all(re.match(regex, line) for line in written)
# profiler
@handle_test(
fn_tree="functional.ivy.Profiler",
)
def test_profiler(*, backend_fw):
# ToDo: find way to prevent this test from hanging when run
# alongside other tests in parallel
# log dir, each framework uses their own folder,
# so we can run this test in parallel
with BackendHandler.update_backend(backend_fw) as ivy_backend:
this_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(this_dir, "../log")
fw_log_dir = os.path.join(log_dir, backend_fw)
# Remove old content and recreate log dir
_empty_dir(fw_log_dir, True)
# with statement
with ivy_backend.Profiler(fw_log_dir):
a = ivy_backend.ones([10])
b = ivy_backend.zeros([10])
_ = a + b
# Should have content in folder
assert len(os.listdir(fw_log_dir)) != 0, "Profiler did not log anything"
# Remove old content and recreate log dir
_empty_dir(fw_log_dir, True)
# Profiler should stop log
assert (
len(os.listdir(fw_log_dir)) == 0
), "Profiler logged something while stopped"
# start and stop methods
profiler = ivy_backend.Profiler(fw_log_dir)
profiler.start()
a = ivy_backend.ones([10])
b = ivy_backend.zeros([10])
_ = a + b
profiler.stop()
# Should have content in folder
assert len(os.listdir(fw_log_dir)) != 0, "Profiler did not log anything"
# Remove old content including the logging folder
_empty_dir(fw_log_dir, False)
assert not os.path.exists(fw_log_dir), "Profiler recreated logging folder"
@handle_test(
fn_tree="functional.ivy.split_func_call",
array_shape=helpers.lists(
x=helpers.ints(min_value=1, max_value=3),
min_size="num_dims",
max_size="num_dims",
size_bounds=[1, 3],
),
dtype=helpers.get_dtypes("numeric", full=False),
chunk_size=helpers.ints(min_value=1, max_value=3),
axis=_axis(),
)
def test_split_func_call(
*,
array_shape,
dtype,
chunk_size,
axis,
test_flags,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
# inputs
shape = tuple(array_shape)
x1 = np.random.uniform(size=shape).astype(dtype[0])
x2 = np.random.uniform(size=shape).astype(dtype[0])
x1 = ivy_backend.asarray(x1)
x2 = ivy_backend.asarray(x2)
if test_flags.as_variable and ivy_backend.is_float_dtype(dtype[0]):
x1 = ivy_backend.functional.ivy.gradients._variable(x1)
x2 = ivy_backend.functional.ivy.gradients._variable(x2)
# function
def func(t0, t1):
return t0 * t1, t0 - t1, t1 - t0
# predictions
a, b, c = ivy_backend.split_func_call(
func, [x1, x2], "concat", chunk_size=chunk_size, input_axes=axis
)
# true
a_true, b_true, c_true = func(x1, x2)
# value test
helpers.assert_all_close(
ivy_backend.to_numpy(a), ivy_backend.to_numpy(a_true), backend=backend_fw
)
helpers.assert_all_close(
ivy_backend.to_numpy(b), ivy_backend.to_numpy(b_true), backend=backend_fw
)
helpers.assert_all_close(
ivy_backend.to_numpy(c), ivy_backend.to_numpy(c_true), backend=backend_fw
)
@handle_test(
fn_tree="functional.ivy.split_func_call",
array_shape=helpers.lists(
x=helpers.ints(min_value=2, max_value=3),
min_size="num_dims",
max_size="num_dims",
size_bounds=[2, 3],
),
dtype=helpers.get_dtypes("numeric", full=False),
chunk_size=helpers.ints(min_value=1, max_value=3),
axis=helpers.ints(min_value=0, max_value=1),
)
def test_split_func_call_with_cont_input(
*,
array_shape,
test_flags,
dtype,
chunk_size,
axis,
on_device,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
shape = tuple(array_shape)
x1 = np.random.uniform(size=shape).astype(dtype[0])
x2 = np.random.uniform(size=shape).astype(dtype[0])
x1 = ivy_backend.asarray(x1, device=on_device)
x2 = ivy_backend.asarray(x2, device=on_device)
# inputs
if test_flags.as_variable and ivy_backend.is_float_dtype(dtype[0]):
_variable_fn = ivy_backend.functional.ivy.gradients._variable
in0 = ivy_backend.Container(cont_key=_variable_fn(x1))
in1 = ivy_backend.Container(cont_key=_variable_fn(x2))
else:
in0 = ivy_backend.Container(cont_key=x1)
in1 = ivy_backend.Container(cont_key=x2)
# function
def func(t0, t1):
return t0 * t1, t0 - t1, t1 - t0
# predictions
a, b, c = ivy.split_func_call(
func, [in0, in1], "concat", chunk_size=chunk_size, input_axes=axis
)
# true
a_true, b_true, c_true = func(in0, in1)
# value test
helpers.assert_all_close(
ivy_backend.to_numpy(a.cont_key),
ivy_backend.to_numpy(a_true.cont_key),
backend=backend_fw,
)
helpers.assert_all_close(
ivy_backend.to_numpy(b.cont_key),
ivy_backend.to_numpy(b_true.cont_key),
backend=backend_fw,
)
helpers.assert_all_close(
ivy_backend.to_numpy(c.cont_key),
ivy_backend.to_numpy(c_true.cont_key),
backend=backend_fw,
)
# to_dev
@handle_test(
fn_tree="functional.ivy.to_device",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
stream=helpers.ints(min_value=0, max_value=50),
)
def test_to_device(
*,
dtype_and_x,
stream,
test_flags,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
dtype = dtype[0]
x = x[0]
with BackendHandler.update_backend(backend_fw) as ivy_backend:
x = ivy_backend.asarray(x)
if test_flags.as_variable and ivy_backend.is_float_dtype(dtype):
x = ivy_backend.functional.ivy.gradients._variable(x)
# create a dummy array for out that is broadcastable to x
out = (
ivy_backend.zeros(ivy_backend.shape(x), device=on_device, dtype=dtype)
if test_flags.with_out
else None
)
device = ivy_backend.dev(x)
x_on_dev = ivy_backend.to_device(x, on_device, stream=stream, out=out)
dev_from_new_x = ivy_backend.dev(x_on_dev)
if test_flags.with_out:
# should be the same array test
assert x_on_dev is out
# should be the same device
if backend_fw != "paddle":
assert ivy_backend.dev(x_on_dev, as_native=True) == ivy_backend.dev(
out, as_native=True
)
else:
assert ivy_backend.dev(x_on_dev, as_native=False) == ivy_backend.dev(
out, as_native=False
)
# check if native arrays are the same
# these backends do not support native inplace updates
assume(backend_fw not in ["tensorflow", "jax"])
assert x_on_dev.data is out.data
# value test
if backend_fw == "tensorflow":
assert "/" + ":".join(dev_from_new_x[1:].split(":")[-2:]) == "/" + ":".join(
device[1:].split(":")[-2:]
)
elif backend_fw == "torch":
assert type(dev_from_new_x) == type(device) # noqa: E721
else:
assert dev_from_new_x == device
# array instance test
assert x.to_device(device).dev() == device
# container instance test
container_x = ivy_backend.Container({"x": x})
assert container_x.to_device(device).dev() == device
# container static test
assert ivy_backend.Container.to_device(container_x, device).dev() == device
@handle_test(fn_tree="total_mem_on_dev")
def test_total_mem_on_dev(backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
devices = _get_possible_devices()
for device in devices:
if "cpu" in device:
assert (
ivy_backend.total_mem_on_dev(device)
== psutil.virtual_memory().total / 1e9
)
elif "gpu" in device:
handle = ivy_backend.functional.ivy.device._get_nvml_gpu_handle(device)
gpu_mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
assert ivy_backend.total_mem_on_dev(device) == gpu_mem.total / 1e9
@handle_test(fn_tree="tpu_is_available")
def test_tpu_is_available(backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
import tensorflow as tf
try:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
tf.config.list_logical_devices("TPU")
tf.distribute.experimental.TPUStrategy(resolver)
ground_truth = True
except ValueError:
ground_truth = False
assert ivy_backend.tpu_is_available() == ground_truth
@handle_test(fn_tree="used_mem_on_dev")
def test_used_mem_on_dev(backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
devices = _get_possible_devices()
        # Check that some, but not all, of the memory on each device is used
for device in devices:
assert ivy_backend.used_mem_on_dev(device) > 0
assert ivy_backend.used_mem_on_dev(device) < ivy_backend.total_mem_on_dev(
device
)
_ram_array_and_clear_test(
lambda: ivy_backend.used_mem_on_dev(device, process_specific=True),
device=device,
)
| ivy/ivy_tests/test_ivy/test_functional/test_core/test_device.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_device.py",
"repo_id": "ivy",
"token_count": 11847
} | 66 |
import pytest
from hypothesis import strategies as st
import numpy as np
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
# --- Helpers --- #
# --------------- #
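# Draws a random tensor shape, CP rank, dtype and decomposition flags for the
# random_cp test.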
@st.composite
def _random_cp_data(draw):
shape = draw(
st.lists(helpers.ints(min_value=1, max_value=5), min_size=2, max_size=4)
)
rank = draw(helpers.ints(min_value=1, max_value=10))
dtype = draw(helpers.get_dtypes("float", full=False))
full = draw(st.booleans())
orthogonal = draw(st.booleans())
if (rank > min(shape)) and orthogonal:
rank = min(shape)
seed = draw(st.one_of((st.just(None), helpers.ints(min_value=0, max_value=2000))))
normalise_factors = draw(st.booleans())
return shape, rank, dtype[0], full, orthogonal, seed, normalise_factors
@st.composite
def _random_parafac2_data(draw):
num_shapes = draw(st.integers(min_value=2, max_value=4))
common_dimension = draw(st.integers(min_value=1, max_value=5))
shapes = [
(draw(st.integers(min_value=1, max_value=10)), common_dimension)
for _ in range(num_shapes)
]
rank = draw(helpers.ints(min_value=1, max_value=10))
dtype = draw(helpers.get_dtypes("float", full=False))
full = draw(st.booleans())
seed = draw(st.one_of((st.just(None), helpers.ints(min_value=0, max_value=2000))))
normalise_factors = draw(st.booleans())
return shapes, rank, dtype[0], full, seed, normalise_factors
@st.composite
def _random_tr_data(draw):
shape = draw(
st.lists(helpers.ints(min_value=1, max_value=5), min_size=2, max_size=4)
)
rank = min(shape)
dtype = draw(helpers.get_dtypes("valid", full=False))
full = draw(st.booleans())
seed = draw(st.one_of((st.just(None), helpers.ints(min_value=0, max_value=2000))))
return shape, rank, dtype[0], full, seed
@st.composite
def _random_tt_data(draw):
shape = draw(
st.lists(helpers.ints(min_value=1, max_value=5), min_size=2, max_size=4)
)
rank = draw(helpers.ints(min_value=1, max_value=len(shape)))
dtype = draw(helpers.get_dtypes("float", full=False))
full = draw(st.booleans())
seed = draw(st.one_of((st.just(None), helpers.ints(min_value=0, max_value=2000))))
return shape, rank, dtype[0], full, seed
@st.composite
def _random_tucker_data(draw):
shape = draw(
st.lists(helpers.ints(min_value=1, max_value=5), min_size=2, max_size=4)
)
rank = []
for dim in shape:
rank.append(draw(helpers.ints(min_value=1, max_value=dim)))
dtype = draw(helpers.get_dtypes("float", full=False))
full = draw(st.booleans())
orthogonal = draw(st.booleans())
seed = draw(st.one_of((st.just(None), helpers.ints(min_value=0, max_value=2000))))
non_negative = draw(st.booleans())
return shape, rank, dtype[0], full, orthogonal, seed, non_negative
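# Keeps sampling until the data and segment_ids shapes agree and every segment id
# is strictly smaller than num_segments.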
@st.composite
def valid_unsorted_segment_min_inputs(draw):
while True:
dtype = draw(st.sampled_from([ivy.int32, ivy.int64, ivy.float32, ivy.float64]))
segment_ids_dim = draw(st.integers(min_value=3, max_value=10))
num_segments = draw(st.integers(min_value=2, max_value=segment_ids_dim))
data_dim = draw(
helpers.get_shape(
min_dim_size=segment_ids_dim,
max_dim_size=segment_ids_dim,
min_num_dims=1,
max_num_dims=4,
)
)
data_dim = (segment_ids_dim,) + data_dim[1:]
data = draw(
helpers.array_values(
dtype=dtype,
shape=data_dim,
min_value=1,
max_value=10,
)
)
segment_ids = draw(
helpers.array_values(
dtype=ivy.int32,
shape=(segment_ids_dim,),
min_value=0,
max_value=num_segments + 1,
)
)
if data.shape[0] == segment_ids.shape[0]:
if np.max(segment_ids) < num_segments:
return (dtype, ivy.int32), data, num_segments, segment_ids
# --- Main --- #
# ------------ #
# eye_like
@handle_test(
fn_tree="functional.ivy.experimental.eye_like",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=1,
min_dim_size=1,
max_dim_size=5,
),
k=helpers.ints(min_value=-10, max_value=10),
test_gradients=st.just(False),
number_positional_args=st.just(1),
)
def test_eye_like(*, dtype_and_x, k, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
k=k,
dtype=dtype[0],
device=on_device,
)
# hamming_window
@handle_test(
fn_tree="functional.ivy.experimental.hamming_window",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
max_num_dims=0,
min_value=1,
max_value=10,
),
periodic=st.booleans(),
dtype_and_f=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
max_num_dims=0,
num_arrays=2,
min_value=0,
max_value=5,
),
dtype=helpers.get_dtypes("float", full=False),
test_gradients=st.just(False),
test_instance_method=st.just(False),
)
def test_hamming_window(
*,
dtype_and_x,
periodic,
dtype_and_f,
dtype,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype1, x = dtype_and_x
input_dtype2, f = dtype_and_f
helpers.test_function(
input_dtypes=input_dtype1 + input_dtype2,
test_flags=test_flags,
backend_to_test=backend_fw,
atol_=2e-06,
fn_name=fn_name,
on_device=on_device,
window_length=int(x[0]),
periodic=periodic,
alpha=float(f[0]),
beta=float(f[1]),
dtype=dtype[0],
)
# TODO: fix return precision problem when dtype=bfloat16
# hann_window
@handle_test(
fn_tree="functional.ivy.experimental.hann_window",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
max_num_dims=0,
min_value=1,
max_value=10,
),
periodic=st.booleans(),
dtype=helpers.get_dtypes("float", full=False),
test_gradients=st.just(False),
test_instance_method=st.just(False),
)
def test_hann_window(
*, dtype_and_x, periodic, dtype, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
atol_=0.015,
fn_name=fn_name,
on_device=on_device,
size=int(x[0]),
periodic=periodic,
dtype=dtype[0],
)
# indices
@handle_test(
fn_tree="functional.ivy.experimental.indices",
ground_truth_backend="numpy",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtypes=helpers.get_dtypes(
"numeric",
full=False,
),
sparse=st.booleans(),
container_flags=st.just([False]),
test_instance_method=st.just(False),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_indices(*, shape, dtypes, sparse, test_flags, backend_fw, fn_name, on_device):
helpers.test_function(
input_dtypes=[],
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
dimensions=shape,
dtype=dtypes[0],
sparse=sparse,
)
# kaiser_bessel_derived_window
@handle_test(
fn_tree="functional.ivy.experimental.kaiser_bessel_derived_window",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
max_num_dims=0,
min_value=1,
max_value=10,
),
beta=st.floats(min_value=1, max_value=5),
dtype=helpers.get_dtypes("float", full=False),
test_gradients=st.just(False),
test_instance_method=st.just(False),
)
def test_kaiser_bessel_derived_window(
*, dtype_and_x, beta, dtype, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
window_length=int(x[0]),
beta=beta,
dtype=dtype[0],
)
# kaiser_window
@handle_test(
fn_tree="functional.ivy.experimental.kaiser_window",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
max_num_dims=0,
min_value=1,
max_value=10,
),
periodic=st.booleans(),
beta=st.floats(min_value=0, max_value=5),
dtype=helpers.get_dtypes("float", full=False),
test_gradients=st.just(False),
test_instance_method=st.just(False),
)
def test_kaiser_window(
*, dtype_and_x, periodic, beta, dtype, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
window_length=int(x[0]),
periodic=periodic,
beta=beta,
dtype=dtype[0],
)
# mel_weight_matrix
@handle_test(
fn_tree="functional.ivy.experimental.mel_weight_matrix",
num_mel_bins=helpers.ints(min_value=5, max_value=10),
dft_length=helpers.ints(min_value=5, max_value=10),
sample_rate=helpers.ints(min_value=1000, max_value=2000),
lower_edge_hertz=helpers.floats(min_value=0.0, max_value=5.0),
upper_edge_hertz=helpers.floats(min_value=5.0, max_value=10.0),
test_with_out=st.just(False),
test_gradients=st.just(False),
test_instance_method=st.just(False),
)
def test_mel_weight_matrix(
*,
num_mel_bins,
dft_length,
sample_rate,
lower_edge_hertz,
upper_edge_hertz,
test_flags,
backend_fw,
fn_name,
on_device,
):
helpers.test_function(
input_dtypes=[],
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
rtol_=0.05,
atol_=0.05,
fn_name=fn_name,
num_mel_bins=num_mel_bins,
dft_length=dft_length,
sample_rate=sample_rate,
lower_edge_hertz=lower_edge_hertz,
upper_edge_hertz=upper_edge_hertz,
)
# ndenumerate
@handle_test(
fn_tree="functional.ivy.experimental.ndenumerate",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
),
)
def test_ndenumerate(dtype_and_x):
values = dtype_and_x[1][0]
for (index1, x1), (index2, x2) in zip(
np.ndenumerate(values), ivy.ndenumerate(values)
):
assert index1 == index2
assert x1 == x2.to_numpy()
# ndindex
@handle_test(
fn_tree="functional.ivy.experimental.ndindex",
dtype_x_shape=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
ret_shape=True,
),
)
def test_ndindex(dtype_x_shape):
shape = dtype_x_shape[2]
for index1, index2 in zip(np.ndindex(shape), ivy.ndindex(shape)):
assert index1 == index2
# polyval
@handle_test(
fn_tree="functional.ivy.experimental.polyval",
dtype_and_coeffs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
),
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=0,
),
test_with_out=st.just(False),
test_gradients=st.just(False),
test_instance_method=st.just(False),
)
def test_polyval(
*, dtype_and_coeffs, dtype_and_x, test_flags, backend_fw, fn_name, on_device
):
coeffs_dtype, coeffs = dtype_and_coeffs
x_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=coeffs_dtype + x_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
coeffs=coeffs,
x=x,
)
@handle_test(
fn_tree="functional.ivy.experimental.random_cp",
data=_random_cp_data(),
test_with_out=st.just(False),
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_random_cp(
*,
data,
test_flags,
backend_fw,
fn_name,
on_device,
):
shape, rank, dtype, full, orthogonal, seed, normalise_factors = data
results = helpers.test_function(
input_dtypes=[],
backend_to_test=backend_fw,
test_flags=test_flags,
on_device=on_device,
fn_name=fn_name,
shape=shape,
rank=rank,
dtype=dtype,
full=full,
orthogonal=orthogonal,
seed=seed,
normalise_factors=normalise_factors,
test_values=False,
)
ret_np, ret_from_gt_np = results
if full:
reconstructed_tensor = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
reconstructed_tensor_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np, backend=test_flags.ground_truth_backend
)
for x, x_gt in zip(reconstructed_tensor, reconstructed_tensor_gt):
assert np.prod(shape) == np.prod(x.shape)
assert np.prod(shape) == np.prod(x_gt.shape)
else:
weights = helpers.flatten_and_to_np(ret=ret_np[0], backend=backend_fw)
factors = helpers.flatten_and_to_np(ret=ret_np[1], backend=backend_fw)
weights_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np[0], backend=test_flags.ground_truth_backend
)
factors_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np[1], backend=test_flags.ground_truth_backend
)
for w, w_gt in zip(weights, weights_gt):
assert len(w) == rank
assert len(w_gt) == rank
for f, f_gt in zip(factors, factors_gt):
assert np.prod(f.shape) == np.prod(f_gt.shape)
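# test_random_cp only checks structural properties: with full=True the dense
# reconstruction must contain prod(shape) elements, and with full=False the
# weights must have length `rank` while each factor's element count matches the
# ground-truth backend's.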
@handle_test(
fn_tree="functional.ivy.experimental.random_tr",
data=_random_tr_data(),
test_with_out=st.just(False),
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_random_tr(
*,
data,
test_flags,
backend_fw,
fn_name,
on_device,
):
shape, rank, dtype, full, seed = data
results = helpers.test_function(
input_dtypes=[],
backend_to_test=backend_fw,
test_flags=test_flags,
on_device=on_device,
fn_name=fn_name,
shape=shape,
rank=rank,
dtype=dtype,
full=full,
seed=seed,
test_values=False,
)
ret_np, ret_from_gt_np = results
if full:
reconstructed_tensor = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
reconstructed_tensor_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np, backend=test_flags.ground_truth_backend
)
for x, x_gt in zip(reconstructed_tensor, reconstructed_tensor_gt):
assert np.prod(shape) == np.prod(x.shape)
assert np.prod(shape) == np.prod(x_gt.shape)
else:
core = helpers.flatten_and_to_np(ret=ret_np[0], backend=backend_fw)
factors = helpers.flatten_and_to_np(ret=ret_np[1], backend=backend_fw)
core_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np[0], backend=test_flags.ground_truth_backend
)
factors_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np[1], backend=test_flags.ground_truth_backend
)
for c, c_gt in zip(core, core_gt):
assert len(c) == rank
assert len(c_gt) == rank
for f, f_gt in zip(factors, factors_gt):
assert np.prod(f.shape) == np.prod(f_gt.shape)
def test_random_tr_throws_error_when_rank_first_last_elem_not_equal():
rank = [2, 3]
shape = [1, 2, 3]
with pytest.raises(ValueError) as e:
ivy.random_tr(shape, rank)
assert e.value.args
# **Uncomment when Tensorly validation issue is resolved.**
# https://github.com/tensorly/tensorly/issues/528
# @handle_test(
# fn_tree="functional.ivy.experimental.random_parafac2",
# data=_random_parafac2_data(),
# test_with_out=st.just(False),
# test_instance_method=st.just(False),
# )
# def test_random_parafac2(
# *,
# data,
# test_flags,
# backend_fw,
# fn_name,
# on_device,
# ):
# shapes, rank, dtype, full, seed, normalise_factors = data
# results = helpers.test_function(
# input_dtypes=[],
# backend_to_test=backend_fw,
# test_flags=test_flags,
# on_device=on_device,
# fn_name=fn_name,
# shapes=shapes,
# rank=rank,
# dtype=dtype,
# full=full,
# seed=seed,
# normalise_factors=normalise_factors,
# test_values=False,
# )
# ret_np, ret_from_gt_np = results
# if full:
# reconstructed_tensor = helpers.flatten_and_to_np(ret=ret_np,
# backend=backend_fw)
# reconstructed_tensor_gt = helpers.flatten_and_to_np(
# ret=ret_from_gt_np, backend=test_flags.ground_truth_backend
# )
# for x, x_gt in zip(reconstructed_tensor, reconstructed_tensor_gt):
# assert x_gt.shape == x.shape
# else:
# weights = helpers.flatten_and_to_np(ret=ret_np[0], backend=backend_fw)
# factors = helpers.flatten_and_to_np(ret=ret_np[1], backend=backend_fw)
# # projections = helpers.flatten_and_to_np(ret=ret_np[2], backend=backend_fw)
# weights_gt = helpers.flatten_and_to_np(
# ret=ret_from_gt_np[0], backend=test_flags.ground_truth_backend
# )
# factors_gt = helpers.flatten_and_to_np(
# ret=ret_from_gt_np[1], backend=test_flags.ground_truth_backend
# )
# # projections_gt = helpers.flatten_and_to_np(
# # ret=ret_from_gt_np[2], backend=test_flags.ground_truth_backend
# # )
# for w, w_gt in zip(weights, weights_gt):
# assert len(w) == rank
# assert len(w_gt) == rank
# for f, f_gt in zip(factors, factors_gt):
# assert np.prod(f.shape) == np.prod(f_gt.shape)
# # for p, p_gt in zip(projections,projections_gt):
# # assert np.prod(p.shape) == np.prod(p_gt.shape)
@handle_test(
fn_tree="functional.ivy.experimental.random_tt",
data=_random_tt_data(),
test_with_out=st.just(False),
test_instance_method=st.just(False),
)
def test_random_tt(
*,
data,
test_flags,
backend_fw,
fn_name,
on_device,
):
shape, rank, dtype, full, seed = data
results = helpers.test_function(
input_dtypes=[],
backend_to_test=backend_fw,
test_flags=test_flags,
on_device=on_device,
fn_name=fn_name,
shape=shape,
rank=rank,
dtype=dtype,
full=full,
seed=seed,
test_values=False,
)
ret_np, ret_from_gt_np = results
if full:
reconstructed_tensor = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
reconstructed_tensor_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np, backend=test_flags.ground_truth_backend
)
for x, x_gt in zip(reconstructed_tensor, reconstructed_tensor_gt):
assert np.prod(shape) == np.prod(x.shape)
assert np.prod(shape) == np.prod(x_gt.shape)
else:
factors = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
factors_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np, backend=test_flags.ground_truth_backend
)
for f, f_gt in zip(factors, factors_gt):
assert np.prod(f.shape) == np.prod(f_gt.shape)
@handle_test(
fn_tree="functional.ivy.experimental.random_tucker",
data=_random_tucker_data(),
test_with_out=st.just(False),
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_random_tucker(
*,
data,
test_flags,
backend_fw,
fn_name,
on_device,
):
shape, rank, dtype, full, orthogonal, seed, non_negative = data
results = helpers.test_function(
input_dtypes=[],
backend_to_test=backend_fw,
test_flags=test_flags,
on_device=on_device,
fn_name=fn_name,
shape=shape,
rank=rank,
dtype=dtype,
full=full,
orthogonal=orthogonal,
seed=seed,
non_negative=non_negative,
test_values=False,
)
ret_np, ret_from_gt_np = results
if full:
reconstructed_tensor = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
reconstructed_tensor_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np, backend=test_flags.ground_truth_backend
)
for x, x_gt in zip(reconstructed_tensor, reconstructed_tensor_gt):
assert np.prod(shape) == np.prod(x.shape)
assert np.prod(shape) == np.prod(x_gt.shape)
else:
core = helpers.flatten_and_to_np(ret=ret_np[0], backend=backend_fw)
factors = helpers.flatten_and_to_np(ret=ret_np[1], backend=backend_fw)
core_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np[0], backend=test_flags.ground_truth_backend
)
factors_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np[1], backend=test_flags.ground_truth_backend
)
for c, c_gt in zip(core, core_gt):
assert np.prod(c.shape) == np.prod(rank)
assert np.prod(c_gt.shape) == np.prod(rank)
for f, f_gt in zip(factors, factors_gt):
assert np.prod(f.shape) == np.prod(f_gt.shape)
@handle_test(
fn_tree="functional.ivy.experimental.tril_indices",
dtype_and_n=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
max_num_dims=0,
num_arrays=2,
min_value=0,
max_value=10,
),
k=helpers.ints(min_value=-11, max_value=11),
test_with_out=st.just(False),
test_gradients=st.just(False),
test_instance_method=st.just(False),
)
def test_tril_indices(*, dtype_and_n, k, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_n
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
on_device=on_device,
fn_name=fn_name,
n_rows=int(x[0]),
n_cols=int(x[1]),
k=k,
)
@handle_test(
fn_tree="functional.ivy.experimental.trilu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
k=helpers.ints(min_value=-10, max_value=10),
upper=st.booleans(),
)
def test_trilu(*, dtype_and_x, k, upper, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
upper=upper,
k=k,
)
@handle_test(
fn_tree="functional.ivy.experimental.unsorted_segment_mean",
d_x_n_s=valid_unsorted_segment_min_inputs(),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_unsorted_segment_mean(
*,
d_x_n_s,
test_flags,
backend_fw,
fn_name,
on_device,
):
dtypes, data, num_segments, segment_ids = d_x_n_s
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
data=data,
segment_ids=segment_ids,
num_segments=num_segments,
)
# unsorted_segment_min
@handle_test(
fn_tree="functional.ivy.experimental.unsorted_segment_min",
d_x_n_s=valid_unsorted_segment_min_inputs(),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_unsorted_segment_min(
*,
d_x_n_s,
test_flags,
backend_fw,
fn_name,
on_device,
):
dtypes, data, num_segments, segment_ids = d_x_n_s
helpers.test_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
on_device=on_device,
fn_name=fn_name,
data=data,
segment_ids=segment_ids,
num_segments=num_segments,
)
@handle_test(
fn_tree="functional.ivy.experimental.unsorted_segment_sum",
d_x_n_s=valid_unsorted_segment_min_inputs(),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_unsorted_segment_sum(
*,
d_x_n_s,
test_flags,
backend_fw,
fn_name,
on_device,
):
dtypes, data, num_segments, segment_ids = d_x_n_s
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
data=data,
segment_ids=segment_ids,
num_segments=num_segments,
)
# vorbis_window
@handle_test(
fn_tree="functional.ivy.experimental.vorbis_window",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
max_num_dims=0,
min_value=1,
max_value=10,
),
dtype=helpers.get_dtypes("float", full=False),
test_gradients=st.just(False),
test_instance_method=st.just(False),
)
def test_vorbis_window(
*, dtype_and_x, dtype, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
atol_=1e-02,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
window_length=int(x[0]),
dtype=dtype[0],
)
| ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_creation.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_creation.py",
"repo_id": "ivy",
"token_count": 13105
} | 67 |
# global
from hypothesis import strategies as st
# local
import numpy as np
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
from ivy_tests.test_ivy.test_functional.test_core.test_statistical import (
_statistical_dtype_values,
_get_castable_dtype,
)
# --- Helpers --- #
# --------------- #
@st.composite
def _get_castable_float_dtype_nan(draw, min_value=None, max_value=None):
available_dtypes = helpers.get_dtypes("float")
shape = draw(helpers.get_shape(min_num_dims=1, max_num_dims=4, max_dim_size=6))
dtype3, where = draw(
helpers.dtype_and_values(available_dtypes=["bool"], shape=shape)
)
dtype, values = draw(
helpers.dtype_and_values(
available_dtypes=available_dtypes,
num_arrays=1,
large_abs_safety_factor=6,
small_abs_safety_factor=24,
safety_factor_scale="log",
shape=shape,
min_value=min_value,
max_value=max_value,
allow_nan=True,
)
)
axis = draw(helpers.get_axis(shape=shape, force_int=True))
dtype1, values, dtype2 = draw(
helpers.get_castable_dtype(draw(available_dtypes), dtype[0], values[0])
)
return dtype1, [values], axis, dtype2, dtype3, where
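# A draw from this strategy is a 6-tuple: (input dtype, [values], axis,
# castable dtype, [bool dtype], [where mask]). Tests that do not need the
# where-mask entries should still unpack (or discard) all six items.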
@st.composite
def _get_dtype_value1_value2_cov(
draw,
available_dtypes,
min_num_dims,
max_num_dims,
min_dim_size,
max_dim_size,
abs_smallest_val=None,
min_value=None,
max_value=None,
allow_inf=False,
exclude_min=False,
exclude_max=False,
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
):
shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
dtype = draw(st.sampled_from(draw(available_dtypes)))
values = []
for i in range(2):
values.append(
draw(
helpers.array_values(
dtype=dtype,
shape=shape,
abs_smallest_val=abs_smallest_val,
min_value=min_value,
max_value=max_value,
allow_inf=allow_inf,
exclude_min=exclude_min,
exclude_max=exclude_max,
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
)
)
)
value1, value2 = values[0], values[1]
# modifiers: rowVar, bias, ddof
rowVar = draw(st.booleans())
bias = draw(st.booleans())
ddof = draw(helpers.ints(min_value=0, max_value=1))
numVals = None
if rowVar is False:
numVals = -1 if numVals == 0 else 0
else:
numVals = 0 if len(shape) == 1 else -1
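    # numVals selects which entry of `shape` sizes the weight vectors below, so
    # fweights/aweights match the number of observations implied by rowVar
    # (axis 0 when rowVar is False, the last axis otherwise).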
fweights = draw(
helpers.array_values(
dtype="int64",
shape=shape[numVals],
abs_smallest_val=1,
min_value=1,
max_value=10,
allow_inf=False,
)
)
aweights = draw(
helpers.array_values(
dtype="float64",
shape=shape[numVals],
abs_smallest_val=1,
min_value=1,
max_value=10,
allow_inf=False,
small_abs_safety_factor=1,
)
)
return [dtype], value1, value2, rowVar, bias, ddof, fweights, aweights
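# A draw from the strategy above is an 8-tuple
# ([dtype], value1, value2, rowVar, bias, ddof, fweights, aweights), where the
# weight vectors are 1-D and sized to the observation axis chosen via rowVar.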
@st.composite
def _histogram_helper(draw):
dtype_input = draw(st.sampled_from(draw(helpers.get_dtypes("float"))))
bins = draw(
helpers.array_values(
dtype=dtype_input,
shape=(draw(helpers.ints(min_value=1, max_value=10)),),
abs_smallest_val=-10,
min_value=-10,
max_value=10,
)
)
bins = np.asarray(sorted(set(bins)), dtype=dtype_input)
if len(bins) == 1:
bins = int(abs(bins[0]))
if bins == 0:
bins = 1
if dtype_input in draw(helpers.get_dtypes("unsigned")):
range = (
draw(
helpers.floats(
min_value=0, max_value=10, exclude_min=False, exclude_max=False
)
),
draw(
helpers.floats(
min_value=11, max_value=20, exclude_min=False, exclude_max=False
)
),
)
else:
range = (
draw(helpers.floats(min_value=-10, max_value=0)),
draw(helpers.floats(min_value=1, max_value=10)),
)
range = draw(st.sampled_from([range, None]))
else:
range = None
shape = draw(
helpers.get_shape(
min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=5
)
)
a = draw(
helpers.array_values(
dtype=dtype_input,
shape=shape,
min_value=-20,
max_value=20,
)
)
weights = draw(
helpers.array_values(
dtype=dtype_input,
shape=shape,
min_value=-20,
max_value=20,
)
)
# weights = draw(st.sampled_from([weights, None]))
axes = draw(
helpers.get_axis(
shape=shape,
# TODO: negative axes
allow_neg=False,
min_size=1,
max_size=10,
)
)
dtype_out = draw(
st.sampled_from(
draw(
helpers.get_castable_dtype(
draw(helpers.get_dtypes("float")), str(dtype_input)
)
)
)
)
if range:
if np.min(a) < range[0]:
extend_lower_interval = True
else:
extend_lower_interval = draw(st.booleans())
if np.max(a) > range[1]:
extend_upper_interval = True
else:
extend_upper_interval = draw(st.booleans())
else:
if isinstance(bins, int):
extend_lower_interval = draw(st.booleans())
extend_upper_interval = draw(st.booleans())
else:
if np.min(a) < bins[0]:
extend_lower_interval = True
else:
extend_lower_interval = draw(st.booleans())
if np.max(a) > bins[-1]:
extend_upper_interval = True
else:
extend_upper_interval = draw(st.booleans())
density = draw(st.booleans())
return (
a,
bins,
axes,
extend_lower_interval,
extend_upper_interval,
dtype_out,
range,
weights,
density,
dtype_input,
)
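# The strategy above returns, in order: the input array, the bins (either an
# int or a sorted 1-D array), the axes, the two extend_*_interval flags, the
# output dtype, the optional range, the weights, the density flag and the input
# dtype; this mirrors the unpacking order used in test_histogram below.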
@st.composite
def _quantile_helper(draw):
large_abs_safety_factor = 2
small_abs_safety_factor = 2
dtype, values, axis = draw(
helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale="log",
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
valid_axis=True,
allow_neg_axes=False,
min_axes_size=1,
force_int_axis=True,
)
)
q = draw(
st.one_of(
helpers.array_values(
dtype=helpers.get_dtypes("float"),
shape=helpers.get_shape(min_dim_size=1, max_num_dims=1, min_num_dims=1),
min_value=0.0,
max_value=1.0,
exclude_max=False,
exclude_min=False,
),
st.floats(min_value=0.0, max_value=1.0),
)
)
interpolation_names = [
"linear",
"lower",
"higher",
"midpoint",
"nearest",
"nearest_jax",
]
interpolation = draw(
helpers.list_of_size(
x=st.sampled_from(interpolation_names),
size=1,
)
)
return dtype, values, axis, interpolation, q
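# A draw yields (dtype, values, axis, [interpolation], q); q is either a scalar
# float in [0, 1] or a 1-D array of such values, and interpolation is a
# single-element list sampled from the names above.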
# bincount
@st.composite
def bincount_dtype_and_values(draw):
dtype_and_x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
max_num_dims=1,
min_dim_size=1,
max_dim_size=10,
min_value=0,
max_value=10,
allow_nan=False,
)
)
dtype_and_x[1][1] = dtype_and_x[1][0]
if draw(st.booleans()):
dtype_and_x[1][1] = None
min_length = draw(st.integers(min_value=0, max_value=10))
return dtype_and_x, min_length
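# Note: the second value slot doubles as the optional weights argument and is
# replaced with None roughly half of the time, exercising both code paths of
# the bincount function under test.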
# --- Main --- #
# ------------ #
@handle_test(
fn_tree="functional.ivy.experimental.bincount",
dtype_and_x=bincount_dtype_and_values(),
test_gradients=st.just(False),
)
def test_bincount(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
dtype_and_x, min_length = dtype_and_x
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
weights=x[1],
minlength=min_length,
)
# corrcoef
@handle_test(
fn_tree="functional.ivy.experimental.corrcoef",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
num_arrays=2,
shared_dtype=True,
abs_smallest_val=1e-5,
min_num_dims=2,
max_num_dims=2,
min_dim_size=3,
max_dim_size=3,
min_value=-100,
max_value=100,
allow_nan=False,
),
rowvar=st.booleans(),
test_gradients=st.just(False),
)
def test_corrcoef(*, dtype_and_x, rowvar, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
y=x[1],
rowvar=rowvar,
)
# cov
@handle_test(
fn_tree="functional.ivy.experimental.cov",
dtype_x1_x2_cov=_get_dtype_value1_value2_cov(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=2,
min_dim_size=2,
max_dim_size=5,
min_value=1,
max_value=1e10,
abs_smallest_val=0.01,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
test_gradients=st.just(False),
test_with_out=st.just(False),
)
def test_cov(
*,
dtype_x1_x2_cov,
test_flags,
backend_fw,
fn_name,
on_device,
):
dtype, x1, x2, rowVar, bias, ddof, fweights, aweights = dtype_x1_x2_cov
helpers.test_function(
input_dtypes=[dtype[0], dtype[0], "int64", "float64"],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x1,
x2=x2,
rowVar=rowVar,
bias=bias,
ddof=ddof,
fweights=fweights,
aweights=aweights,
return_flat_np_arrays=True,
rtol_=1e-2,
atol_=1e-2,
)
@handle_test(
fn_tree="functional.ivy.experimental.cummax",
dtype_x_axis_castable=_get_castable_dtype(),
exclusive=st.booleans(),
reverse=st.booleans(),
)
def test_cummax(
*,
dtype_x_axis_castable,
exclusive,
reverse,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, x, axis, castable_dtype = dtype_x_axis_castable
helpers.test_function(
input_dtypes=[input_dtype],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
axis=axis,
exclusive=exclusive,
reverse=reverse,
dtype=castable_dtype,
rtol_=1e-1,
atol_=1e-1,
)
# cummin
@handle_test(
fn_tree="functional.ivy.experimental.cummin",
dtype_x_axis_castable=_get_castable_dtype(),
exclusive=st.booleans(),
reverse=st.booleans(),
)
def test_cummin(
*,
dtype_x_axis_castable,
exclusive,
reverse,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, x, axis, castable_dtype = dtype_x_axis_castable
helpers.test_function(
input_dtypes=[input_dtype],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
axis=axis,
exclusive=exclusive,
reverse=reverse,
dtype=castable_dtype,
rtol_=1e-1,
atol_=1e-1,
)
# TODO: - Error message from Tensorflow: 'Number of dimensions of `x` and `weights`
# must coincide. Found: x has <nd1>, weights has <nd2>'
# - Error description: a typo that throws unintended exceptions when using both
# weights and multiple axes.
# - Fixed in the TFP 0.20 release.
# - The test helper needs to be modified to handle this case in older versions.
@handle_test(
fn_tree="functional.ivy.experimental.histogram",
values=_histogram_helper(),
test_gradients=st.just(False),
)
def test_histogram(
*,
values,
test_flags,
backend_fw,
fn_name,
on_device,
):
(
a,
bins,
axis,
extend_lower_interval,
extend_upper_interval,
dtype,
range,
weights,
density,
dtype_input,
) = values
helpers.test_function(
a=a,
bins=bins,
axis=axis,
extend_lower_interval=extend_lower_interval,
extend_upper_interval=extend_upper_interval,
dtype=dtype,
range=range,
weights=weights,
density=density,
input_dtypes=[dtype_input],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
)
# igamma
@handle_test(
fn_tree="functional.ivy.experimental.igamma",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
min_value=2,
max_value=100,
),
test_gradients=st.just(False),
test_with_out=st.just(False),
)
def test_igamma(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
rtol_=1e-04,
a=x[0],
x=x[1],
)
@handle_test(
fn_tree="functional.ivy.experimental.median",
dtype_x_axis=_statistical_dtype_values(function="median"),
keep_dims=st.booleans(),
test_gradients=st.just(False),
test_with_out=st.just(False),
)
def test_median(*, dtype_x_axis, keep_dims, test_flags, backend_fw, fn_name, on_device):
input_dtype, x, axis = dtype_x_axis
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
input=x[0],
axis=axis,
keepdims=keep_dims,
)
# nanmean
@handle_test(
fn_tree="functional.ivy.experimental.nanmean",
dtype_x_axis=_statistical_dtype_values(function="nanmean"),
keep_dims=st.booleans(),
dtype=helpers.get_dtypes("valid", full=False),
test_gradients=st.just(False),
)
def test_nanmean(
*, dtype_x_axis, keep_dims, dtype, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x, axis, *_ = dtype_x_axis
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
atol_=1e-02,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keep_dims,
dtype=dtype[0],
)
# nanmedian
@handle_test(
fn_tree="functional.ivy.experimental.nanmedian",
dtype_x_axis=_statistical_dtype_values(function="nanmedian"),
keep_dims=st.booleans(),
dtype=helpers.get_dtypes("valid", full=False),
overwriteinput=st.booleans(),
test_gradients=st.just(False),
)
def test_nanmedian(
*,
dtype_x_axis,
keep_dims,
overwriteinput,
dtype,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
atol_=1e-02,
fn_name=fn_name,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keep_dims,
overwrite_input=overwriteinput,
)
@handle_test(
fn_tree="functional.ivy.experimental.nanmin",
dtype_x_axis_castable=_get_castable_float_dtype_nan(),
test_gradients=st.just(False),
initial=st.integers(min_value=-5, max_value=5),
keep_dims=st.booleans(),
)
def test_nanmin(
*,
dtype_x_axis_castable,
initial,
keep_dims,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, x, axis, castable_dtype, dtype3, where = dtype_x_axis_castable
x = x[0]
helpers.test_function(
input_dtypes=[input_dtype, dtype3[0]],
test_flags=test_flags,
rtol_=1e-1,
atol_=1e-1,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
a=x,
axis=axis,
keepdims=keep_dims,
initial=initial,
where=where[0],
)
@handle_test(
fn_tree="functional.ivy.experimental.nanprod",
dtype_x_axis_castable=_get_castable_float_dtype_nan(),
keep_dims=st.booleans(),
test_gradients=st.just(False),
initial=st.integers(min_value=-5, max_value=5),
)
def test_nanprod(
*,
dtype_x_axis_castable,
keep_dims,
test_flags,
initial,
backend_fw,
fn_name,
on_device,
):
    # _get_castable_float_dtype_nan also yields a where-mask and its dtype,
    # which nanprod does not use, so the trailing items are discarded here
    input_dtype, x, axis, castable_dtype, *_ = dtype_x_axis_castable
x = x[0]
helpers.test_function(
input_dtypes=[input_dtype],
test_flags=test_flags,
rtol_=1e-1,
atol_=1e-1,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
a=x,
axis=axis,
keepdims=keep_dims,
dtype=castable_dtype,
initial=initial,
)
# quantile
@handle_test(
fn_tree="functional.ivy.experimental.quantile",
dtype_and_x=_quantile_helper(),
keep_dims=st.booleans(),
test_gradients=st.just(False),
test_with_out=st.just(False),
)
def test_quantile(
*, dtype_and_x, keep_dims, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x, axis, interpolation, q = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
a=x[0],
q=q,
axis=axis,
interpolation=interpolation[0],
keepdims=keep_dims,
atol_=1e-3,
rtol_=1e-3,
)
| ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_statistical.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_statistical.py",
"repo_id": "ivy",
"token_count": 10371
} | 68 |
# global
from packaging import version
import pytest
import importlib
import types
import numpy as np
# local
import ivy
from ivy.utils.backend.handler import _backend_dict
# TODO fix due to refactor
from ivy_tests.test_ivy.helpers.available_frameworks import _available_frameworks
try:
import tensorflow as tf
except ImportError:
tf = types.SimpleNamespace()
tf.constant = lambda x: x
try:
import torch
except ImportError:
torch = types.SimpleNamespace()
torch.tensor = lambda x: x
try:
import jax.numpy as jnp
import jax
except ImportError:
jnp = types.SimpleNamespace()
jnp.array = lambda x: x
jax = types.SimpleNamespace()
try:
import paddle
except ImportError:
paddle = types.SimpleNamespace()
paddle.Tensor = lambda x: x
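# The try/except blocks above fall back to types.SimpleNamespace stand-ins so
# that the module-level parametrization below can still be constructed when a
# framework is not installed; only the attributes referenced in this file are
# stubbed.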
available_array_types_class = [
("numpy", "<class 'numpy.ndarray'>"),
]
available_array_types_input = [
("numpy", np.array(3.0)),
]
available_frameworks_with_none = _available_frameworks()[:]
# Dynamic Backend
backends = list(_backend_dict.keys())
@pytest.mark.parametrize("excluded", available_frameworks_with_none)
def test_choose_random_backend(excluded):
backend = ivy.choose_random_backend(excluded=excluded)
if excluded is None:
assert backend in list(_backend_dict.keys())
else:
backends_list = list(_backend_dict.keys())
backends_list.remove(excluded)
assert backend in backends_list
@pytest.mark.parametrize(
("backend", "array_type"),
available_array_types_input,
)
def test_current_backend(backend, array_type):
    # test backend inference from arguments when the backend stack is clear
ivy.unset_backend()
assert ivy.current_backend(array_type) is importlib.import_module(
_backend_dict[backend]
)
    # the globally set backend takes precedence over the argument's backend.
if "torch" in _available_frameworks():
ivy.set_backend("torch")
ivy.utils.assertions.check_equal(
ivy.current_backend(array_type),
importlib.import_module(_backend_dict["torch"]),
as_array=False,
)
else:
ivy.set_backend("numpy")
ivy.utils.assertions.check_equal(
ivy.current_backend(array_type),
importlib.import_module(_backend_dict["numpy"]),
as_array=False,
)
@pytest.mark.parametrize(
("middle_backend", "end_backend"),
[(a, b) for a in backends for b in backends if (a != b and "mxnet" not in [a, b])],
)
def test_dynamic_backend_all_combos(middle_backend, end_backend):
# create an ivy array, container and native container
a = ivy.array([1, 2, 3])
b = ivy.array([4, 5, 6])
ivy_cont = ivy.Container({"w": a, "b": b})
# clear the backend stack after initialization of inputs
ivy.unset_backend()
# set dynamic_backend to false for all objects
ivy_cont.dynamic_backend = False
a.dynamic_backend = False
b.dynamic_backend = False
# set the middle backend
ivy.set_backend(middle_backend, dynamic=True)
var_cont = ivy.Container(
{
"w": ivy.gradients._variable(ivy.array([10, 20, 30])),
"b": ivy.gradients._variable(ivy.array([40, 50, 60])),
}
)
# set dynamic_backend to true for all objects
ivy_cont.dynamic_backend = True
a.dynamic_backend = True
b.dynamic_backend = True
# set the final backend
ivy.set_backend(end_backend, dynamic=True)
    # assert that the data of each object is in the expected native format
assert isinstance(a.data, ivy.NativeArray)
assert isinstance(ivy_cont["b"].data, ivy.NativeArray)
if {"numpy", "jax"}.intersection([middle_backend, end_backend]):
# these frameworks don't support native variables
assert isinstance(var_cont["b"].data, ivy.NativeArray)
else:
assert ivy.gradients._is_variable(var_cont["b"])
def test_dynamic_backend_context_manager():
with ivy.dynamic_backend_as(True):
a = ivy.array([0.0, 1.0])
b = ivy.array([2.0, 3.0])
with ivy.dynamic_backend_as(False):
c = ivy.array([4.0, 5.0])
d = ivy.array([6.0, 7.0])
assert a.dynamic_backend is True
assert b.dynamic_backend is True
assert c.dynamic_backend is False
assert d.dynamic_backend is False
def test_dynamic_backend_setter():
a = ivy.array([1, 2, 3])
type_a = type(a.data)
a.dynamic_backend = False
# clear the backend stack after initialization of inputs
ivy.unset_backend()
ivy.set_backend("tensorflow", dynamic=True)
assert type(a.data) == type_a # noqa: E721
a.dynamic_backend = True
assert isinstance(a.data, tf.Tensor)
ivy.set_backend("torch", dynamic=True)
assert isinstance(a.data, torch.Tensor)
@pytest.mark.parametrize("backend", _available_frameworks())
def test_previous_backend(backend):
if not ivy.backend_stack:
assert ivy.previous_backend() is None
ivy.set_backend(backend)
stack_before_unset = []
func_address_before_unset = id(ivy.sum)
stack_before_unset.extend(ivy.backend_stack)
previous_backend = ivy.previous_backend()
stack_after_unset = ivy.backend_stack
    # check that the function id has changed, as inverse=True asserts inequality.
ivy.utils.assertions.check_equal(
func_address_before_unset, id(ivy.sum), inverse=True, as_array=False
)
ivy.utils.assertions.check_equal(
previous_backend,
importlib.import_module(_backend_dict[backend]),
as_array=False,
)
ivy.utils.assertions.check_greater(
len(stack_before_unset), len(stack_after_unset), as_array=False
)
    # check that a previously set backend is still set
ivy.set_backend(backend)
ivy.set_backend("numpy")
ivy.previous_backend()
ivy.utils.assertions.check_equal(ivy.current_backend_str(), backend, as_array=False)
@pytest.mark.parametrize(
(
"backend",
"array_type",
),
available_array_types_class,
)
def test_set_backend(backend, array_type):
# recording data before backend change
stack_before = []
func_address_before = id(ivy.sum)
stack_before.extend(ivy.backend_stack)
ivy.set_backend(backend)
stack_after = ivy.backend_stack
    # check that the function id has changed, as inverse=True asserts inequality.
ivy.utils.assertions.check_equal(
func_address_before, id(ivy.sum), inverse=True, as_array=False
)
# using ivy assertions to ensure the desired backend is set
ivy.utils.assertions.check_less(len(stack_before), len(stack_after), as_array=False)
ivy.utils.assertions.check_equal(ivy.current_backend_str(), backend, as_array=False)
backend = importlib.import_module(_backend_dict[backend])
ivy.utils.assertions.check_equal(stack_after[-1], backend, as_array=False)
x = ivy.array([1, 2, 3])
ivy.utils.assertions.check_equal(
str(type(ivy.to_native(x))), array_type, as_array=False
)
@pytest.mark.parametrize("backend", ["torch", "numpy"])
def test_set_backend_no_warning_when_inplace_update_supported(backend):
with pytest.warns(None):
ivy.set_backend(backend)
def test_set_backend_throw_warning_only_once_when_inplace_update_not_supported(
backend_fw,
):
def _assert_number_of_inplace_warnings_is(n):
inplace_update_warning_counter = 0
for item in record:
if "inplace update" in str(item.message):
inplace_update_warning_counter += 1
assert inplace_update_warning_counter == n
if backend_fw in ["tensorflow", "paddle", "jax"]:
with pytest.warns(UserWarning) as record:
ivy.set_backend(backend_fw)
ivy.set_backend(backend_fw)
_assert_number_of_inplace_warnings_is(1)
def test_unset_backend():
for backend_str in _available_frameworks():
ivy.set_backend(backend_str)
ivy.unset_backend()
ivy.utils.assertions.check_equal(ivy.backend_stack, [], as_array=False)
def test_variables():
# clear the backend stack
ivy.unset_backend()
ivy.set_backend("tensorflow", dynamic=True)
a = tf.Variable(0)
b = tf.Variable(1)
dyn_cont = ivy.Container({"w": a, "b": b})
stat_cont = ivy.Container({"w": a, "b": b})
stat_cont.dynamic_backend = False
ivy.set_backend("torch", dynamic=True)
assert ivy.current_backend().gradients.is_variable(dyn_cont["w"].data)
ivy.set_backend("paddle", dynamic=True)
assert ivy.current_backend().gradients.is_variable(dyn_cont["w"].data)
assert isinstance(stat_cont["w"], tf.Variable)
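# The parametrization lists defined near the top of the module are extended
# below, at import time, with entries for whichever frameworks are installed;
# this relies on pytest expanding parametrize marks at collection time, after
# the module body has finished executing.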
available_frameworks_with_none.append(None)
if "tensorflow" in _available_frameworks():
available_array_types_input.append(("tensorflow", tf.constant([3.0])))
available_array_types_class.append(
("tensorflow", "<class 'tensorflow.python.framework.ops.EagerTensor'>")
)
if "jax" in _available_frameworks():
available_array_types_input.append(("jax", jnp.array(3.0)))
if version.parse(jax.__version__) >= version.parse("0.4.1"):
available_array_types_class.append(
("jax", "<class 'jaxlib.xla_extension.ArrayImpl'>")
)
else:
available_array_types_class.append(
("jax", "<class 'jaxlib.xla_extension.DeviceArray'>")
)
if "torch" in _available_frameworks():
available_array_types_input.append(("torch", torch.tensor([3.0])))
available_array_types_class.append(("torch", "<class 'torch.Tensor'>"))
if "paddle" in _available_frameworks():
available_array_types_input.append(("paddle", paddle.to_tensor([3.0])))
available_array_types_class.append(("paddle", "<class 'paddle.Tensor'>"))
| ivy/ivy_tests/test_ivy/test_misc/test_backend_utils/test_backend_handler.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_backend_utils/test_backend_handler.py",
"repo_id": "ivy",
"token_count": 4010
} | 69 |
from hypothesis import assume, strategies as st
import numpy as np
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_method
CLASS_TREE = "ivy.Shape"
DUMMY_DTYPE = ["int32"]
@handle_method(
init_tree=CLASS_TREE,
method_tree="Shape.__add__",
shape_1=helpers.get_shape(),
shape_2=helpers.get_shape(),
)
def test_shape__add__(
shape_1,
shape_2,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape_tup": shape_1},
init_input_dtypes=DUMMY_DTYPE,
method_input_dtypes=DUMMY_DTYPE,
method_all_as_kwargs_np={"other": shape_2},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__bool__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
max_num_dims=0,
min_value=0,
max_value=1,
),
)
def test_shape__bool__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=[],
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__eq__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
)
def test_shape__eq__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Shape.__ge__",
shape_1=helpers.get_shape(),
shape_2=helpers.get_shape(),
)
def test_shape__ge__(
shape_1,
shape_2,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape_tup": shape_1},
init_input_dtypes=DUMMY_DTYPE,
method_input_dtypes=DUMMY_DTYPE,
method_all_as_kwargs_np={"other": shape_2},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__getitem__",
dtypes_x_query=helpers.dtype_array_query(
available_dtypes=helpers.get_dtypes("valid"),
allow_neg_step=False,
),
)
def test_shape__getitem__(
dtypes_x_query,
init_flags,
method_flags,
method_name,
class_name,
backend_fw,
ground_truth_backend,
on_device,
):
dtypes, x, query = dtypes_x_query
helpers.test_method(
on_device=on_device,
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape": x[0]},
init_input_dtypes=[dtypes[0]],
method_input_dtypes=[dtypes[1]],
method_all_as_kwargs_np={"key": query},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__gt__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
)
def test_shape__gt__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__int__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
max_num_dims=0,
min_value=-1e15,
max_value=1e15,
),
method_container_flags=st.just([False]),
)
def test_shape__int__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=[],
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__iter__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_dim_size=2,
min_num_dims=1,
),
)
def test_shape__iter__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__le__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
)
def test_shape__le__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__len__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_dim_size=2,
min_num_dims=1,
),
)
def test_shape__len__(
dtype_and_x,
method_name,
class_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__lt__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
)
def test_shape__lt__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__mod__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_shape__mod__(
dtype_and_x,
method_name,
class_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Shape.__mul__",
shape=helpers.get_shape(),
other=st.integers(min_value=1, max_value=10),
)
def test_shape__mul__(
shape,
other,
method_name,
class_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape_tup": shape},
init_input_dtypes=DUMMY_DTYPE,
method_input_dtypes=DUMMY_DTYPE,
method_all_as_kwargs_np={"other": other},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Shape.__radd__",
shape_1=helpers.get_shape(),
shape_2=helpers.get_shape(),
)
def test_shape__radd__(
shape_1,
shape_2,
method_name,
class_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape_tup": shape_1},
init_input_dtypes=DUMMY_DTYPE,
method_input_dtypes=DUMMY_DTYPE,
method_all_as_kwargs_np={"other": shape_2},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__rdiv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_shape__rdiv__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__rmod__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_shape__rmod__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Shape.__rmul__",
shape=helpers.get_shape(),
other=st.integers(min_value=1, max_value=10),
)
def test_shape__rmul__(
shape,
other,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape_tup": shape},
init_input_dtypes=DUMMY_DTYPE,
method_input_dtypes=DUMMY_DTYPE,
method_all_as_kwargs_np={"other": other},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__rsub__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_shape__rsub__(
dtype_and_x,
method_name,
class_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
method_tree="Shape.__sub__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_shape__sub__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
on_device=on_device,
ground_truth_backend=ground_truth_backend,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"shape": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
def test_shape_in_conditions():
shape = ivy.Shape((1, 2))
condition_is_true = True if shape else False
assert condition_is_true
shape = ivy.Shape(())
condition_is_true = True if shape else False
assert not condition_is_true
| ivy/ivy_tests/test_ivy/test_misc/test_shape.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_shape.py",
"repo_id": "ivy",
"token_count": 8436
} | 70 |
# Run Tests
import os
import subprocess
import sys
from pymongo import MongoClient
result_config = {
"success": "https://img.shields.io/badge/-success-success",
"failure": "https://img.shields.io/badge/-failure-red",
}
def make_clickable(url, name):
return (
f'<a href="{url}" rel="noopener noreferrer" '
+ f'target="_blank"><img src={name}></a>'
)
def get_submodule(test_path):
test_path = test_path.split("/")
submod_test = test_path[-1]
submod, test_fn = submod_test.split("::")
submod = submod.replace("test_", "").replace(".py", "")
return ["array_api", 0], submod, test_fn
def update_individual_test_results(
collection,
id,
submod,
backend,
test,
result,
backend_version=None,
frontend_version=None,
):
key = f"{submod}.{backend}"
if backend_version is not None:
backend_version = backend_version.replace(".", "_")
key += f".{backend_version}"
if frontend_version is not None:
frontend_version = frontend_version.replace(".", "_")
key += f".{frontend_version}"
key += f".{test}"
collection.update_one(
{"_id": id},
{"$set": {key: result}},
upsert=True,
)
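# The resulting MongoDB key is dot-separated; for this workflow, which passes
# "latest-stable" as the backend version, a key would look (illustratively)
# like "creation_functions.torch.latest-stable.test_arange", with the optional
# backend/frontend versions inserted only when they are provided.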
BACKENDS = ["numpy", "jax", "tensorflow", "torch"]
def main():
redis_url = sys.argv[1]
redis_pass = sys.argv[2]
mongo_key = sys.argv[3]
workflow_id = sys.argv[4]
if len(sys.argv) > 5:
run_id = sys.argv[5]
else:
run_id = f"https://github.com/unifyai/ivy/actions/runs/{workflow_id}"
failed = False
cluster = MongoClient(
f"mongodb+srv://deep-ivy:{mongo_key}@cluster0.qdvf8q3.mongodb.net/?retryWrites=true&w=majority" # noqa
)
db = cluster["Ivy_tests_multi"]
k_flag = {}
subprocess.run(
["python3", "ivy_tests/array_api_testing/write_array_api_tests_k_flag.py"],
check=True,
)
for backend in BACKENDS:
k_flag_file = f"ivy_tests/array_api_testing/.array_api_tests_k_flag_{backend}"
with open(k_flag_file, "r") as f:
array_api_tests_k_flag = f.read().strip()
if backend == "torch":
array_api_tests_k_flag += " and not (uint16 or uint32 or uint64)"
k_flag[backend] = array_api_tests_k_flag
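    # k_flag now maps each backend to the pytest -k expression read from its
    # generated .array_api_tests_k_flag_<backend> file; for torch the unsigned
    # 16/32/64-bit dtypes are additionally deselected.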
with open("tests_to_run", "r") as f:
for line in f:
test, backend = line.split(",")
backend = backend.strip("\n")
coll, submod, test_fn = get_submodule(test)
command = f'docker run --rm --env IVY_BACKEND={backend} --env ARRAY_API_TESTS_MODULE="ivy" --env REDIS_URL={redis_url} --env REDIS_PASSWD={redis_pass} -v "$(pwd)":/ivy -v "$(pwd)"/.hypothesis:/.hypothesis unifyai/ivy:latest timeout 30m python3 -m pytest {test} -k "{k_flag[backend]}" --tb=short -vv' # noqa
print(f"\n{'*' * 100}")
print(f"{line[:-1]}")
print(f"{'*' * 100}\n")
sys.stdout.flush()
ret = os.system(command)
if ret != 0:
res = make_clickable(run_id, result_config["failure"])
failed = True
else:
res = make_clickable(run_id, result_config["success"])
update_individual_test_results(
db[coll[0]],
coll[1],
submod,
backend,
test_fn,
res,
"latest-stable",
)
if failed:
sys.exit(1)
if __name__ == "__main__":
main()
| ivy/scripts/run_tests/array_api_run_tests.py/0 | {
"file_path": "ivy/scripts/run_tests/array_api_run_tests.py",
"repo_id": "ivy",
"token_count": 1731
} | 71 |
from get_all_tests import BACKENDS
from packaging import version
from pymongo import MongoClient
import requests
import sys
def get_latest_package_version(package_name):
try:
url = f"https://pypi.org/pypi/{package_name}/json"
response = requests.get(url, timeout=10)
response.raise_for_status()
package_info = response.json()
versions = list(package_info["releases"].keys())
        return sorted(versions, key=version.parse, reverse=True)
except requests.exceptions.RequestException:
print(f"Error: Failed to fetch package information for {package_name}.")
return None
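# Illustrative usage: get_latest_package_version("torch") should yield the
# release strings for torch sorted newest-first, or None if PyPI is
# unreachable; the callers in main() assume the non-None case.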
def main():
    # parse the CLI arguments and connect to the database
priority = sys.argv[1] == "true"
run_iter = int(sys.argv[2]) - 1
cluster = MongoClient(
"mongodb+srv://readonly-user:hvpwV5yVeZdgyTTm@cluster0.qdvf8q3.mongodb.net"
)
ci_dashboard_db = cluster["ci_dashboard"]
ivy_tests_collection = ci_dashboard_db["ivy_tests"]
frontend_tests_collection = ci_dashboard_db["frontend_tests"]
# iterate over demos and collect ivy and frontend functions used
ivy_test_docs = ivy_tests_collection.find()
frontend_test_docs = frontend_tests_collection.find()
ivy_functions = [
ivy_test_doc["_id"]
for ivy_test_doc in ivy_test_docs
if not priority or ivy_test_doc.get("demos", None)
]
frontend_functions = [
frontend_test_doc["_id"]
for frontend_test_doc in frontend_test_docs
if not priority or frontend_test_doc.get("demos", None)
]
ivy_functions = sorted(list(set(ivy_functions)))
frontend_functions = sorted(list(set(frontend_functions)))
versions = {
backend: [
version_name.replace(".", "_")
for version_name in get_latest_package_version(backend)
]
for backend in BACKENDS
}
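    # Note: this assumes get_latest_package_version() returned a non-None list
    # for every backend; dots are swapped for underscores so the version
    # strings match the underscore-separated keys stored in the dashboard
    # documents.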
# find corresponding test paths for those functions
ivy_test_paths = []
frontend_test_paths = []
for function in ivy_functions:
print("function", function)
result = ivy_tests_collection.find_one({"_id": function})
if result:
for backend in BACKENDS:
if backend in result:
for version_name in versions[backend]:
if version_name in result[backend]:
if "status" in result[backend][version_name]:
status = result[backend][version_name]["status"].get(
"cpu"
)
if not status and status is not None:
ivy_test_paths.append(
f"{result['test_path']},{backend}"
)
break
for function in frontend_functions:
print("frontend function", function)
frontend = function.split(".")[0]
result = frontend_tests_collection.find_one({"_id": function})
if result and frontend in versions:
for frontend_version in versions[frontend]:
if frontend_version in result:
backend_result = result[frontend_version]
for backend in BACKENDS:
if backend in backend_result:
for version_name in versions[backend]:
if version_name in backend_result[backend]:
if (
"status"
in backend_result[backend][version_name]
):
status = backend_result[backend][version_name][
"status"
].get("cpu")
if not status and status is not None:
frontend_test_paths.append(
f"{result['test_path']},{backend}"
)
break
all_tests = ivy_test_paths + frontend_test_paths
all_tests = [test_path.strip() for test_path in all_tests]
tests_per_run = 50
num_tests = len(all_tests)
start = run_iter * tests_per_run
end = (run_iter + 1) * tests_per_run
end = min(end, num_tests)
if start < end:
tests = all_tests[start:end]
else:
tests = []
# add those paths to the tests_to_run
with open("tests_to_run", "w") as write_file:
write_file.write("\n".join(tests))
if __name__ == "__main__":
main()
| ivy/scripts/setup_tests/setup_failing_tests.py/0 | {
"file_path": "ivy/scripts/setup_tests/setup_failing_tests.py",
"repo_id": "ivy",
"token_count": 2485
} | 72 |
{
"name": "Ivy GPU Development Environment (image)",
"image": "unifyai/ivy:latest-gpu",
"customizations": {
"vscode": {
"extensions": [
"ms-python.vscode-pylance"
],
"settings": {
"python.defaultInterpreterPath": "/opt/miniconda/envs/multienv/bin/python3"
}
}
},
"runArgs": ["--gpus","all"],
"postCreateCommand": {
"post_create": "bash .devcontainer/post_create_commands.sh",
"bashrc": "echo \"alias python=python3\" >> ~/.bashrc"
},
"initializeCommand": "docker pull unifyai/ivy:latest",
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Uncomment when using a ptrace-based debugger like C++, Go, and Rust
// "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
// Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
// "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
// "remoteUser": "vscode",
"features": {
"ghcr.io/devcontainers/features/common-utils:2": {
"installZsh": true,
"configureZshAsDefaultShell": true,
"installOhMyZsh": true,
"upgradePackages": false
},
"ghcr.io/devcontainers/features/docker-outside-of-docker:1": {
"moby": true,
"installDockerBuildx": true,
"version": "20.10",
"dockerDashComposeVersion": "v2"
},
"ghcr.io/devcontainers/features/github-cli:1": {
"installDirectlyFromGitHubRelease": true,
"version": "latest"
}
}
}
| ivy/.devcontainer/image_gpu/devcontainer.json/0 | {
"file_path": "ivy/.devcontainer/image_gpu/devcontainer.json",
"repo_id": "ivy",
"token_count": 674
} | 0 |
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: check-yaml
- id: trailing-whitespace
- id: check-toml
- id: end-of-file-fixer
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.2.2
hooks:
# Run the linter.
- id: ruff
args: [ --fix ]
- repo: https://github.com/psf/black-pre-commit-mirror
rev: 24.2.0
hooks:
- id: black
language_version: python3
args:
- "--preview"
exclude: >
(?x)
(
ivy/functional/frontends/(?!.*(?:config\.py|__init__\.py)$).* |
ivy_tests/test_ivy/(?!.*(?:__init__\.py|conftest\.py|helpers/.*|test_frontends/config/.*$)).*
)
- repo: https://github.com/PyCQA/autoflake
rev: v2.3.0
hooks:
- id: autoflake
- repo: https://github.com/PyCQA/docformatter
rev: v1.7.5
hooks:
- id: docformatter
- repo: https://github.com/unifyai/lint-hook
rev: a72ffb17562d919311653d7f593cb537d1245c19
hooks:
- id: ivy-lint
| ivy/.pre-commit-config.yaml/0 | {
"file_path": "ivy/.pre-commit-config.yaml",
"repo_id": "ivy",
"token_count": 569
} | 1 |
#!/bin/bash
docker build -t unifyai/ivy:latest --no-cache -f DockerfileApplied ..
| ivy/docker/build_applied_dockerfile.sh/0 | {
"file_path": "ivy/docker/build_applied_dockerfile.sh",
"repo_id": "ivy",
"token_count": 31
} | 2 |
{% extends "top_level_toc.rst" %}
{% block name %}{{"Functions" | escape | underline}}{% endblock %}
{% block template %}top_functional_module.rst{% endblock %}
| ivy/docs/_templates/top_functional_toc.rst/0 | {
"file_path": "ivy/docs/_templates/top_functional_toc.rst",
"repo_id": "ivy",
"token_count": 61
} | 3 |
Backend Setting
===============
.. _`this function`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/backend_handler.py#L154
.. _`implicit_backend`: https://github.com/unifyai/ivy/blob/3358b5bbadbe4cbc0509cad4ea8f05f178dfd8b8/ivy/utils/backend/handler.py
.. _`import the backend module`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/backend_handler.py#L184
.. _`writing the function`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/backend_handler.py#L212
.. _`wrap the functions`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/backend_handler.py#L204
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`backend setting thread`: https://discord.com/channels/799879767196958751/1189905734645850254
The backend framework can either be set by calling :code:`ivy.set_backend(backend_name)` or it can be inferred from the arguments.
For the latter, a global variable `implicit_backend`_ (initialized to numpy) is kept in the backend handler module, and is used to infer the backend whenever (a) no backend has been set using the :code:`set_backend` function and (b) the backend cannot be inferred from the inputs.
If the framework can be inferred from the inputs, then this is always used, and the `implicit_backend`_ is overwritten with the framework inferred.
numpy will always be the default backend unless another backend is explicitly set or is inferred from the inputs.
When calling `this function`_ for setting the backend, the following steps are performed:
#. store a global copy of the original :attr:`ivy.__dict__` to :code:`ivy_original_dict`, if this is not already stored.
#. `import the backend module`_, for example :mod:`ivy.functional.backends.torch`, if the backend has been passed in as a string.
All functions in this unmodified backend module are *primary* functions, because only primary functions are stored in :mod:`ivy.functional.backends.backend_name`.
This backend module does not include any *compositional* functions.
#. loop through the original :code:`ivy_original_dict` (which has all functions, including compositional), and (a) add the primary function from the backend if it exists, (b) else add the compositional function from :code:`ivy_original_dict`.
#. `wrap the functions`_ where necessary, extending them with shared repeated functionality and `writing the function`_ to :attr:`ivy.__dict__`.
Wrapping is used in order to avoid excessive code duplication in every backend function implementation.
This is explained in more detail in the next section: `Function Wrapping <function_wrapping.rst>`_.
It's helpful to look at an example:
.. code-block:: python
x = ivy.array([[2., 3.]])
ivy.current_backend()
<module 'ivy.functional.backends.numpy' from '/opt/project/ivy/functional/backends/numpy/__init__.py'>
.. code-block:: python
y = ivy.multiply(torch.Tensor([3.]), torch.Tensor([4.]))
ivy.current_backend()
<module 'ivy.functional.backends.torch' from '/opt/project/ivy/functional/backends/torch/__init__.py'>
.. code-block:: python
ivy.set_backend('jax')
z = ivy.matmul(jax.numpy.array([[2.,3.]]), jax.numpy.array([[5.],[6.]]))
ivy.current_backend()
<module 'ivy.functional.backends.jax' from '/opt/project/ivy/functional/backends/jax/__init__.py'>
ivy.previous_backend()
ivy.current_backend()
<module 'ivy.functional.backends.torch' from '/opt/project/ivy/functional/backends/torch/__init__.py'>
In the last example above, the moment any backend is explicitly set, it is used in preference to the `implicit_backend`_.
However, when the explicitly set backend is removed using :func:`ivy.previous_backend`, the `implicit_backend`_ is used as a fallback, holding whichever framework was most recently inferred (torch in this case).
While the `implicit_backend`_ functionality gives more freedom to the user, the recommended way of doing things would be to set the backend explicitly.
In addition, all the previously set backends can be cleared by calling :func:`ivy.unset_backend`.
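For instance, a minimal sketch of this workflow, using only the functions mentioned above, looks as follows:
.. code-block:: python
ivy.set_backend('torch')
ivy.set_backend('jax')
ivy.unset_backend()      # clears all explicitly set backends
ivy.current_backend()    # falls back to the implicit backend once again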
Dynamic Backend Setting
-----------------------
.. _`ivy.set_dynamic_backend`: https://github.com/unifyai/ivy/blob/e2b0b1d7fcd454f12bfae94b03213457460276c8/ivy/__init__.py#L1150
.. _`ivy.unset_dynamic_backend`: https://github.com/unifyai/ivy/blob/e2b0b1d7fcd454f12bfae94b03213457460276c8/ivy/__init__.py#L1187
.. _`ivy.dynamic_backend_as`: https://github.com/unifyai/ivy/blob/e2b0b1d7fcd454f12bfae94b03213457460276c8/ivy/__init__.py#L1190
.. _`ivy.Array`: https://github.com/unifyai/ivy/blob/e2b0b1d7fcd454f12bfae94b03213457460276c8/ivy/data_classes/array/array.py#L190
.. _`ivy.Container`: https://github.com/unifyai/ivy/blob/e2b0b1d7fcd454f12bfae94b03213457460276c8/ivy/data_classes/container/base.py#L4285
.. _`dynamic_backend_converter`: https://github.com/unifyai/ivy/blob/e2b0b1d7fcd454f12bfae94b03213457460276c8/ivy/utils/backend/handler.py#L252
Working with different backends in Ivy can be challenging, especially when you need to switch between backends frequently.
To make this easier, users can make use of the dynamic backend attribute of :class:`ivy.Array` and :class:`ivy.Container` classes which allow you to automatically convert ivy arrays to the new backend whenever the backend is changed.
Essentially, when the user calls :code:`ivy.set_backend(<backend>, dynamic=True)`, the following steps are performed:
#. First, all live objects in the current project scope are found and then filtered to only include :class:`ivy.Array`/:class:`ivy.Container` objects.
#. Then, these objects are iterated through and converted to the target backend using DLPack or numpy as an intermediary.
By default, the dynamic backend attribute is set to True when you create an ivy array (e.g., :code:`x = ivy.array([1,2,3])`), but the attribute is mutable and can be changed after the ivy array is created (e.g., :code:`x.dynamic_backend = True`).
Here's an example to illustrate how this works in practice:
.. code-block:: python
ivy.set_backend('torch')
x = ivy.array([1,2,3])
y = ivy.array([1,2,3])
y.dynamic_backend = False
x.dynamic_backend = True
x.data # torch tensor
y.data # torch tensor
ivy.set_backend('jax')
x.data # will be a jax array
y.data # will still be a torch tensor since dynamic_backend=False
Setting the attribute to True converts the array to the current backend even if the backend was set with `dynamic=False`. In addition to setting the dynamic backend attribute for individual ivy arrays, you can also set or unset the dynamic backend feature globally for all such instances using `ivy.set_dynamic_backend`_ and `ivy.unset_dynamic_backend`_ respectively.
Another useful feature of the dynamic backend is the `ivy.dynamic_backend_as`_ context manager. This allows you to write code like this:
.. code-block:: python
with ivy.dynamic_backend_as(True):
a = ivy.array([0., 1.])
b = ivy.array([2., 3.])
with ivy.dynamic_backend_as(False):
c = ivy.array([4., 5.])
d = ivy.array([6., 7.])
This makes it easy to define different sections of your project with different settings, without having to explicitly call :code:`ivy.set_<something>` and :code:`ivy.unset_<something>` etc.
Backend and Frontend Version Support
------------------------------------
Each time a new ivy backend is set, the backend_handler modifies the :attr:`ivy.__dict__` to support the multiple versions of functions that are not forward compatible.
For example, :func:`torch.ones_like` in the latest stable version :code:`1.12` has many new arguments :code:`dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format` compared to the same function at version :code:`0.3.1`.
None of these new arguments will cause any forward compatibility issues: they weren't used in old code, and they can now just be used in new code if desired.
However, the removal of the :code:`out` argument does break forward compatibility.
Old torch code will raise an :exc:`Argument Not Found` error when run with newer torch versions.
However, such forward-breaking changes are in the vast minority.
We currently use a naming convention for such functions and name them as :code:`fn_name_v_1p12_and_above` which means that this particular implementation of the function is valid for versions :code:`1.12` and above.
Similarly, :code:`fn_name_v_1p01_to_1p1` means that the function is valid for versions between :code:`1.01` and :code:`1.1` both inclusive.
Each time a backend is set, we go through the :attr:`backend.__dict__`, and for all functions for which multiple versions are detected, we simply import and bind the generic :code:`fn_name` to the appropriate version-specific implementation.
We do so by detecting the version of the backend framework installed on the user's end.
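As a rough illustration, the selection logic might be sketched as follows (this is a conceptual sketch only, not the actual handler code, and the version-specific name shown simply applies the naming convention described above):
.. code-block:: python
# sketch only: ``backend`` stands for the imported backend module, e.g. ivy.functional.backends.torch
from packaging import version
import torch
if version.parse(torch.__version__) >= version.parse("1.12"):
    backend.ones_like = backend.ones_like_v_1p12_and_above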
We follow the same workflow for providing version support to the frontend functions.
Again the version is inferred by importing the corresponding framework on the user's system.
If the user's system doesn't have the backend framework installed, we default to the latest version.
**Round Up**
This should have hopefully given you a good feel for how the backend framework is set.
If you have any questions, please feel free to reach out on `discord`_ in the `backend setting thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/ROt5E8aHgww" class="video">
</iframe>
| ivy/docs/overview/deep_dive/backend_setting.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/backend_setting.rst",
"repo_id": "ivy",
"token_count": 3095
} | 4 |
Ivy Frontends
=============
.. _`tensorflow.tan`: https://github.com/unifyai/ivy/blob/f52457a7bf3cfafa30a7c1a29a708ade017a735f/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py#L109
.. _`aliases`: https://www.tensorflow.org/api_docs/python/tf/math/tan
.. _`jax.lax.add`: https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.add.html
.. _`jax.lax`: https://jax.readthedocs.io/en/latest/jax.lax.html
.. _`jax.lax.tan`: https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.tan.html
.. _`numpy.add`: https://numpy.org/doc/stable/reference/generated/numpy.add.html
.. _`numpy mathematical functions`: https://numpy.org/doc/stable/reference/index.html
.. _`numpy.tan`: https://numpy.org/doc/stable/reference/generated/numpy.tan.html
.. _`tf`: https://www.tensorflow.org/api_docs/python/tf
.. _`tf.math.tan`: https://www.tensorflow.org/api_docs/python/tf/math/tan
.. _`torch.add`: https://pytorch.org/docs/stable/generated/torch.add.html#torch.add
.. _`torch`: https://pytorch.org/docs/stable/torch.html#math-operations
.. _`torch.tan`: https://pytorch.org/docs/stable/generated/torch.tan.html#torch.tan
.. _`YouTube tutorial series`: https://www.youtube.com/watch?v=72kBVJTpzIw&list=PLwNuX3xB_tv-wTpVDMSJr7XW6IP_qZH0t
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`ivy frontends thread`: https://discord.com/channels/799879767196958751/1189908295041941514
.. _`Array manipulation routines`: https://numpy.org/doc/stable/reference/routines.array-manipulation.html#
.. _`Array creation routines`: https://numpy.org/doc/stable/reference/routines.array-creation.html
Introduction
------------
On top of the Ivy functional API and backend functional APIs, Ivy has another set of framework-specific frontend functional APIs, which play an important role in code transpilations, as explained `here <https://lets-unify.ai/docs/ivy/overview/design/ivy_as_a_transpiler.html>`_.
The Frontend Basics
-------------------
When using functions and methods of Ivy Frontends, in addition to importing ivy itself like :code:`import ivy`, please also import the corresponding Frontend module.
For example, to use ivy's tensorflow frontend:
:code:`import ivy.functional.frontends.tensorflow as tf_frontend`
----
When testing the frontend functions, we can sometimes call the function directly from the root frontend namespace.
For example, we call `tensorflow.tan`_ rather than :func:`tensorflow.math.tan`.
In this particular case both are fine, and in fact are `aliases`_.
However, sometimes an extra namespace path is necessary.
Taking JAX as an example, the functions :func:`jax.numpy.abs` and :func:`jax.lax.abs` both exist, while :func:`jax.abs` does not exist.
In our JAX frontend, if we were to add both of these to the root namespace, there would be no unambiguous way of resolving a call to :func:`jax.abs` in our frontend.
This would result in :func:`jax.numpy.abs` or :func:`jax.lax.abs` overwriting the other one in an arbitrary manner.
In fact, neither of these should be added to the root namespace, as it does not exist in the native :mod:`jax` framework.
If you accidentally test a function with :code:`fn_tree="<func_name>"` instead of :code:`fn_tree="<lax|numpy>.<func_name>"`, you will see an error since the wrong frontend function is being tested.
Therefore, in order to avoid this potential conflict:
* All frontend tests should use the full namespace path when calling the frontend function.
In the case of TensorFlow, this would mean writing :code:`fn_tree="math.tan"` instead of :code:`fn_tree="tan"` in the frontend test.
* The :mod:`__init__.py` file in all frontends should be carefully checked, and you should verify that you are not adding aliases into the frontend which should not exist, such as the case of :func:`jax.abs` explained above.
* You should ensure that the tests are passing before merging any frontend PRs.
The only exception to this rule is if the test is failing due to a bug in the Ivy functional API, which does not need to be solved as part of the frontend task.
There will be some implicit discussion of the locations of frontend functions in these examples, however an explicit explanation of how to place a frontend function can be found in a sub-section of the Frontend APIs :ref:`open task <overview/contributing/open_tasks:Frontend APIs>`.
**NOTE:** Type hints, docstrings, and examples are not required when working on frontend functions.
**Frontend Arrays**
The native arrays of each framework have their own attributes and instance methods which differ from the attributes and instance methods of :class:`ivy.Array`.
As such we have implemented framework-specific array classes: :class:`tf_frontend.Tensor`, :class:`torch_frontend.Tensor`, :class:`numpy_frontend.ndarray`, and :class:`jax_frontend.DeviceArray`.
These classes simply wrap an :class:`ivy.Array`, which is stored in the :code:`ivy_array` attribute, and behave as closely as possible to the native framework array classes.
This is explained further in the :ref:`overview/deep_dive/ivy_frontends:Classes and Instance Methods` section.
As we aim to replicate the frontend frameworks as closely as possible, all functions accept their frontend array class (as well as :class:`ivy.Array` and :class:`ivy.NativeArray`) and return a frontend array.
However, since most logic in each function is handled by Ivy, the :class:`ivy.Array` must be extracted from any frontend array inputs.
Therefore we add the wrapper :code:`@to_ivy_arrays_and_back` to virtually all functions in the frontends.
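Conceptually, this wrapper behaves roughly like the sketch below; the real wrappers live in each frontend's :code:`func_wrapper.py` and additionally handle nested structures, dtypes and several other details.
.. code-block:: python
# conceptual sketch of the ``@to_ivy_arrays_and_back`` pattern; not the actual implementation
import functools
import ivy
def to_ivy_arrays_and_back_sketch(fn, frontend_array_cls):
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        # unwrap: extract the ivy.Array stored on any frontend array inputs
        args = [getattr(a, "ivy_array", a) for a in args]
        kwargs = {k: getattr(v, "ivy_array", v) for k, v in kwargs.items()}
        ret = fn(*args, **kwargs)  # the function body only ever sees ivy.Array inputs
        # re-wrap: return a frontend array, mirroring the native framework's behaviour
        return frontend_array_cls(ret) if ivy.is_array(ret) else ret
    return wrapped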
There are more framework-specific classes we support in the frontends such as NumPy and Tensorflow :class:`Dtype` classes, NumPy and Jax :class:`Scalars`, NumPy :class:`Matrix`, etc.
All these increase the fidelity of our frontends.
Writing Frontend Functions
--------------------------
**Jax**
JAX has two distinct groups of functions, those in the :mod:`jax.lax` namespace and those in the :mod:`jax.numpy` namespace.
The former set of functions map very closely to the API for the Accelerated Linear Algebra (`XLA <https://www.tensorflow.org/xla>`_) compiler, which is used under the hood to run high performance JAX code.
The latter set of functions map very closely to NumPy's well known API.
In general, all functions in the :mod:`jax.numpy` namespace are themselves implemented as a composition of the lower-level functions in the :mod:`jax.lax` namespace.
When transpiling between frameworks, the first step is to trace a computation graph of low level python functions for the source framework using Ivy's tracer, before then replacing these nodes with the associated functions in Ivy's frontend API.
Given that all jax code can be decomposed into :mod:`jax.lax` function calls, when transpiling JAX code it should always be possible to express the computation graph as a composition of only :mod:`jax.lax` functions.
Therefore, arguably these are the *only* functions we should need to implement in the JAX frontend.
However, in general we wish to be able to trace a graph in the backend framework with varying levels of dynamicism.
A graph of only :mod:`jax.lax` functions chained together in general is more *static* and less *dynamic* than a graph which chains :mod:`jax.numpy` functions together.
We wish to enable varying extents of dynamicism when creating a graph with our tracer, and therefore we also implement the functions in the :mod:`jax.numpy` namespace in our frontend API for JAX.
Thus, both :mod:`lax` and :mod:`numpy` modules are created in the JAX frontend API.
We start with the function :func:`lax.add` as an example.
.. code-block:: python
# in ivy/functional/frontends/jax/lax/operators.py
@to_ivy_arrays_and_back
def add(x, y):
return ivy.add(x, y)
:func:`lax.add` is categorised under :code:`operators` as shown in the `jax.lax`_ package directory.
We organize the functions using the same categorizations as the original framework, and also mimic the importing behaviour regarding modules and namespaces etc.
For the function arguments, these must be identical to the original function in Jax.
In this case, `jax.lax.add`_ has two arguments, and so we will also have the same two arguments in our Jax frontend :func:`lax.add`.
In this case, the function will then simply return :func:`ivy.add`, which in turn will link to the backend-specific implementation :func:`ivy.add` according to the framework set in the backend.
.. code-block:: python
# in ivy/functional/frontends/jax/lax/operators.py
@to_ivy_arrays_and_back
def tan(x):
return ivy.tan(x)
Using :func:`lax.tan` as a second example, we can see that this is placed under :mod:`operators`, again in the `jax.lax`_ directory.
By referring to the `jax.lax.tan`_ documentation, we can see that it has only one argument.
In the same manner as our :func:`add` function, we simply link its return to :func:`ivy.tan`, and again the computation then depends on the backend framework.
**NumPy**
.. code-block:: python
# in ivy/functional/frontends/numpy/mathematical_functions/arithmetic_operations.py
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _add(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
x1, x2 = promote_types_of_numpy_inputs(x1, x2)
ret = ivy.add(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
In NumPy, :func:`add` is categorised under :mod:`mathematical_functions` with a sub-category of :mod:`arithmetic_operations` as shown in the `numpy mathematical functions`_ directory.
It is important to note that :func:`add` is a universal function (`ufunc <https://numpy.org/doc/stable/reference/ufuncs.html>`_) in NumPy, thus the function is actually an object with instance methods like :code:`.at` and :code:`.reduce`, etc.
We deal with this in the NumPy frontend by including a :class:`ufunc` class and initialising it in the :mod:`__init__` file:
.. code-block:: python
# in ivy/functional/frontends/numpy/__init__.py
from ivy.functional.frontends.numpy.mathematical_functions.arithmetic_operations import _add
add = ufunc("_add")
As shown, we import the above function :func:`_add` and use it to initialise the :class:`ufunc` object which corresponds to the NumPy :func:`add` function.
Practically the :func:`add` object calls the :func:`_add` under the hood, but it has all the extra instance methods of the :class:`ufunc` class.
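A much simplified sketch of this dispatch pattern (ignoring the many instance methods the real :class:`ufunc` class exposes) might look like:
.. code-block:: python
# simplified sketch of the ufunc dispatch idea; not the actual class
import ivy.functional.frontends.numpy as np_frontend
class ufunc_sketch:
    def __init__(self, name):
        self._name = name  # e.g. "_add", the name of the private function
    def __call__(self, *args, **kwargs):
        # dispatch to the private function, e.g. np_frontend._add(...)
        return getattr(np_frontend, self._name)(*args, **kwargs)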
All other functions which are :class:`ufunc` objects in NumPy are implemented in the same way.
Of course, if the :class:`ufunc` object and its respective function had the same name, one would overwrite the other; to prevent this, we make the actual function private by prefixing its name with an underscore.
Since only the :class:`ufunc` object should be accessible to the user, this approach is sufficient.
When adding new NumPy functions which are :class:`ufuncs`, it's important to implement them in this way in order to properly replicate their functionality.
Namely, a private function needs to be created in the respective sub-category, this function needs to be imported in the :mod:`__init__` file, and a :class:`ufunc` object needs to be created that shares the name of the function.
For functions which are not :class:`ufuncs`, they are named normally without the underscore and are implemented as any other function.
The function arguments for this function are slightly more complex due to the extra optional arguments.
Additional handling code is added to recover the behaviour according to the `numpy.add <https://numpy.org/doc/1.23/reference/generated/numpy.add.html>`_ documentation.
For example, :code:`@handle_numpy_out` is added to functions with an :code:`out` argument and it handles the inplace update of the :class:`ivy.Array` specified by :code:`out`, or the :class:`ivy.Array` wrapped by a frontend :class:`ndarray`.
This wrapper was added because :code:`out` can be either a positional or keyword argument in most functions, thus it required some additional logic for proper handling.
Additionally, :code:`casting` and :code:`dtype` are handled in the :code:`@handle_numpy_casting` wrapper, which casts the input arguments to the desired dtype as specified by :code:`dtype` and the chosen :code:`casting` rules.
There's an additional wrapper for the :code:`dtype` argument :code:`@handle_numpy_dtype`.
This wrapper is included to handle the various formats of the :code:`dtype` argument which NumPy `accepts <https://numpy.org/doc/stable/reference/arrays.dtypes.html#specifying-and-constructing-data-types>`_, such as type strings, :class:`numpy.Dtype` objects, characters, etc.
In NumPy, most functions which can return a scalar value return it as a NumPy `Scalar <https://numpy.org/doc/stable/reference/arrays.scalars.html>`_.
To replicate this we add the wrapper :code:`@from_zero_dim_arrays_to_scalar` which converts outputs that would normally be 0-dim arrays from Ivy functions, to a NumPy scalar.
Of course the returned scalar object is actually an Ivy frontend equivalent object which behaves very similarly to the frontend :class:`ndarray`.
Finally, :code:`order` is handled in the :code:`@to_ivy_arrays_and_back` decorator.
The returned result is then obtained through :func:`ivy.add` just like the other examples.
However, the argument :code:`subok` is completely unhandled here because it controls whether or not subclasses of the :class:`numpy.ndarray` should be permitted as inputs to the function.
All ivy functions by default do enable subclasses of the :class:`ivy.Array` to be passed, and the frontend function will be operating with :class:`ivy.Array` instances rather than :class:`numpy.ndarray` instances, and so we omit this argument.
Again, it has no bearing on input-output behaviour and so this is not a problem when transpiling between frameworks.
See the section "Unused Arguments" below for more details.
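As a rough illustration of the :code:`out` handling described above, the core idea of the wrapper can be sketched as follows; the actual decorator in :code:`ivy/functional/frontends/numpy/func_wrapper.py` also deals with :code:`out` passed positionally, among other details.
.. code-block:: python
# conceptual sketch of the ``@handle_numpy_out`` idea; not the actual implementation
import functools
import ivy
def handle_numpy_out_sketch(fn):
    @functools.wraps(fn)
    def wrapped(*args, out=None, **kwargs):
        # if ``out`` is a frontend ndarray, work with the ivy.Array it wraps
        out_array = getattr(out, "ivy_array", out)
        ret = fn(*args, out=out_array, **kwargs)
        if out is not None:
            # ensure the array wrapped by ``out`` reflects the computed result
            ivy.inplace_update(out_array, ret)
            return out
        return ret
    return wrapped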
.. code-block:: python
# in ivy/functional/frontends/numpy/mathematical_functions/trigonometric_functions.py
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _tan(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.tan(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
For the second example, :func:`tan` has a sub-category of :mod:`trigonometric_functions` according to the `numpy mathematical functions`_ directory.
By referring to the `numpy.tan`_ documentation, we can see it has the same additional arguments as the :func:`add` function and it's also a :class:`ufunc`.
In the same manner as :func:`add`, we handle the argument :code:`out`, :code:`where`, :code:`dtype`, :code:`casting`, and :code:`order` but we omit support for :code:`subok`.
**TensorFlow**
.. code-block:: python
# in ivy/functional/frontends/tensorflow/math.py
@to_ivy_arrays_and_back
def add(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return ivy.add(x, y)
The :func:`add` function is categorised under the :mod:`math` folder in the TensorFlow frontend.
There are three arguments according to the `tf.math.add <https://www.tensorflow.org/api_docs/python/tf/math/add>`_ documentation, which are written accordingly as shown above.
Just like the previous examples, the implementation wraps :func:`ivy.add`, which itself defers to backend-specific functions depending on which framework is set in Ivy's backend.
The arguments :code:`x` and :code:`y` are both used in the implementation, but the argument :code:`name` is not used.
Similar to the omitted argument in the NumPy example above, the :code:`name` argument does not change the input-output behaviour of the function.
Rather, this argument is added purely for the purpose of operation logging and retrieval, and also graph visualization in TensorFlow.
Ivy does not support the unique naming of individual operations, and so we omit support for this particular argument.
Additionally, TensorFlow only allows explicit casting, therefore there are no promotion rules in the TensorFlow frontend, except in the case of array like or scalar inputs, which get casted to the dtype of the other argument if it's a :class:`Tensor`, or the default dtype if both arguments are array like or scalar.
The function :func:`check_tensorflow_casting` is added to functions with multiple arguments such as :func:`add`, and it ensures the second argument is the same type as the first, just as TensorFlow does.
.. code-block:: python
# in ivy/functional/frontends/tensorflow/math.py
@to_ivy_arrays_and_back
def tan(x, name=None):
return ivy.tan(x)
Likewise, :code:`tan` is also placed under :mod:`math`.
By referring to the `tf.math.tan`_ documentation, we add the same arguments, and simply wrap :func:`ivy.tan` in this case.
Again, we do not support the :code:`name` argument for the reasons outlined above.
**NOTE**
Many of the functions in the :mod:`tf.raw_ops` module have identical behaviour to functions in the general TensorFlow namespace, e.g. :func:`tf.argmax`.
However, these functions are specified to have key-word only arguments and in some cases they have different argument names.
In order to tackle these variations in behaviour, the :code:`map_raw_ops_alias` decorator was designed to wrap the functions that exist in the TensorFlow namespace, thus reducing unnecessary re-implementations.
.. code-block:: python
# in ivy/functional/frontends/tensorflow/math.py
@to_ivy_arrays_and_back
def argmax(input, axis, output_type=None, name=None):
if output_type in ["uint16", "int16", "int32", "int64"]:
return ivy.astype(ivy.argmax(input, axis=axis), output_type)
else:
return ivy.astype(ivy.argmax(input, axis=axis), "int64")
The function :func:`argmax` is implemented in the :mod:`tf.math` module of the TensorFlow framework, and an identical function exists in the :mod:`tf.raw_ops` module as :func:`ArgMax`.
Both functions have identical behaviour, except that all arguments are passed as keyword-only for :func:`tf.raw_ops.ArgMax`.
In some corner cases arguments are also renamed: in :func:`tf.raw_ops.ArgMax`, the :code:`dimension` argument replaces the :code:`axis` argument of :func:`tf.math.argmax`.
Let's see how the :code:`map_raw_ops_alias` decorator can be used to tackle these variations.
.. code-block:: python
# in ivy/functional/frontends/tensorflow/raw_ops.py
ArgMax = to_ivy_arrays_and_back(
map_raw_ops_alias(
tf_frontend.math.argmax,
kwargs_to_update={"dimension": "axis"},
)
)
The decorator :code:`map_raw_ops_alias` here takes the existing behaviour of :func:`tf_frontend.math.argmax` as its first parameter and changes all of its arguments to keyword-only. The argument :code:`kwargs_to_update` is a dictionary indicating all argument-name updates to be made; in the case of :func:`tf.raw_ops.ArgMax`, :code:`dimension` replaces :code:`axis`.
The wrapper mentioned above is implemented here `map_raw_ops_alias <https://github.com/unifyai/ivy/blob/54cc9cd955b84c50a1743dddddaf6e961f688dd5/ivy/functional/frontends/tensorflow/func_wrapper.py#L127>`_ in the ivy codebase.
**PyTorch**
.. code-block:: python
# in ivy/functional/frontends/torch/pointwise_ops.py
@to_ivy_arrays_and_back
def add(input, other, *, alpha=None, out=None):
return ivy.add(input, other, alpha=alpha, out=out)
For PyTorch, :func:`add` is categorised under :mod:`pointwise_ops` as is the case in the `torch`_ framework.
In this case, the native `torch.add`_ has both positional and keyword arguments, and we therefore use the same for our PyTorch frontend :func:`add`.
We wrap :func:`ivy.add` as usual.
.. code-block:: python
# in ivy/functional/frontends/torch/pointwise_ops.py
@to_ivy_arrays_and_back
def tan(input, *, out=None):
return ivy.tan(input, out=out)
:func:`tan` is also placed under :mod:`pointwise_ops` as is the case in the `torch`_ framework.
Looking at the `torch.tan`_ documentation, we can mimic the same arguments, and again simply wrap :func:`ivy.tan`, also making use of the :code:`out` argument in this case.
Short Frontend Implementations
------------------------------
Ideally, all frontend functions should call the equivalent Ivy function and only be one line long. This is mainly because compositional implementations are bound to be slower than direct backend implementation calls.
In case a frontend function is complex and there is no equivalent Ivy function to use, it is strongly advised to add that function to our Experimental API. To do so, you are invited to open a *Missing Function Suggestion* issue as described in the `Open Tasks <../contributing/open_tasks.rst>`_ section. A member of our team will then review your issue, and if the proposed addition is deemed to be timely and sensible, we will add the function to the "Extend Ivy Functional API" `ToDo list issue <https://github.com/unifyai/ivy/issues/3856>`_.
If you would rather not wait around for a member of our team to review your suggestion, you can instead go straight ahead and add the frontend function as a heavy composition of the existing Ivy functions, with a :code:`#ToDo` comment included, explaining that this frontend implementation will be simplified when :func:`ivy.func_name` is added.
**Examples**
The native TensorFlow function :func:`tf.reduce_logsumexp` does not have an equivalent function in Ivy, therefore it can be composed of multiple Ivy functions instead.
**TensorFlow Frontend**
.. code-block:: python
# ivy/functional/frontends/tensorflow/math.py
@to_ivy_arrays_and_back
def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name="reduce_logsumexp"):
# stable logsumexp trick
max_input_tensor = ivy.max(input_tensor, axis=axis, keepdims=True)
return (
ivy.log(
ivy.sum(
ivy.exp(input_tensor - max_input_tensor),
axis=axis,
keepdims=keepdims,
)
)
+ max_input_tensor
).astype(input_tensor.dtype)
Through compositions, we can easily meet the required input-output behaviour for the TensorFlow frontend function.
The entire workflow for extending the Ivy Frontends as an external contributor is explained in more detail in the :ref:`Open Tasks <overview/contributing/open_tasks:Frontend APIs>` section.
Unused Arguments
----------------
As can be seen from the examples above, there are often cases where we do not add support for particular arguments in the frontend function.
Generally, we can omit support for a particular argument only if the argument **does not** fundamentally affect the input-output behaviour of the function in a mathematical sense.
The only two exceptions to this rule are arguments related to either the data type or the device on which the returned array(s) should reside.
Examples of arguments which can be omitted, on account that they do not change the mathematics of the function are arguments which relate to:
* the algorithm or approximations used under the hood, such as :code:`precision` and :code:`preferred_element_type` in `jax.lax.conv_general_dilated <https://github.com/google/jax/blob/1338864c1fcb661cbe4084919d50fb160a03570e/jax/_src/lax/convolution.py#L57>`_.
* the specific array class in the original framework, such as :code:`subok` in `numpy.add <https://numpy.org/doc/1.23/reference/generated/numpy.add.html>`_.
* the labelling of functions for organizational purposes, such as :code:`name` in `tf.math.add <https://github.com/tensorflow/tensorflow/blob/v2.10.0/tensorflow/python/ops/math_ops.py#L3926-L4004>`_.
There are likely to be many other examples of arguments which do not fundamentally affect the input-output behaviour of the function in a mathematical sense, and so can also be omitted from Ivy's frontend implementation.
The reason we omit these arguments in Ivy is because Ivy is not designed to provide low-level control to functions that extend beyond the pure mathematics of the function.
This is a requirement because Ivy abstracts the backend framework, and therefore also abstracts everything below the backend framework's functional API, including the backend array class, the low-level language compiled to, the device etc.
Most ML frameworks do not offer per-array control of the memory layout, and control for the finer details of the algorithmic approximations under the hood, and so we cannot in general offer this level of control at the Ivy API level, nor the frontend API level as a direct result.
As explained above, this is not a problem, as the memory layout has no bearing at all on the input-output behaviour of the function.
In contrast, the algorithmic approximation may have a marginal bearing on the final results in some cases, but Ivy is only designed to unify to within a reasonable numeric approximation in any case, and so omitting these arguments also very much fits within Ivy's design.
Supported Data Types and Devices
--------------------------------
Sometimes, the corresponding function in the original framework might only support a subset of data types.
For example, :func:`tf.math.logical_and` only supports inputs of type :code:`tf.bool`.
However, Ivy's `implementation <https://github.com/unifyai/ivy/blob/6089953297b438c58caa71c058ed1599f40a270c/ivy/functional/frontends/tensorflow/math.py#L84>`_ is as follows, with direct wrapping around :func:`ivy.logical_and`:
.. code-block:: python
@to_ivy_arrays_and_back
def logical_and(x, y, name="LogicalAnd"):
return ivy.logical_and(x, y)
:func:`ivy.logical_and` supports all data types, and so :func:`ivy.functional.frontends.tensorflow.math.logical_and` can also easily support all data types.
However, the primary purpose of these frontend functions is for code transpilations, and in such cases it would never be useful to support extra data types beyond :code:`tf.bool`, as the tensorflow code being transpiled would not support this.
Additionally, the unit tests for all frontend functions use the original framework function as the ground truth, and so we can only test :func:`ivy.functional.frontends.tensorflow.math.logical_and` with boolean inputs anyway.
For these reasons, all frontend functions which correspond to functions with limited data type support in the native framework (in other words, which have even more restrictions than the data type limitations of the framework itself) should be flagged `as such <https://github.com/unifyai/ivy/blob/6089953297b438c58caa71c058ed1599f40a270c/ivy/functional/frontends/tensorflow/math.py#L88>`_ in a manner like the following:
.. code-block:: python
@with_unsupported_dtypes({"2.13.0 and below": ("float16", "bfloat16")}, "tensorflow")
The same logic applies to unsupported devices.
Even if the wrapped Ivy function supports more devices, we should still flag the frontend function's supported devices to match those supported by the function in the native framework.
Again, this is only needed if the limitations go beyond those of the framework itself.
For example, it is not necessary to uniquely flag every single NumPy function as supporting only CPU, as this is a limitation of the entire framework, and this limitation is already `globally flagged <https://github.com/unifyai/ivy/blob/6eb2cadf04f06aace9118804100b0928dc71320c/ivy/functional/backends/numpy/__init__.py#L21>`_.
It could also be the case that a frontend function supports a data type, but one or more of the backend frameworks does not, and therefore the frontend function may not support the data type due to backend limitation.
For example, the frontend function `jax.lax.cumprod <https://github.com/unifyai/ivy/blob/6e80b20d27d26b67a3876735c3e4cd9a1d38a0e9/ivy/functional/frontends/jax/lax/operators.py#L111>`_ does support all data types, but PyTorch does not support :code:`bfloat16` for the function :func:`cumprod`, even though the framework generally supports handling :code:`bfloat16` data type.
In that case, we should flag that the backend function does not support :code:`bfloat16` as this is done `here <https://github.com/unifyai/ivy/blob/6e80b20d27d26b67a3876735c3e4cd9a1d38a0e9/ivy/functional/backends/torch/statistical.py#L234>`_.
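For illustration, such a backend-level flag might look like the following sketch (the version string and the signature here are illustrative; the real ones live in the backend file linked above):
.. code-block:: python
# in ivy/functional/backends/torch/statistical.py (illustrative sketch)
@with_unsupported_dtypes({"2.0.1 and below": ("bfloat16",)}, backend_version)
def cumprod(x, /, *, axis=0, dtype=None, out=None):
    ...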
Classes and Instance Methods
----------------------------
Most frameworks include instance methods and special methods on their array class for common array processing functions, such as :func:`reshape`, :func:`expand_dims` and :func:`add`.
This simple design choice comes with many advantages, some of which are explained in our `Ivy Array <../design/ivy_as_a_framework/ivy_array.rst>`_ section.
**Important Note**
Before implementing the instance method or special method, make sure that the regular function in the specific frontend is already implemented.
In order to implement Ivy's frontend APIs to the extent that is required for arbitrary code transpilations, it's necessary for us to also implement these instance methods and special methods of the framework-specific array classes (:class:`tf.Tensor`, :class:`torch.Tensor`, :class:`numpy.ndarray`, :class:`jax.DeviceArray` etc).
**Instance Method**
**numpy.ndarray**
For an example of how these are implemented, we first show the instance method for :meth:`np.ndarray.argsort`, which is implemented in the frontend `ndarray class <https://github.com/unifyai/ivy/blob/94679019a8331cf9d911c024b9f3e6c9b09cad02/ivy/functional/frontends/numpy/ndarray/ndarray.py#L8>`_:
.. code-block:: python
# ivy/functional/frontends/numpy/ndarray/ndarray.py
def argsort(self, *, axis=-1, kind=None, order=None):
return np_frontend.argsort(self._ivy_array, axis=axis, kind=kind, order=order)
Under the hood, this simply calls the frontend :func:`np_frontend.argsort` function, which itself is implemented as follows:
.. code-block:: python
# ivy/functional/frontends/numpy/sorting_searching_counting/sorting.py
@to_ivy_arrays_and_back
def argsort(
x,
/,
*,
axis=-1,
kind=None,
order=None,
):
return ivy.argsort(x, axis=axis)
**Special Method**
Some examples referring to the special methods would make things more clear.
For example, let's take a look at how :meth:`tf_frontend.tensor.__add__` is implemented alongside its reverse counterpart :meth:`tf_frontend.tensor.__radd__`.
.. code-block:: python
# ivy/functional/frontends/tensorflow/tensor.py
def __radd__(self, x, name="radd"):
return tf_frontend.math.add(x, self._ivy_array, name=name)
def __add__(self, y, name="add"):
return self.__radd__(y)
Here also, both of them simply call the frontend :func:`tf_frontend.math.add` under the hood.
The functions with reverse operators should call the same frontend function as shown in the examples above.
The implementation for the :func:`tf_frontend.math.add` is shown as follows:
.. code-block:: python
# ivy/functional/frontends/tensorflow/math.py
@to_ivy_arrays_and_back
def add(x, y, name=None):
return ivy.add(x, y)
**numpy.matrix**
To support special classes and their instance methods, the equivalent classes are created in their respective frontend so that the useful instance methods are supported for transpilation.
For instance, the :class:`numpy.matrix` class is supported in the Ivy NumPy frontend.
Part of the code is shown below as an example:
.. code-block:: python
# ivy/functional/frontends/numpy/matrix/methods.py
class matrix:
def __init__(self, data, dtype=None, copy=True):
self._init_data(data, dtype)
def _init_data(self, data, dtype):
if isinstance(data, str):
self._process_str_data(data, dtype)
elif isinstance(data, (list, ndarray)) or ivy.is_array(data):
if isinstance(data, ndarray):
data = data.ivy_array
if ivy.is_array(data) and dtype is None:
dtype = data.dtype
data = ivy.array(data, dtype=dtype)
self._data = data
else:
raise ivy.exceptions.IvyException("data must be an array, list, or str")
ivy.assertions.check_equal(
len(ivy.shape(self._data)), 2, message="data must be 2D"
)
self._dtype = self._data.dtype
self._shape = ivy.shape(self._data)
With this class available, the supported instance methods can now be included in the class.
For example, :class:`numpy.matrix` has an instance method of :meth:`any`:
.. code-block:: python
# ivy/functional/frontends/numpy/matrix/methods.py
from ivy.functional.frontends.numpy import any
...
def any(self, axis=None, out=None):
if ivy.exists(axis):
return any(self.A, axis=axis, keepdims=True, out=out)
return any(self.A, axis=axis, out=out)
We need to create these frontend array classes and all of their instance methods and also their special methods such that we are able to transpile code which makes use of these methods.
As explained in `Ivy as a Transpiler <../design/ivy_as_a_transpiler.rst>`_, when transpiling code we first extract the computation graph in the source framework.
In the case of instance methods, we then replace each of the original instance methods in the extracted computation graph with these new instance methods defined in the Ivy frontend class.
Frontend Data Type Promotion Rules
----------------------------------
Each frontend framework has its own rules governing the common result type for two array operands during an arithmetic operation.
In order to ensure that each frontend framework implemented in Ivy has the same data type promotion behaviours as the native framework does, we have implemented data type promotion rules according to the framework-specific promotion tables of the frameworks we currently support as frontends.
These rules can be applied by calling :func:`promote_types_of_<frontend>_inputs` and passing in both array operands.
.. code-block:: python
# ivy/functional/frontends/torch/pointwise_ops.py
@to_ivy_arrays_and_back
def add(input, other, *, alpha=1, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.add(input, other, alpha=alpha, out=out)
Although in most cases the array operands passed into an arithmetic operation function should already be of the same data type, applying the data type promotion rules adds a layer of sanity checking to prevent data precision losses or exceptions from further arithmetic operations.
TensorFlow is a framework where casting is completely explicit, except for array likes and scalars.
As such there are no promotion rules we replicate for the TensorFlow frontend, instead we check if the two arguments of the function are the same type using :func:`check_tensorflow_casting`.
.. code-block:: python
# ivy/functional/frontends/tensorflow/math.py
@to_ivy_arrays_and_back
def add(x, y, name=None):
x, y = check_tensorflow_casting(x, y)
return ivy.add(x, y)
NumPy Special Argument - Casting
--------------------------------
NumPy supports an additional, special argument - :code:`casting`, which allows the user to determine the kind of dtype casting that fits their objectives.
The :code:`casting` rules are explained in the `numpy.can_cast documentation <https://numpy.org/doc/stable/reference/generated/numpy.can_cast.html>`_.
While handling this argument, the :code:`dtype` argument is used to state the desired return dtype.
To handle this, a decorator - :code:`handle_numpy_casting` is used to simplify the handling logic and reduce code redundancy.
It is located in the `ivy/functional/frontends/numpy/func_wrapper.py <https://github.com/unifyai/ivy/blob/45d443187678b33dd2b156f29a18b84efbc48814/ivy/functional/frontends/numpy/func_wrapper.py#L39>`_.
This decorator is then added to the numpy frontend functions with the :code:`casting` argument.
An example of the :func:`add` function is shown below.
.. code-block:: python
# ivy/functional/frontends/numpy/mathematical_functions/arithmetic_operations.py
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _add(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
x1, x2 = promote_types_of_numpy_inputs(x1, x2)
ret = ivy.add(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
There is a special case for the :code:`casting` argument, where the allowed dtype must be :code:`bool`; therefore, a :code:`handle_numpy_casting_special` decorator is included to handle this.
.. code-block:: python
# ivy/functional/frontends/numpy/func_wrapper.py
def handle_numpy_casting_special(fn: Callable) -> Callable:
@functools.wraps(fn)
def new_fn(*args, casting="same_kind", dtype=None, **kwargs):
ivy.assertions.check_elem_in_list(
casting,
["no", "equiv", "safe", "same_kind", "unsafe"],
message="casting must be one of [no, equiv, safe, same_kind, unsafe]",
)
if ivy.exists(dtype):
ivy.assertions.check_equal(
ivy.as_ivy_dtype(dtype),
"bool",
message="output is compatible with bool only",
)
return fn(*args, **kwargs)
new_fn.handle_numpy_casting_special = True
return new_fn
An example function using this is the :func:`numpy.isfinite` function.
.. code-block:: python
# ivy/functional/frontends/numpy/logic/array_type_testing.py
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting_special
@from_zero_dim_arrays_to_scalar
def _isfinite(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.isfinite(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
Frontends Duplicate Policy
--------------------------
Some frontend functions appear in multiple namespaces within the original framework that the frontend is replicating.
For example the :func:`np.asarray` function appears in `Array manipulation routines`_ and also in `Array creation routines`_.
This section outlines the policy that serves as a guide for handling such duplicate functions, under the following sub-headings:
**Listing duplicate frontend functions on the ToDo lists**
Essentially, there are two types of duplicate functions:
1. Functions that are listed in multiple namespaces but are callable from the same path; for example, :func:`asarray` is listed in `manipulation routines` and `creation routines`, yet it is only callable from the single path :func:`np.asarray`.
2. Functions that are listed in multiple namespaces and are callable from different paths; for example, the functions :func:`tf.math.tan` and :func:`tf.raw_ops.Tan`.
When listing frontend functions, extra care should be taken to keep note of these two types of duplicate functions.
* For duplicate functions of the first type, we should list the function once in any namespace where it exists and leave it out of all other namespaces.
* For duplicates of the second type, we should list the function in each namespace where it exists but there should be a note to highlight that the function(s) on the list are duplicates and should therefore be implemented as aliases. For example, most of the functions in `tf.raw_ops` are aliases and this point is made clear when listing the functions on the ToDo list `here <https://github.com/unifyai/ivy/issues/1565>`_.
**Contributing duplicate frontend functions**
Before working on a frontend function, contributors should check if the function is designated as an alias on the ToDo list.
If the function is an alias, you should check if there is an implementation that can be aliased.
* If an implementation exists then simply create an alias of the implementation; for example, many functions in `ivy/functional/frontends/tensorflow/raw_ops` are implemented as aliases `here <https://github.com/unifyai/ivy/blob/main/ivy/functional/frontends/tensorflow/raw_ops.py>`_, as sketched after this list.
* If there is no implementation to be aliased then feel free to contribute the implementation first, then go ahead to create the alias.
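For instance, following the same pattern as the :func:`ArgMax` example shown earlier, an alias for :func:`tf.raw_ops.Tan` might simply look like:
.. code-block:: python
# in ivy/functional/frontends/tensorflow/raw_ops.py (sketch of the alias pattern)
Tan = to_ivy_arrays_and_back(
    map_raw_ops_alias(tf_frontend.math.tan)
)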
**Testing duplicate functions**
Unit tests should be written for all aliases. This is arguably a duplication, but having a unique test for each alias helps us to keep the testing code organised and aligned with the groupings in the frontend API.
**Round Up**
This should hopefully have given you a better grasp on what the Ivy Frontend APIs are for, how they should be implemented, and the things to watch out for!
We also have a short `YouTube tutorial series`_ on this as well if you prefer a video explanation!
If you have any questions, please feel free to reach out on `discord`_ in the `ivy frontends thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/SdiyetRNey8" class="video">
</iframe>
| ivy/docs/overview/deep_dive/ivy_frontends.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/ivy_frontends.rst",
"repo_id": "ivy",
"token_count": 13549
} | 5 |
Glossary
========
All of these new words can get confusing! We've created a glossary to help nail down some Ivy terms that you might find tricky.
.. glossary::
:sorted:
Pipeline
A pipeline is a means of automating the machine learning workflow by enabling data to be transformed and correlated into a model that can then be analyzed to achieve outputs.
Ivy Backends
Ivy Backends are supported frameworks that Ivy can convert code to.
The default is NumPy.
Ivy Frontends
Ivy Frontends are supported frameworks that Ivy can convert code from.
Framework
Frameworks are interfaces that allow scientists and developers to build and deploy machine learning models faster and easier.
E.g. Tensorflow and PyTorch.
Ivy Transpiler
The transpiler allows framework to framework code conversions for supported frameworks.
Ivy Container
An Ivy class which inherits from :code:`dict` and allows for storing nested data.
Ivy Compiler
A wrapper function around native compiler functions, which uses lower level compilers such as XLA to compile to lower level languages such as C++, CUDA, TorchScript, etc.
Graph Compiler
Graph Compilers map the high-level computational graph coming from frameworks to operations that are executable on a specific device.
Ivy Tracer
Ivy's Tracer creates a graph as a composition of functions in the functional API in Python.
Ivy Functional API
Used for defining complex models, the Ivy functional API does not implement its own backend but wraps around other frameworks' functional APIs and brings them into alignment.
Framework Handler
Backend Handler
Used to control which framework Ivy is converting code to.
Automatic Code Conversions
Allows code to be converted from one framework to another whilst retaining its functional assets.
Primary Functions
Primary functions are the lowest level building blocks in Ivy and are generally implemented as light wrapping around an existing function in the backend framework, which serves a near-identical purpose.
Compositional Functions
Compositional functions are functions that are implemented as a composition of other Ivy functions,
Mixed Functions
Mixed functions are functions that have some backend-specific implementations but not for all backends.
Standalone Functions
Standalone functions are functions that do not reference any other primary, compositional, or mixed functions whatsoever.
These are mainly convenience functions.
Nestable Functions
Nestable functions are functions that can accept :class:`ivy.Container` instances in place of any of the arguments.
Convenience Functions
Convenience functions can be used to organize and improve the code for other functions.
Native Array
The :class:`ivy.NativeArray` is simply a placeholder class for a backend-specific array class, such as :class:`np.ndarray`, :class:`tf.Tensor` or :class:`torch.Tensor`.
Ivy Array
The :class:`ivy.Array` is a simple wrapper class, which wraps around the :class:`ivy.NativeArray`.
Submodule Helper Functions
These are standalone/convenience functions that are specific to a submodule.
| ivy/docs/overview/glossary.rst/0 | {
"file_path": "ivy/docs/overview/glossary.rst",
"repo_id": "ivy",
"token_count": 896
} | 6 |
.. _`RWorks ML-Unifying Companies`:
ML-Unifying Companies
=====================
.. _`Quansight`: https://quansight.com/
.. _`Travis Oliphant`: https://twitter.com/teoliphant
.. _`NumPy`: https://numpy.org/
.. _`SciPy`: https://scipy.org/
.. _`Numba`: https://numba.pydata.org/
.. _`Conda`: https://docs.conda.io/
.. _`NumFOCUS`: https://numfocus.org/
.. _`PyData`: https://pydata.org/
.. _`Anaconda`: https://www.anaconda.com/
.. _`Array API Standard`: https://data-apis.org/array-api
.. _`Modular`: https://www.modular.com/
.. _`MLIR`: https://mlir.llvm.org/
.. _`LLVM`: https://llvm.org/
.. _`OctoML`: https://octoml.ai/
.. _`Apache TVM`: https://tvm.apache.org/
.. _`discord`: https://discord.gg/sXyFF8tDtm
Quansight
---------
`Quansight`_ was founded by `Travis Oliphant`_, a leader in the Python Data community who has authored or led the creation of industry cornerstones such as `NumPy`_, `SciPy`_, `Numba`_, and `Conda`_, and helped establish `NumFOCUS`_ and the `PyData`_ conference series.
Through consulting services, Quansight provides the additional people and expertise needed to deploy new technology, solve complex problems, or optimize what is already in place, so that it runs faster and uses less memory.
They work with data engineering, DevOps, data science, MLOps, and analytics teams to improve their performance.
They provide services for Data Engineering & MLOps, Infrastructure, Scaling & Acceleration, Visualization & Dashboards, Open Source Integration, Algorithms, AI & Machine Learning, Packaging & Environment Management, and Jupyter Technologies.
They are the creators of the `Array API Standard`_.
Modular
-------
`Modular`_ is a Startup company founded by the creators of `MLIR`_.
Their observation is that fragmentation and technical complexity have restricted the impact of AI to a privileged few.
The rest of the world isn’t benefiting as it should be from this transformational technology.
Their mission is to have a real, positive impact in the world by reinventing the way AI technology is developed and deployed into production with a next-generation developer platform.
There are very few extra details about their developer platform, but presumably it will provide a modular solution at a relatively low level of abstraction, given the `LLVM`_ and `MLIR`_ background of the founders.
OctoML
------
`OctoML`_ is a startup company founded by the creators of `Apache TVM`_.
Their mission is to make AI more sustainable and accessible, empowering more creators to harness the transformative power of ML to build intelligent applications.
They focus on efficient model execution and automation to scale services and reduce engineering burden.
Specifically, they enable models to run on a broad set of devices, making them easier to deploy without specialized skills.
The services include inference on the cloud, the edge, and a variety of platform and hardware vendors.
They strive to maximize performance, with very simple deployment and benchmarking features included.
| ivy/docs/overview/related_work/ml_unifying_companies.rst/0 | {
"file_path": "ivy/docs/overview/related_work/ml_unifying_companies.rst",
"repo_id": "ivy",
"token_count": 823
} | 7 |
# global
import copy
import functools
import numpy as np
from operator import mul
# local
from .wrapping import add_ivy_array_instance_methods
from .array import Array
| ivy/ivy/data_classes/array/__init__.py/0 | {
"file_path": "ivy/ivy/data_classes/array/__init__.py",
"repo_id": "ivy",
"token_count": 49
} | 8 |
# global
import abc
class _ArrayWithGradientsExperimental(abc.ABC):
pass
| ivy/ivy/data_classes/array/experimental/gradients.py/0 | {
"file_path": "ivy/ivy/data_classes/array/experimental/gradients.py",
"repo_id": "ivy",
"token_count": 27
} | 9 |
# global
import abc
from typing import Optional, Tuple, Union, List, Sequence
# local
import ivy
# ToDo: implement all methods here as public instance methods
# ToDo: update docstrings and typehints according to ivy\layers
class _ArrayWithLayers(abc.ABC):
def linear(
self: ivy.Array,
weight: Union[ivy.Array, ivy.NativeArray],
/,
*,
bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.linear. This method simply
wraps the function, and so the docstring for ivy.linear also applies to
this method with minimal changes.
Parameters
----------
self
The input array to compute linear transformation on.
*[outer_batch_shape,inner_batch_shape,in_features]*
weight
The weight matrix. *[outer_batch_shape,out_features,in_features]*
bias
The bias vector, default is ``None``. *[outer_batch_shape,out_features]*
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Result array of the linear transformation.
*[outer_batch_shape,inner_batch_shape,out_features]*
Examples
--------
>>> x = ivy.array([[1.1, 2.2, 3.3], \
[4.4, 5.5, 6.6], \
[7.7, 8.8, 9.9]])
>>> w = ivy.array([[1., 2., 3.], \
[4., 5., 6.], \
[7., 8., 9.]])
>>> b = ivy.array([1., 0., -1.])
>>> y = x.linear(w, bias=b)
>>> print(y)
ivy.array([[ 16.4, 35.2, 54. ],
[ 36.2, 84.7, 133. ],
[ 56. , 134. , 212. ]])
"""
return ivy.linear(
self._data,
weight,
bias=bias,
out=out,
)
def dropout(
self: ivy.Array,
prob: float,
/,
*,
scale: bool = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
training: bool = True,
seed: Optional[int] = None,
noise_shape: Optional[Sequence[int]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.dropout. This method simply
wraps the function, and so the docstring for ivy.dropout also applies
to this method with minimal changes.
Parameters
----------
self
The input array x to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
scale
Whether to scale the output by `1/(1-prob)`, default is ``True``.
dtype
output array data type. If dtype is None, the output array data type
must be inferred from x. Default: ``None``.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
seed
Set a default seed for random number generating (for
reproducibility).Default is ``None``.
noise_shape
a sequence representing the shape of the binary dropout mask that will be
multiplied with the input.
out
optional output array, for writing the result to. It must have
a shape that the inputs broadcast to.
Returns
-------
ret
Result array of the output after dropout is performed.
Examples
--------
With :class:`ivy.Array` instances:
>>> x = ivy.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.],
... [10., 11., 12.]])
>>> y = x.dropout(0.3)
>>> print(y)
ivy.array([[ 1.42857146, 2.85714293, 4.28571415],
[ 5.71428585, 7.14285755, 8.5714283 ],
[ 0. , 11.4285717 , 12.8571434 ],
[14.2857151 , 0. , 0. ]])
>>> x = ivy.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.],
... [10., 11., 12.]])
>>> y = x.dropout(0.3, scale=False)
>>> print(y)
ivy.array([[ 1., 2., 3.],
[ 4., 5., 0.],
[ 7., 0., 9.],
[10., 11., 0.]])
"""
return ivy.dropout(
self._data,
prob,
scale=scale,
dtype=dtype,
training=training,
seed=seed,
noise_shape=noise_shape,
out=out,
)
def dropout1d(
self: ivy.Array,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NWC",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.dropout1d. This method
        simply wraps the function, and so the docstring for ivy.dropout1d also
applies to this method with minimal changes.
Parameters
----------
self
The input array x to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
data_format
"NWC" or "NCW". Default is ``"NWC"``.
out
optional output array, for writing the result to. It must have
a shape that the inputs broadcast to.
Returns
-------
ret
Result array of the output after dropout is performed.
Examples
--------
>>> x = ivy.array([1, 1, 1]).reshape([1, 1, 3])
>>> y = x.dropout1d(0.5)
>>> print(y)
ivy.array([[[2., 0, 2.]]])
"""
return ivy.dropout1d(
self._data,
prob,
training=training,
data_format=data_format,
out=out,
)
def dropout2d(
self: ivy.Array,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NHWC",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.dropout2d. This method
        simply wraps the function, and so the docstring for ivy.dropout2d also
applies to this method with minimal changes.
Parameters
----------
self
The input array x to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
data_format
"NHWC" or "NCHW". Default is ``"NHWC"``.
out
optional output array, for writing the result to. It must have
a shape that the inputs broadcast to.
Returns
-------
ret
Result array of the output after dropout is performed.
Examples
--------
>>> x = ivy.array([[1, 1, 1], [2, 2, 2]])
>>> y = x.dropout2d(0.5)
>>> print(y)
ivy.array([[0., 0., 2.],
[4., 4., 4.]])
"""
return ivy.dropout2d(
self._data,
prob,
training=training,
data_format=data_format,
out=out,
)
def dropout3d(
self: ivy.Array,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NDHWC",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.dropout3d. This method
        simply wraps the function, and so the docstring for ivy.dropout3d also
applies to this method with minimal changes.
Parameters
----------
self
The input array x to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
data_format
"NDHWC" or "NCDHW". Default is ``"NDHWC"``.
out
optional output array, for writing the result to. It must have
a shape that the inputs broadcast to.
Returns
-------
ret
Result array of the output after dropout is performed.
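        Examples
        --------
        A minimal illustrative sketch (only the output shape is shown, since
        dropout is stochastic):
        >>> x = ivy.ones([1, 2, 2, 2, 3])  # NDHWC volume
        >>> y = x.dropout3d(0.5)
        >>> print(y.shape)
        (1, 2, 2, 2, 3)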
"""
return ivy.dropout3d(
self._data,
prob,
training=training,
data_format=data_format,
out=out,
)
def scaled_dot_product_attention(
self: ivy.Array,
key: Union[ivy.Array, ivy.NativeArray],
value: Union[ivy.Array, ivy.NativeArray],
/,
*,
scale: Optional[float] = None,
mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
dropout_p: Optional[float] = 0.0,
is_causal: Optional[bool] = False,
training: Optional[bool] = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of
ivy.scaled_dot_product_attention. This method simply wraps the
function, and so the docstring for ivy.scaled_dot_product_attention
also applies to this method with minimal changes.
Parameters
----------
self
The queries input array. The shape of queries input array should be in
*[batch_shape,num_queries,feat_dim]*. The queries input array should
have the same size as keys and values.
key
The keys input array. The shape of keys input array should be in
*[batch_shape,num_keys,feat_dim]*. The keys input array should have
the same size as queries and values.
value
The values input array. The shape of values input should be in
*[batch_shape,num_keys,feat_dim]*. The values input array should
have the same size as queries and keys.
scale
The scale float value.
The scale float value is used to scale the query-key pairs before softmax.
mask
The mask input array. The mask to apply to the query-key values.
Default is None. The shape of mask input should be in
*[batch_shape,num_queries,num_keys]*.
dropout_p
Specifies the dropout probability, if greater than 0.0, dropout is applied
is_causal
If true, assumes causal attention masking and errors if both `mask` and
`is_causal` are set.
training
If True, dropout is used, otherwise dropout is not activated.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The output following application of scaled dot-product attention.
The output array is the weighted sum produced by the attention score
and value. The shape of output array is
*[batch_shape,num_queries,feat_dim]* .
Examples
--------
With :class:`ivy.Array` input:
>>> q = ivy.array([[[0.2, 1.], [2.2, 3.],[4.4, 5.6]]])
>>> k = ivy.array([[[0.6, 1.5], [2.4, 3.3],[4.2, 5.1]]])
>>> v = ivy.array([[[0.4, 1.3], [2.2, 3.1],[4.3, 5.3]]])
>>> result = ivy.scaled_dot_product_attention(q, k, v, scale=1, dropout_p=0.1,
... is_causal=True, training=True)
>>> print(result)
ivy.array([[[0.40000001, 1.29999995],
[2.19994521, 3.09994531],
[4.30000019, 5.30000019]]])
>>> q = ivy.array([[[0.2, 1.], [2.2, 3.],[4.4, 5.6]]])
>>> k = ivy.array([[[0.6, 1.5], [2.4, 3.3],[4.2, 5.1]]])
>>> v = ivy.array([[[0.4, 1.3], [2.2, 3.1],[4.3, 5.3]]])
>>> mask = ivy.array([[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0],[0.0, 0.0, 0.0]]])
>>> result = ivy.scaled_dot_product_attention(q,k,v,scale=1, mask=mask)
>>> print(result)
ivy.array([[[0.40000001, 1.29999995],
[2.19994521, 3.09994531],
[4.30000019, 5.30000019]]])
>>> q = ivy.array([[[0.2, 1.], [2.2, 3.], [4.4, 5.6]]])
>>> k = ivy.array([[[0.6, 1.5], [2.4, 3.3], [4.2, 5.1]]])
>>> v = ivy.array([[[0.4, 1.3], [2.2, 3.1], [4.3, 5.3]]])
>>> out = ivy.zeros(shape=(1, 3, 2))
>>> ivy.scaled_dot_product_attention(q, k, v, scale=1, dropout_p=0.1,
... is_causal=True, training=True, out=out)
>>> print(out)
ivy.array([[[0.40000001, 1.29999995],
[2.19994521, 3.09994531],
[4.30000019, 5.30000019]]])
"""
return ivy.scaled_dot_product_attention(
self._data,
key,
value,
scale=scale,
mask=mask,
dropout_p=dropout_p,
is_causal=is_causal,
training=training,
out=out,
)
def multi_head_attention(
self: ivy.Array,
/,
*,
key: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
value: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
num_heads: int = 8,
scale: Optional[float] = None,
attention_mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
in_proj_weights: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
q_proj_weights: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
k_proj_weights: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
v_proj_weights: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
out_proj_weights: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
in_proj_bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
out_proj_bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
is_causal: bool = False,
key_padding_mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
bias_k: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
bias_v: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
static_k: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
static_v: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
add_zero_attn: bool = False,
return_attention_weights: bool = False,
average_attention_weights: bool = True,
dropout: float = 0.0,
training: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
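        """ivy.Array instance method variant of ivy.multi_head_attention. This
        method simply wraps the function, and so the docstring for
        ivy.multi_head_attention also applies to this method with minimal
        changes.
        """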
return ivy.multi_head_attention(
self._data,
key=key,
value=value,
num_heads=num_heads,
scale=scale,
attention_mask=attention_mask,
in_proj_weights=in_proj_weights,
q_proj_weights=q_proj_weights,
k_proj_weights=k_proj_weights,
v_proj_weights=v_proj_weights,
out_proj_weights=out_proj_weights,
in_proj_bias=in_proj_bias,
out_proj_bias=out_proj_bias,
is_causal=is_causal,
key_padding_mask=key_padding_mask,
bias_k=bias_k,
bias_v=bias_v,
static_k=static_k,
static_v=static_v,
add_zero_attn=add_zero_attn,
return_attention_weights=return_attention_weights,
average_attention_weights=average_attention_weights,
dropout=dropout,
training=training,
out=out,
)
def conv1d(
self: ivy.Array,
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int]],
padding: str,
/,
*,
data_format: str = "NWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int]] = 1,
dilations: Union[int, Tuple[int]] = 1,
bias: Optional[ivy.Array] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.conv1d. This method simply
wraps the function, and so the docstring for ivy.conv1d also applies to
this method with minimal changes.
Parameters
----------
self
Input image *[batch_size,w,d_in]* or *[batch_size,d_in,w]*.
filters
Convolution filters *[fw,d_in,d_out]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating the
per-dimension paddings.
data_format
"NWC" or "NCW". Defaults to "NWC".
filter_format
Either "channel_first" or "channel_last". Defaults to "channel_last".
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
>>> x = ivy.array([[[1., 2.], [3., 4.], [6., 7.], [9., 11.]]]) # NWC
>>> filters = ivy.array([[[0., 1.], [1., 1.]]]) # WIO (I == C)
>>> result = x.conv1d(filters, (1,), 'VALID')
>>> print(result)
ivy.array([[[ 2., 3.],
... [ 4., 7.],
... [ 7., 13.],
... [11., 20.]]])
"""
return ivy.conv1d(
self._data,
filters,
strides,
padding,
data_format=data_format,
filter_format=filter_format,
x_dilations=x_dilations,
dilations=dilations,
bias=bias,
out=out,
)
def conv1d_transpose(
self: ivy.Array,
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int]],
padding: str,
/,
*,
output_shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
filter_format: str = "channel_last",
data_format: str = "NWC",
dilations: Union[int, Tuple[int]] = 1,
bias: Optional[ivy.Array] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.conv1d_transpose. This
method simply wraps the function, and so the docstring for
ivy.conv1d_transpose also applies to this method with minimal changes.
Parameters
----------
self
Input image *[batch_size,w,d_in]* or *[batch_size,d_in,w]*.
filters
Convolution filters *[fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
either the string ‘SAME’ (padding with zeros evenly), the string ‘VALID’ (no
padding), or a sequence of n (low, high) integer pairs that give the padding
to apply before and after each spatial dimension.
output_shape
Shape of the output (Default value = None)
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IOW",input data formats, while "channel_last" corresponds to "WOI".
data_format
The ordering of the dimensions in the input, one of "NWC" or "NCW". "NWC"
corresponds to input with shape (batch_size, width, channels), while "NCW"
corresponds to input with shape (batch_size, channels, width).
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The result of the transpose convolution operation.
Examples
--------
>>> x = ivy.array([[[1., 2.], [3., 4.], [6., 7.], [9., 11.]]]) # NWC
>>> filters = ivy.array([[[0., 1.], [1., 1.]]]) # WIO (I == C)
>>> result = x.conv1d_transpose(filters, (1,), 'VALID')
>>> print(result)
ivy.array([[[ 2., 3.],
... [ 4., 7.],
... [ 7., 13.],
... [11., 20.]]])
"""
return ivy.conv1d_transpose(
self._data,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
bias=bias,
out=out,
)
def depthwise_conv2d(
self: ivy.Array,
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int], Tuple[int, int]],
padding: Union[str, List[int]],
/,
*,
data_format: str = "NHWC",
dilations: Union[int, Tuple[int], Tuple[int, int]] = 1,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.depthwise_conv2d. This
method simply wraps the function, and so the docstring for
ivy.depthwise_conv2d also applies to this method with minimal changes.
Parameters
----------
self
Input image *[batch_size,h,w,d]*.
filters
Convolution filters *[fh,fw,d_in]*. (d_in must be the same as d from self)
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating the
per-dimension paddings.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
>>> x = ivy.randint(0, 255, shape=(1, 128, 128, 3)).astype(ivy.float32) / 255.0
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3])
>>> y = x.depthwise_conv2d(filters, 2, 'SAME')
>>> print(y.shape)
(1, 64, 64, 3)
"""
return ivy.depthwise_conv2d(
self._data,
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
out=out,
)
def conv2d(
self: ivy.Array,
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int, int]],
padding: str,
/,
*,
data_format: str = "NHWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int, int]] = 1,
dilations: Union[int, Tuple[int, int]] = 1,
bias: Optional[ivy.Container] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of `ivy.conv2d`. This method
simply wraps the function, and so the docstring for `ivy.conv2d` also
applies to this method with minimal changes.
Parameters
----------
self
Input image *[batch_size,h,w,d_in]* or *[batch_size,d_in,h,w]*.
filters
Convolution filters *[fh,fw,d_in,d_out]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
filter_format
Either "channel_first" or "channel_last". Defaults to "channel_last".
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
>>> x = ivy.array([[[[1.], [2.0],[3.]],
... [[1.], [2.0],[3.]],
... [[1.], [2.0],[3.]]]]) #NHWC
>>> filters = ivy.array([[[[0.]], [[1.]], [[0.]]],
... [[[0.]], [[1.]], [[0.]]],
... [[[0.]], [[1.]], [[0.]]]]) #HWIO
>>> result = x.conv2d(filters, 1, 'SAME', data_format='NHWC',
... dilations= 1)
>>> print(result)
ivy.array([[
[[2.],[4.],[6.]],
[[3.],[6.],[9.]],
[[2.],[4.],[6.]]
]])
"""
return ivy.conv2d(
            self._data,
filters,
strides,
padding,
data_format=data_format,
filter_format=filter_format,
x_dilations=x_dilations,
dilations=dilations,
bias=bias,
out=out,
)
def conv2d_transpose(
self: ivy.Array,
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int, int]],
padding: str,
/,
*,
output_shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
filter_format: str = "channel_last",
data_format: str = "NHWC",
dilations: Union[int, Tuple[int, int]] = 1,
out: Optional[ivy.Array] = None,
bias: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of `ivy.conv2d_transpose`. This
method simply wraps the function, and so the docstring for
`ivy.conv2d_transpose` also applies to this method with minimal
changes.
Parameters
----------
self
Input image *[batch_size,h,w,d_in]* or *[batch_size,d_in,h,w]*.
filters
Convolution filters *[fh,fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating the
per-dimension paddings.
output_shape
Shape of the output (Default value = None)
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IOHW",input data formats, while "channel_last" corresponds to "HWOI".
data_format
The ordering of the dimensions in the input, one of "NHWC" or "NCHW". "NHWC"
corresponds to inputs with shape (batch_size, height, width, channels),
while "NCHW" corresponds to input with shape (batch_size, channels, height,
width). Default is ``"NHWC"``.
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
Optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The result of the transpose convolution operation.
Examples
--------
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 28, 28, 3])
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 6, 3])
>>> y = x.conv2d_transpose(filters,2,'SAME',)
>>> print(y.shape)
(1, 56, 56, 6)
"""
return ivy.conv2d_transpose(
self._data,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
out=out,
bias=bias,
)
def conv3d(
self: ivy.Array,
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int, int, int]],
padding: str,
/,
*,
data_format: str = "NDHWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int, int, int]] = 1,
dilations: Union[int, Tuple[int, int, int]] = 1,
bias: Optional[ivy.Array] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of `ivy.conv3d`. This method
simply wraps the function, and so the docstring for `ivy.conv3d` also
applies to this method with minimal changes.
Parameters
----------
        self
Input volume *[batch_size,d,h,w,d_in]*.
filters
Convolution filters *[fd,fh,fw,d_in,d_out]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
"NDHWC" or "NCDHW". Defaults to "NDHWC".
filter_format
Either "channel_first" or "channel_last". Defaults to "channel_last".
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
>>> x = ivy.ones((1, 3, 3, 3, 1)).astype(ivy.float32)
>>> filters = ivy.ones((1, 3, 3, 1, 1)).astype(ivy.float32)
>>> result = x.conv3d(filters, 2, 'SAME')
>>> print(result)
ivy.array([[[[[4.],[4.]],[[4.],[4.]]],[[[4.],[4.]],[[4.],[4.]]]]])
"""
return ivy.conv3d(
self._data,
filters,
strides,
padding,
data_format=data_format,
filter_format=filter_format,
x_dilations=x_dilations,
dilations=dilations,
bias=bias,
out=out,
)
def conv3d_transpose(
self: ivy.Array,
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]],
padding: Union[str, List[int]],
/,
*,
output_shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
filter_format: str = "channel_last",
data_format: str = "NDHWC",
dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
bias: Optional[ivy.Array] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of `ivy.conv3d_transpose`. This
method simply wraps the function, and so the docstring for
`ivy.conv3d_transpose` also applies to this method with minimal
changes.
Parameters
----------
self
Input volume *[batch_size,d,h,w,d_in]* or *[batch_size,d_in,d,h,w]*.
filters
Convolution filters *[fd,fh,fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
output_shape
Shape of the output (Default value = None)
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IODHW",input data formats, while "channel_last" corresponds to "DHWOI".
data_format
The ordering of the dimensions in the input, one of "NDHWC" or
"NCDHW". "NDHWC" corresponds to inputs with shape (batch_size,
depth, height, width, channels), while "NCDHW" corresponds
to input with shape (batch_size, channels, depth, height,
width).
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
The result of the transpose convolution operation.
Examples
--------
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 3, 28, 28, 3])
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 6, 3])
>>> y = x.conv3d_transpose(filters, 2, 'SAME')
>>> print(y.shape)
(1, 6, 56, 56, 6)
"""
return ivy.conv3d_transpose(
self._data,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
bias=bias,
out=out,
)
def lstm_update(
self: ivy.Array,
init_h: Union[ivy.Array, ivy.NativeArray],
init_c: Union[ivy.Array, ivy.NativeArray],
kernel: Union[ivy.Array, ivy.NativeArray],
recurrent_kernel: Union[ivy.Array, ivy.NativeArray],
/,
*,
bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
recurrent_bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Tuple[ivy.Array, ivy.Array]:
"""ivy.Array instance method variant of ivy.lstm_update. This method
simply wraps the function, and so the docstring for ivy.lstm_update
also applies to this method with minimal changes.
Parameters
----------
init_h
initial state tensor for the cell output *[batch_shape, out]*.
init_c
initial state tensor for the cell hidden state *[batch_shape, out]*.
kernel
weights for cell kernel *[in, 4 x out]*.
recurrent_kernel
weights for cell recurrent kernel *[out, 4 x out]*.
bias
bias for cell kernel *[4 x out]*. (Default value = None)
recurrent_bias
bias for cell recurrent kernel *[4 x out]*. (Default value = None)
Returns
-------
ret
hidden state for all timesteps *[batch_shape,t,out]* and cell state for last
timestep *[batch_shape,out]*
Examples
--------
>>> x = ivy.randint(0, 20, shape=(6, 20, 3))
>>> h_i = ivy.random_normal(shape=(6, 5))
>>> c_i = ivy.random_normal(shape=(6, 5))
>>> kernel = ivy.random_normal(shape=(3, 4 * 5))
>>> rc = ivy.random_normal(shape=(5, 4 * 5))
>>> result = x.lstm_update(h_i, c_i, kernel, rc)
>>> result[0].shape
(6, 20, 5)
>>> result[1].shape
(6, 5)
"""
return ivy.lstm_update(
self._data,
init_h,
init_c,
kernel,
recurrent_kernel,
bias=bias,
recurrent_bias=recurrent_bias,
)
| ivy/ivy/data_classes/array/layers.py/0 | {
"file_path": "ivy/ivy/data_classes/array/layers.py",
"repo_id": "ivy",
"token_count": 17955
} | 10 |
"""Ivy wrapping functions for conversions.
Collection of Ivy functions for wrapping functions to accept and return
ivy.Array instances.
"""
# global
from typing import Union, Dict, Optional, List
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithConversions(ContainerBase):
@staticmethod
def _static_to_native(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
nested: Union[bool, ivy.Container] = False,
include_derived: Optional[Union[Dict[str, bool], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.to_native.
This method simply wraps the function, and so the docstring for ivy.to_native
also applies to this method with minimal changes.
Parameters
----------
x
The input to be converted.
nested
Whether to apply the conversion on arguments in a nested manner. If so, all
dicts, lists and tuples will be traversed to their lowest leaves in search
of ivy.Array instances. Default is ``False``.
include_derived
            Whether to also recurse into classes derived from tuple, list and dict.
Default is ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Container object with all sub-arrays converted to their native format.
"""
return ContainerBase.cont_multi_map_in_function(
"to_native",
x,
nested=nested,
include_derived=include_derived,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def to_native(
self: ivy.Container,
nested: Union[bool, ivy.Container] = False,
include_derived: Optional[Union[Dict[str, bool], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.to_native.
This method simply wraps the function, and so the docstring for ivy.to_native
also applies to this method with minimal changes.
Parameters
----------
self
The input to be converted.
nested
Whether to apply the conversion on arguments in a nested manner. If so, all
dicts, lists and tuples will be traversed to their lowest leaves in search
of ivy.Array instances. Default is ``False``.
include_derived
            Whether to also recurse into classes derived from tuple, list and dict.
Default is ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Container object with all sub-arrays converted to their native format.
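        Examples
        --------
        A minimal sketch, assuming the default NumPy backend (the concrete
        native type depends on the backend in use):
        >>> x = ivy.Container(a=ivy.array([1., 2.]), b=ivy.array([3., 4.]))
        >>> y = x.to_native()
        >>> print(type(y.a))
        <class 'numpy.ndarray'>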
"""
return self._static_to_native(
self,
nested,
include_derived,
key_chains,
to_apply,
prune_unapplied,
map_sequences,
out=out,
)
@staticmethod
def _static_to_ivy(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
nested: Union[bool, ivy.Container] = False,
include_derived: Optional[Union[Dict[str, bool], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.to_ivy.
This method simply wraps the function, and so the docstring for ivy.to_ivy also
applies to this method with minimal changes.
Parameters
----------
x
The input to be converted.
nested
Whether to apply the conversion on arguments in a nested manner. If so, all
dicts, lists and tuples will be traversed to their lowest leaves in search
of ivy.Array instances. Default is ``False``.
include_derived
            Whether to also recurse into classes derived from tuple, list and dict.
Default is ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Container object with all native sub-arrays converted to their ivy.Array
instances.
"""
return ContainerBase.cont_multi_map_in_function(
"to_ivy",
x,
nested=nested,
include_derived=include_derived,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def to_ivy(
self: ivy.Container,
nested: Union[bool, ivy.Container] = False,
include_derived: Optional[Union[Dict[str, bool], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.to_ivy.
This method simply wraps the function, and so the docstring for ivy.to_ivy also
applies to this method with minimal changes.
Parameters
----------
self
The input to be converted.
nested
Whether to apply the conversion on arguments in a nested manner. If so,
all dicts, lists and tuples will be traversed to their lowest leaves in
search of ivy.Array instances. Default is ``False``.
include_derived
            Whether to also recurse into classes derived from tuple, list and dict.
Default is ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Container object with all native sub-arrays converted to their ivy.Array
instances.
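        Examples
        --------
        A minimal sketch, assuming the default NumPy backend:
        >>> import numpy as np
        >>> x = ivy.Container(a=np.array([1., 2.]))
        >>> y = x.to_ivy()
        >>> print(y.a)
        ivy.array([1., 2.])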
"""
return self._static_to_ivy(
self,
nested,
include_derived,
key_chains,
to_apply,
prune_unapplied,
map_sequences,
out=out,
)
| ivy/ivy/data_classes/container/conversions.py/0 | {
"file_path": "ivy/ivy/data_classes/container/conversions.py",
"repo_id": "ivy",
"token_count": 4174
} | 11 |
# global
from typing import Union, Optional, List, Dict, Tuple, Sequence, Literal
# local
from ivy.data_classes.container.base import ContainerBase
import ivy
class _ContainerWithLinearAlgebraExperimental(ContainerBase):
@staticmethod
def static_eigh_tridiagonal(
alpha: Union[ivy.Array, ivy.NativeArray, ivy.Container],
beta: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
eigvals_only: Union[bool, ivy.Container] = True,
select: Union[str, ivy.Container] = "a",
select_range: Optional[
Union[Tuple[int, int], List[int], ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
tol: Optional[Union[float, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Union[ivy.Container, Tuple[ivy.Container, ivy.Container]]:
"""ivy.Container static method variant of ivy.eigh_tridiagonal. This
method simply wraps the function, and so the docstring for
ivy.eigh_tridiagonal also applies to this method with minimal changes.
Parameters
----------
alpha
An array or a container of real or complex arrays each of
shape (n), the diagonal elements of the matrix.
beta
An array or a container of real or complex arrays each of shape (n-1),
containing the elements of the first super-diagonal of the matrix.
eigvals_only
If False, both eigenvalues and corresponding eigenvectors are computed.
If True, only eigenvalues are computed. Default is True.
select
Optional string with values in {'a', 'v', 'i'}
(default is 'a') that determines which eigenvalues
to calculate: 'a': all eigenvalues. 'v': eigenvalues
in the interval (min, max] given by select_range.
'i': eigenvalues with indices min <= i <= max.
select_range
Size 2 tuple or list or array specifying the range of
eigenvalues to compute together with select. If select
is 'a', select_range is ignored.
tol
Optional scalar. Ignored when backend is not Tensorflow. The
absolute tolerance to which each eigenvalue is required. An
eigenvalue (or cluster) is considered to have converged if
it lies in an interval of this width. If tol is None (default),
the value eps*|T|_2 is used where eps is the machine precision,
and |T|_2 is the 2-norm of the matrix T.
Returns
-------
eig_vals
The eigenvalues of the matrix in non-decreasing order.
eig_vectors
If eigvals_only is False the eigenvectors are returned in the second
output argument.
Examples
--------
With :class:`ivy.Container` input:
>>> alpha = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([2., 2., 2.]))
>>> beta = ivy.array([0.,2.])
>>> y = ivy.Container.static_eigh_tridiagonal(alpha, beta)
>>> print(y)
{
a: ivy.array([-0.56155, 0., 3.56155]),
b: ivy.array([0., 2., 4.])
}
>>> alpha = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([2., 2., 2.]))
>>> beta = ivy.Container(a=ivy.array([0.,2.]), b=ivy.array([2.,2.]))
>>> y = ivy.Container.static_eigh_tridiagonal(alpha, beta)
>>> print(y)
{
a: ivy.array([-0.56155, 0., 3.56155]),
b: ivy.array([-0.82842, 2., 4.82842])
}
"""
return ContainerBase.cont_multi_map_in_function(
"eigh_tridiagonal",
alpha,
beta,
eigvals_only=eigvals_only,
select=select,
select_range=select_range,
tol=tol,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def eigh_tridiagonal(
self: ivy.Container,
beta: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
eigvals_only: Union[bool, ivy.Container] = True,
select: Union[str, ivy.Container] = "a",
select_range: Optional[
Union[Tuple[int, int], List[int], ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
tol: Optional[Union[float, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Union[ivy.Container, Tuple[ivy.Container, ivy.Container]]:
"""ivy.Container instance method variant of ivy.eigh_tridiagonal. This
method simply wraps the function, and so the docstring for
ivy.eigh_tridiagonal also applies to this method with minimal changes.
Parameters
----------
self
A container of real or complex arrays each of shape (n),
the diagonal elements of the matrix.
beta
An array or a container of real or complex arrays each of shape
(n-1), containing the elements of the first super-diagonal of the matrix.
eigvals_only
If False, both eigenvalues and corresponding eigenvectors are computed.
If True, only eigenvalues are computed. Default is True.
select
Optional string with values in {'a', 'v', 'i'} (default is 'a') that
determines which eigenvalues to calculate: 'a': all eigenvalues.
'v': eigenvalues in the interval (min, max] given by select_range.
'i': eigenvalues with indices min <= i <= max.
select_range
Size 2 tuple or list or array specifying the range of eigenvalues to
compute together with select. If select is 'a', select_range is ignored.
tol
Optional scalar. Ignored when backend is not Tensorflow. The absolute
tolerance to which each eigenvalue is required. An eigenvalue (or cluster)
is considered to have converged if it lies in an interval of this width.
If tol is None (default), the value eps*|T|_2 is used where eps is the
machine precision, and |T|_2 is the 2-norm of the matrix T.
Returns
-------
eig_vals
The eigenvalues of the matrix in non-decreasing order.
eig_vectors
If eigvals_only is False the eigenvectors are returned in
the second output argument.
Examples
--------
>>> alpha = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([2., 2., 2.]))
>>> beta = ivy.array([0.,2.])
>>> y = alpha.eigh_tridiagonal(beta)
>>> print(y)
{
a: ivy.array([-0.56155, 0., 3.56155]),
b: ivy.array([0., 2., 4.])
}
>>> alpha = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([2., 2., 2.]))
>>> beta = ivy.Container(a=ivy.array([0.,2.]), b=ivy.array([2.,2.]))
>>> y = alpha.eigh_tridiagonal(beta)
>>> print(y)
{
a: ivy.array([-0.56155, 0., 3.56155]),
b: ivy.array([-0.82842, 2., 4.82842])
}
"""
return self.static_eigh_tridiagonal(
self,
beta,
eigvals_only=eigvals_only,
select=select,
select_range=select_range,
tol=tol,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_diagflat(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
offset: Union[int, ivy.Container] = 0,
padding_value: Union[float, ivy.Container] = 0,
align: Union[str, ivy.Container] = "RIGHT_LEFT",
num_rows: Union[int, ivy.Container] = -1,
num_cols: Union[int, ivy.Container] = -1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"diagflat",
x,
offset=offset,
padding_value=padding_value,
align=align,
num_rows=num_rows,
num_cols=num_cols,
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def diagflat(
self: ivy.Container,
/,
*,
offset: Union[int, ivy.Container] = 0,
padding_value: Union[float, ivy.Container] = 0,
align: Union[str, ivy.Container] = "RIGHT_LEFT",
num_rows: Union[int, ivy.Container] = -1,
num_cols: Union[int, ivy.Container] = -1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.diagflat. This method
simply wraps the function, and so the docstring for ivy.diagflat also
applies to this method with minimal changes.
Examples
--------
>>> x = ivy.Container(a=[1,2])
        >>> x.diagflat(offset=1)
{
a: ivy.array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
}
"""
return self.static_diagflat(
self,
offset=offset,
padding_value=padding_value,
align=align,
num_rows=num_rows,
num_cols=num_cols,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_kron(
a: Union[ivy.Array, ivy.NativeArray, ivy.Container],
b: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.kron. This method simply
wraps the function, and so the docstring for ivy.kron also applies to
this method with minimal changes.
Parameters
----------
a
first container with input arrays.
b
second container with input arrays
out
optional output container, for writing the result to.
Returns
-------
ret
container including arrays corresponding to the Kronecker product of
the arrays in the input containers, computed element-wise
Examples
--------
>>> a = ivy.Container(x=ivy.array([1,2]), y=ivy.array(50))
>>> b = ivy.Container(x=ivy.array([3,4]), y=ivy.array(9))
>>> ivy.Container.static_kron(a, b)
{
            x: ivy.array([3, 4, 6, 8]),
            y: ivy.array([450])
}
"""
return ContainerBase.cont_multi_map_in_function(
"kron",
a,
b,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def kron(
self: ivy.Container,
b: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.kron. This method
simply wraps the function, and so the docstring for ivy.kron also
applies to this method with minimal changes.
Examples
--------
>>> a = ivy.Container(x=ivy.array([1,2]), y=ivy.array([50]))
>>> b = ivy.Container(x=ivy.array([3,4]), y=ivy.array(9))
>>> a.kron(b)
{
            x: ivy.array([3, 4, 6, 8]),
            y: ivy.array([450])
}
"""
return self.static_kron(
self,
b,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_matrix_exp(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"matrix_exp",
x,
out=out,
key_chains=key_chains,
to_apply=to_apply,
)
def matrix_exp(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.diagflat. This method
simply wraps the function, and so the docstring for ivy.diagflat also
applies to this method with minimal changes.
Examples
--------
>>> x = ivy.array([[[1., 0.],
[0., 1.]],
[[2., 0.],
[0., 2.]]])
>>> ivy.matrix_exp(x)
        ivy.array([[[2.7183, 0.0000],
                    [0.0000, 2.7183]],
                   [[7.3891, 0.0000],
                    [0.0000, 7.3891]]])
"""
return self.static_matrix_exp(
self,
key_chains=key_chains,
to_apply=to_apply,
out=out,
)
@staticmethod
def static_eig(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.eig. This method simply
wraps the function, and so the docstring for ivy.eig also applies to
this method with minimal changes.
Parameters
----------
x
container with input arrays.
Returns
-------
ret
container including tuple of arrays corresponding to
            eigenvalues and eigenvectors of input array
Examples
--------
>>> x = ivy.array([[1,2], [3,4]])
>>> c = ivy.Container({'x':{'xx':x}})
>>> ivy.Container.eig(c)
{
x: {
xx: (tuple(2), <class ivy.array.array.Array>, shape=[2, 2])
}
}
>>> ivy.Container.eig(c)['x']['xx']
(
ivy.array([-0.37228107+0.j, 5.3722816 +0.j]),
ivy.array([
[-0.8245648 +0.j, -0.41597357+0.j],
[0.56576747+0.j, -0.9093767 +0.j]
])
)
"""
return ContainerBase.cont_multi_map_in_function(
"eig",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def eig(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.eig. This method simply
wraps the function, and so the docstring for ivy.eig also applies to
this method with minimal changes.
Parameters
----------
        self
container with input arrays.
Returns
-------
ret
            container including arrays corresponding to the
            eigenvalues and eigenvectors of the input arrays
Examples
--------
>>> x = ivy.array([[1,2], [3,4]])
>>> c = ivy.Container({'x':{'xx':x}})
>>> c.eig()
{
x: {
xx: (tuple(2), <class ivy.array.array.Array>, shape=[2, 2])
}
}
        >>> c.eig()['x']['xx']
(
ivy.array([-0.37228107+0.j, 5.3722816 +0.j]),
ivy.array([
[-0.8245648 +0.j, -0.41597357+0.j],
[0.56576747+0.j, -0.9093767 +0.j]
])
)
"""
return self.static_eig(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_eigvals(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.eigvals. This method
simply wraps the function, and so the docstring for ivy.eigvals also
applies to this method with minimal changes.
Parameters
----------
x
container with input arrays.
Returns
-------
ret
container including array corresponding
to eigenvalues of input array
Examples
--------
>>> x = ivy.array([[1,2], [3,4]])
>>> c = ivy.Container({'x':{'xx':x}})
>>> ivy.Container.eigvals(c)
{
x: {
xx: ivy.array([-0.37228132+0.j, 5.37228132+0.j])
}
}
>>> ivy.Container.eigvals(c)['x']['xx']
ivy.array([-0.37228132+0.j, 5.37228132+0.j])
"""
return ContainerBase.cont_multi_map_in_function(
"eigvals",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def eigvals(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.eigvals. This method
simply wraps the function, and so the docstring for ivy.eigvals also
applies to this method with minimal changes.
Parameters
----------
        self
container with input arrays.
Returns
-------
ret
container including array corresponding
to eigenvalues of input array
Examples
--------
>>> x = ivy.array([[1,2], [3,4]])
>>> c = ivy.Container({'x':{'xx':x}})
>>> c.eigvals()
{
x: {
xx: ivy.array([-0.37228132+0.j, 5.37228132+0.j])
}
}
>>> c.eigvals()['x']['xx']
ivy.array([-0.37228132+0.j, 5.37228132+0.j])
"""
return self.static_eigvals(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_adjoint(
x: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
):
"""ivy.Container static method variant of ivy.adjoint. This method
simply wraps the function, and so the docstring for ivy.adjoint also
applies to this method with minimal changes.
Parameters
----------
x
container with input arrays of dimensions greater than 1.
out
optional output container, for writing the result to.
Returns
-------
ret
container including arrays corresponding to the conjugate transpose of
the arrays in the input container
Examples
--------
>>> x = np.array([[1.-1.j, 2.+2.j],
[3.+3.j, 4.-4.j]])
>>> y = np.array([[1.-2.j, 3.+4.j],
[1.-0.j, 2.+6.j]])
>>> c = ivy.Container(a=ivy.array(x), b=ivy.array(y))
>>> ivy.Container.static_adjoint(c)
{
a: ivy.array([[1.+1.j, 3.-3.j],
[2.-2.j, 4.+4.j]]),
b: ivy.array([[1.+2.j, 1.-0.j],
[3.-4.j, 2.-6.j]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"adjoint",
x,
out=out,
key_chains=key_chains,
to_apply=to_apply,
)
def adjoint(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
):
"""ivy.Container instance method variant of ivy.adjoint. This method
simply wraps the function, and so the docstring for ivy.adjoint also
applies to this method with minimal changes.
Examples
--------
>>> x = ivy.array([[1.-1.j, 2.+2.j],
...                [3.+3.j, 4.-4.j]])
>>> c = ivy.Container(a=x)
>>> c.adjoint()
{
a: ivy.array([[1.+1.j, 3.-3.j],
[2.-2.j, 4.+4.j]])
}
"""
return self.static_adjoint(
self, key_chains=key_chains, to_apply=to_apply, out=out
)
@staticmethod
def static_multi_dot(
x: Sequence[Union[ivy.Container, ivy.Array, ivy.NativeArray]],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.multi_dot. This method
simply wraps the function, and so the docstring for ivy.multi_dot also
applies to this method with minimal changes.
Parameters
----------
x
sequence of matrices to multiply.
out
optional output array, for writing the result to. It must have a valid
shape, i.e. the resulting shape after applying regular matrix multiplication
to the inputs.
Returns
-------
ret
dot product of the arrays.
Examples
--------
With :class:`ivy.Container` input:
>>> a = ivy.Container(x=ivy.arange(2 * 3).reshape((2, 3)),
... y=ivy.arange(2 * 3).reshape((2, 3)))
>>> b = ivy.Container(x=ivy.arange(3 * 2).reshape((3, 2)),
... y=ivy.arange(3 * 2).reshape((3, 2)))
>>> c = ivy.Container(x=ivy.arange(2 * 2).reshape((2, 2)),
... y=ivy.arange(2 * 2).reshape((2, 2)))
>>> ivy.Container.static_multi_dot((a, b, c))
{
x: ivy.array([[26, 49],
[80, 148]]),
y: ivy.array([[26, 49],
[80, 148]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"multi_dot",
x,
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def multi_dot(
self: ivy.Container,
arrays: Sequence[Union[ivy.Container, ivy.Array, ivy.NativeArray]],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.multi_dot. This method
simply wraps the function, and so the docstring for ivy.multi_dot also
applies to this method with minimal changes.
Examples
--------
>>> a = ivy.Container(x=ivy.arange(2 * 3).reshape((2, 3)),
... y=ivy.arange(2 * 3).reshape((2, 3)))
>>> b = ivy.Container(x=ivy.arange(3 * 2).reshape((3, 2)),
... y=ivy.arange(3 * 2).reshape((3, 2)))
>>> c = ivy.Container(x=ivy.arange(2 * 2).reshape((2, 2)),
... y=ivy.arange(2 * 2).reshape((2, 2)))
>>> a.multi_dot((b, c))
{
x: ivy.array([[26, 49],
[80, 148]]),
y: ivy.array([[26, 49],
[80, 148]])
}
"""
return self.static_multi_dot(
(self, *arrays),
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_cond(
x: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
p: Optional[Union[int, float, None, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
):
"""ivy.Container static method variant of ivy.cond. This method simply
wraps the function, and so the docstring for ivy.cond also applies to
this method with minimal changes.
Parameters
----------
x
container with input arrays.
p
order of the norm of the matrix (see ivy.norm).
Returns
-------
ret
container including array corresponding
to condition number of input array
Examples
--------
>>> x = ivy.array([[1,2], [3,4]])
>>> ivy.Container.static_cond(x)
ivy.array(14.933034)
"""
return ContainerBase.cont_multi_map_in_function(
"cond",
x,
p=p,
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def cond(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
p: Optional[Union[int, float, None, ivy.Container]] = None,
):
"""ivy.Container instance method variant of ivy.cond. This method
simply wraps the function, and so the docstring for ivy.cond also
applies to this method with minimal changes.
Parameters
----------
self
container with input arrays.
p
order of the norm of the matrix (see ivy.norm).
Returns
-------
ret
container including array corresponding
to condition number of input array
Examples
--------
>>> x = ivy.array([[1,2], [3,4]])
>>> c = ivy.Container(a=x)
>>> c.cond()
{
a: ivy.array(14.933034)
}
>>> c.cond(p=1)
{
a: ivy.array(21.0)
}
With :class:`ivy.Container` input:
>>> a = ivy.Container(x=ivy.arange(2 * 3).reshape((2, 3)),
... y=ivy.arange(2 * 3).reshape((2, 3)))
>>> a.cond()
{
x: ivy.array(7.3484693),
y: ivy.array(7.3484693)
}
"""
return self.static_cond(
self,
p=p,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_mode_dot(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
matrix_or_vector: Union[ivy.Array, ivy.NativeArray, ivy.Container],
mode: Union[int, ivy.Container],
transpose: Optional[Union[bool, ivy.Container]] = False,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.mode_dot. This method
simply wraps the function, and so the docstring for ivy.mode_dot also
applies to this method with minimal changes.
Parameters
----------
x
tensor of shape ``(i_1, ..., i_k, ..., i_N)``
matrix_or_vector
1D or 2D array of shape ``(J, i_k)`` or ``(i_k, )``
matrix or vectors to which to n-mode multiply the tensor
mode
int in the range(1, N)
transpose
If True, the matrix is transposed.
For complex tensors, the conjugate transpose is used.
out
optional output array, for writing the result to.
It must have a shape that the result can broadcast to.
Returns
-------
ivy.Container
`mode`-mode product of `tensor` by `matrix_or_vector`
* of shape :math:`(i_1, ..., i_{k-1}, J, i_{k+1}, ..., i_N)`
if matrix_or_vector is a matrix
* of shape :math:`(i_1, ..., i_{k-1}, i_{k+1}, ..., i_N)`
if matrix_or_vector is a vector
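Examples
--------
A minimal sketch, assuming 0-based mode indexing as in TensorLy; the
expected value is given as a comment rather than doctest output:
>>> x = ivy.Container(a=ivy.array([[1., 2., 3.], [4., 5., 6.]]))
>>> v = ivy.array([1., 1., 1.])
>>> y = ivy.Container.static_mode_dot(x, v, 1)
>>> # contracting mode 1 with a ones vector gives the row sums,
>>> # so y["a"] is expected to be ivy.array([6., 15.])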
"""
return ContainerBase.cont_multi_map_in_function(
"mode_dot",
x,
matrix_or_vector,
mode,
transpose,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def mode_dot(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
matrix_or_vector: Union[ivy.Array, ivy.NativeArray, ivy.Container],
mode: Union[int, ivy.Container],
transpose: Optional[Union[bool, ivy.Container]] = False,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
):
"""ivy.Container instance method variant of ivy.mode_dot. This method
simply wraps the function, and so the docstring for ivy.mode_dot also
applies to this method with minimal changes.
Parameters
----------
x
tensor of shape ``(i_1, ..., i_k, ..., i_N)``
matrix_or_vector
1D or 2D array of shape ``(J, i_k)`` or ``(i_k, )``
matrix or vectors to which to n-mode multiply the tensor
mode
int in the range(1, N)
transpose
If True, the matrix is transposed.
For complex tensors, the conjugate transpose is used.
out
optional output array, for writing the result to.
It must have a shape that the result can broadcast to.
Returns
-------
ivy.Container
`mode`-mode product of `tensor` by `matrix_or_vector`
* of shape :math:`(i_1, ..., i_{k-1}, J, i_{k+1}, ..., i_N)`
if matrix_or_vector is a matrix
* of shape :math:`(i_1, ..., i_{k-1}, i_{k+1}, ..., i_N)`
if matrix_or_vector is a vector
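Examples
--------
A minimal sketch, assuming 0-based mode indexing as in TensorLy:
>>> x = ivy.Container(a=ivy.array([[1., 2., 3.], [4., 5., 6.]]))
>>> v = ivy.array([1., 1., 1.])
>>> y = x.mode_dot(v, 1)
>>> # y["a"] is expected to be ivy.array([6., 15.]) (row sums)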
"""
return self.static_mode_dot(
self,
matrix_or_vector,
mode,
transpose,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_multi_mode_dot(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
mat_or_vec_list: Sequence[Union[ivy.Array, ivy.NativeArray, ivy.Container]],
/,
modes: Optional[Union[Sequence[int], ivy.Container]] = None,
skip: Optional[Union[Sequence[int], ivy.Container]] = None,
transpose: Optional[Union[bool, ivy.Container]] = False,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.multi_mode_dot. This
method simply wraps the function, and so the docstring for
ivy.multi_mode_dot also applies to this method with minimal changes.
Parameters
----------
x
the input tensor
mat_or_vec_list
sequence of matrices or vectors of length ``tensor.ndim``
skip
None or int, optional, default is None
If not None, index of a matrix to skip.
modes
None or int list, optional, default is None
transpose
If True, the matrices or vectors in the list are transposed.
For complex tensors, the conjugate transpose is used.
out
optional output array, for writing the result to.
It must have a shape that the result can broadcast to.
Returns
-------
ivy.Container
tensor times each matrix or vector in the list at mode `mode`
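Examples
--------
A minimal sketch; the expected value is given as a comment rather than
doctest output:
>>> x = ivy.Container(a=ivy.array([[1., 2., 3.], [4., 5., 6.]]))
>>> vs = [ivy.array([1., 1.]), ivy.array([1., 1., 1.])]
>>> y = ivy.Container.static_multi_mode_dot(x, vs)
>>> # contracting every mode with ones vectors sums all entries,
>>> # so y["a"] is expected to be ivy.array(21.)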
"""
return ContainerBase.cont_multi_map_in_function(
"multi_mode_dot",
x,
mat_or_vec_list,
skip,
modes,
transpose,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def multi_mode_dot(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
mat_or_vec_list: Sequence[Union[ivy.Array, ivy.NativeArray, ivy.Container]],
/,
modes: Optional[Union[Sequence[int], ivy.Container]] = None,
skip: Optional[Union[Sequence[int], ivy.Container]] = None,
transpose: Optional[Union[bool, ivy.Container]] = False,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.multi_mode_dot. This
method simply wraps the function, and so the docstring for
ivy.multi_mode_dot also applies to this method with minimal changes.
Parameters
----------
self
the input tensor
mat_or_vec_list
sequence of matrices or vectors of length ``tensor.ndim``
modes
None or int list, optional, default is None
skip
None or int, optional, default is None
If not None, index of a matrix to skip.
transpose
If True, the matrices or vectors in the list are transposed.
For complex tensors, the conjugate transpose is used.
out
optional output array, for writing the result to.
It must have a shape that the result can broadcast to.
Returns
-------
ivy.Container
tensor times each matrix or vector in the list at mode `mode`
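Examples
--------
A minimal sketch:
>>> x = ivy.Container(a=ivy.array([[1., 2., 3.], [4., 5., 6.]]))
>>> vs = [ivy.array([1., 1.]), ivy.array([1., 1., 1.])]
>>> y = x.multi_mode_dot(vs)
>>> # y["a"] is expected to be ivy.array(21.) (sum of all entries)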
"""
return self.static_multi_mode_dot(
self,
mat_or_vec_list,
skip,
modes,
transpose,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_svd_flip(
U: Union[ivy.Array, ivy.NativeArray, ivy.Container],
V: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
u_based_decision: Optional[Union[bool, ivy.Container]] = True,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, ivy.Container]:
"""ivy.Container static method variant of ivy.svd_flip. This method
simply wraps the function, and so the docstring for ivy.svd_flip also
applies to this method with minimal changes.
Parameters
----------
U
left singular matrix output of SVD
V
right singular matrix output of SVD
u_based_decision
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : container with the same dimensions as the input.
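Examples
--------
A minimal sketch (no output shown; the sign convention follows the
description above):
>>> U = ivy.Container(a=ivy.array([[1., -2.], [-3., 4.]]))
>>> V = ivy.Container(a=ivy.array([[1., 0.], [0., 1.]]))
>>> U_adj, V_adj = ivy.Container.static_svd_flip(U, V)
>>> # the signs are taken from the columns of U (u_based_decision=True)
>>> # and the same signs are applied to the rows of V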
"""
return ContainerBase.cont_multi_map_in_function(
"svd_flip",
U,
V,
u_based_decision,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def svd_flip(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
V: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
u_based_decision: Optional[Union[bool, ivy.Container]] = True,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, ivy.Container]:
"""ivy.Container instance method variant of ivy.svd_flip. This method
simply wraps the function, and so the docstring for ivy.svd_flip
applies to this method with minimal changes.
Parameters
----------
self
left singular matrix output of SVD
V
right singular matrix output of SVD
u_based_decision
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : container with the same dimensions as the input.
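Examples
--------
A minimal sketch:
>>> U = ivy.Container(a=ivy.array([[1., -2.], [-3., 4.]]))
>>> V = ivy.Container(a=ivy.array([[1., 0.], [0., 1.]]))
>>> U_adj, V_adj = U.svd_flip(V)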
"""
return self.static_svd_flip(
self,
V,
u_based_decision,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_make_svd_non_negative(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
U: Union[ivy.Array, ivy.NativeArray, ivy.Container],
S: Union[ivy.Array, ivy.NativeArray, ivy.Container],
V: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
nntype: Optional[Union[Literal["nndsvd", "nndsvda"], ivy.Container]] = "nndsvd",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, ivy.Container]:
"""ivy.Container static method variant of ivy.make_svd_non_negative.
This method simply wraps the function, and so the docstring for
ivy.make_svd_non_negative also applies to this method with minimal
changes.
Parameters
----------
x
tensor being decomposed.
U
left singular matrix from SVD.
S
diagonal matrix from SVD.
V
right singular matrix from SVD.
nntype
whether to fill small values with 0.0 (nndsvd, default)
or the tensor mean (nndsvda).
[1]: Boutsidis & Gallopoulos. Pattern Recognition, 41(4): 1350-1362, 2008.
"""
return ContainerBase.cont_multi_map_in_function(
"make_svd_non_negative",
x,
U,
S,
V,
nntype=nntype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def make_svd_non_negative(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
U: Union[ivy.Array, ivy.NativeArray, ivy.Container],
S: Union[ivy.Array, ivy.NativeArray, ivy.Container],
V: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
nntype: Optional[Union[Literal["nndsvd", "nndsvda"], ivy.Container]] = "nndsvd",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, ivy.Container]:
"""ivy.Container instance method variant of ivy.make_svd_non_negative.
This method simply wraps the function, and so the docstring for
ivy.make_svd_non_negative applies to this method with minimal changes.
Parameters
----------
self
tensor being decomposed.
U
left singular matrix from SVD.
S
diagonal matrix from SVD.
V
right singular matrix from SVD.
nntype
whether to fill small values with 0.0 (nndsvd, default)
or the tensor mean (nndsvda).
[1]: Boutsidis & Gallopoulos. Pattern Recognition, 41(4): 1350-1362, 2008.
"""
return self.static_make_svd_non_negative(
self,
U,
S,
V,
nntype=nntype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_tensor_train(
input_tensor: Union[ivy.Array, ivy.NativeArray, ivy.Container],
rank: Union[Sequence[int], ivy.Container],
/,
*,
svd: Optional[Union[Literal["truncated_svd"], ivy.Container]] = "truncated_svd",
verbose: Optional[Union[bool, ivy.Container]] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, Sequence[ivy.Container]]:
"""ivy.Container static method variant of ivy.tensor_train. This method
simply wraps the function, and so the docstring for ivy.tensor_train
also applies to this method with minimal changes.
Parameters
----------
input_tensor
tensor to be decomposed.
rank
maximum allowable TT-ranks of the decomposition.
svd
SVD method to use.
verbose
level of verbosity.
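Examples
--------
A minimal sketch, assuming the TensorLy rank convention (boundary ranks
of 1 included); core values depend on the SVD, so only the expected core
shapes are noted:
>>> x = ivy.Container(a=ivy.random_uniform(shape=(3, 4, 2)))
>>> cores = ivy.Container.static_tensor_train(x, (1, 2, 2, 1))
>>> # the cores for key "a" are expected to have shapes
>>> # (1, 3, 2), (2, 4, 2) and (2, 2, 1)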
"""
return ContainerBase.cont_multi_map_in_function(
"tensor_train",
input_tensor,
rank,
svd=svd,
verbose=verbose,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def tensor_train(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
rank: Union[Sequence[int], ivy.Container],
/,
*,
svd: Optional[Union[Literal["truncated_svd"], ivy.Container]] = "truncated_svd",
verbose: Optional[Union[bool, ivy.Container]] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, Sequence[ivy.Container]]:
"""ivy.Container instance method variant of ivy.tensor_train. This
method simply wraps the function, and so the docstring for
ivy.tensor_train also applies to this method with minimal changes.
Parameters
----------
input_tensor
tensor to be decomposed.
rank
maximum allowable TT-ranks of the decomposition.
svd
SVD method to use.
verbose
level of verbosity.
"""
return self.static_tensor_train(
self,
rank,
svd=svd,
verbose=verbose,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_truncated_svd(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
compute_uv: Union[bool, ivy.Container] = True,
n_eigenvecs: Optional[Union[int, ivy.Container]] = None,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Union[ivy.Container, Tuple[ivy.Container, ivy.Container, ivy.Container]]:
"""ivy.Container static method variant of ivy.truncated_svd. This
method simply wraps the function, and so the docstring for
ivy.truncated_svd also applies to this method with minimal changes.
Parameters
----------
x
Container of 2D-arrays
compute_uv
If ``True`` then left and right singular vectors
will be computed and returned in ``U`` and ``Vh``,
respectively. Otherwise, only the singular values
will be computed, which can be significantly faster.
n_eigenvecs
if specified, number of eigenvectors/eigenvalues to return;
otherwise full matrices will be returned
Returns
-------
ret
a namedtuple ``(U, S, Vh)``
Each returned container must have the same
floating-point data type as ``x``.
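Examples
--------
A minimal sketch (no output shown, since the factor values depend on the
backend SVD):
>>> x = ivy.Container(a=ivy.array([[1., 2.], [3., 4.], [5., 6.]]))
>>> res = ivy.Container.static_truncated_svd(x, True, 1)
>>> # res holds (U, S, Vh) per leaf, truncated to the largest singular value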
"""
return ContainerBase.cont_multi_map_in_function(
"truncated_svd",
x,
compute_uv,
n_eigenvecs,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def truncated_svd(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
compute_uv: Union[bool, ivy.Container] = True,
n_eigenvecs: Optional[Union[int, ivy.Container]] = None,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Union[ivy.Container, Tuple[ivy.Container, ivy.Container, ivy.Container]]:
"""ivy.Container instance method variant of ivy.truncated_svd. This
method simply wraps the function, and so the docstring for
ivy.truncated_svd also applies to this method with minimal changes.
Parameters
----------
x
Container of 2D-arrays
compute_uv
If ``True`` then left and right singular vectors
will be computed and returned in ``U`` and ``Vh``
respectively. Otherwise, only the singular values will
be computed, which can be significantly faster.
n_eigenvecs
if specified, number of eigenvectors/eigenvalues to return;
otherwise full matrices will be returned
Returns
-------
ret
a namedtuple ``(U, S, Vh)``
Each returned container must have the
same floating-point data type as ``x``.
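Examples
--------
A minimal sketch:
>>> x = ivy.Container(a=ivy.array([[1., 2.], [3., 4.], [5., 6.]]))
>>> res = x.truncated_svd(True, 1)
>>> # res holds (U, S, Vh) per leaf, truncated to the largest singular value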
"""
return self.static_truncated_svd(
self,
compute_uv,
n_eigenvecs,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_initialize_tucker(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
rank: Union[Sequence[int], ivy.Container],
modes: Union[Sequence[int], ivy.Container],
/,
*,
init: Optional[
Union[Literal["svd", "random"], ivy.TuckerTensor, ivy.Container]
] = "svd",
seed: Optional[Union[int, ivy.Container]] = None,
svd: Optional[Union[Literal["truncated_svd"], ivy.Container]] = "truncated_svd",
non_negative: Optional[Union[bool, ivy.Container]] = False,
mask: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
svd_mask_repeats: Optional[Union[int, ivy.Container]] = 5,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, Sequence[ivy.Container]]:
"""ivy.Container static method variant of ivy.initialize_tucker. This
method simply wraps the function, and so the docstring for
ivy.initialize_tucker also applies to this method with minimal changes.
Parameters
----------
x
input tensor
rank
number of components
modes
modes to consider in the input tensor
seed
Used to create a random seed distribution
when init == 'random'
init
initialization scheme for tucker decomposition.
svd
function to use to compute the SVD
non_negative
if True, non-negative factors are returned
mask
array of booleans with the same shape as ``tensor`` should be 0 where
the values are missing and 1 everywhere else. Note: if tensor is
sparse, then mask should also be sparse with a fill value of 1 (or
True).
svd_mask_repeats
number of iterations for imputing the values in the SVD matrix when
mask is not None
Returns
-------
core
initialized core tensor
factors
list of factors
"""
return ContainerBase.cont_multi_map_in_function(
"initialize_tucker",
x,
rank,
modes,
seed=seed,
init=init,
svd=svd,
non_negative=non_negative,
mask=mask,
svd_mask_repeats=svd_mask_repeats,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def initialize_tucker(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
rank: Union[Sequence[int], ivy.Container],
modes: Union[Sequence[int], ivy.Container],
/,
*,
init: Optional[
Union[Literal["svd", "random"], ivy.TuckerTensor, ivy.Container]
] = "svd",
seed: Optional[Union[int, ivy.Container]] = None,
svd: Optional[Union[Literal["truncated_svd"], ivy.Container]] = "truncated_svd",
non_negative: Optional[Union[bool, ivy.Container]] = False,
mask: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
svd_mask_repeats: Optional[Union[int, ivy.Container]] = 5,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, Sequence[ivy.Container]]:
"""ivy.Container instance method variant of ivy.initialize_tucker. This
method simply wraps the function, and so the docstring for
ivy.initialize_tucker also applies to this method with minimal changes.
Parameters
----------
x
input tensor
rank
number of components
modes
modes to consider in the input tensor
seed
Used to create a random seed distribution
when init == 'random'
init
initialization scheme for tucker decomposition.
svd
function to use to compute the SVD
non_negative
if True, non-negative factors are returned
mask
array of booleans with the same shape as ``tensor`` should be 0 where
the values are missing and 1 everywhere else. Note: if tensor is
sparse, then mask should also be sparse with a fill value of 1 (or
True).
svd_mask_repeats
number of iterations for imputing the values in the SVD matrix when
mask is not None
Returns
-------
core
initialized core tensor
factors
list of factors
"""
return self.static_initialize_tucker(
self,
rank,
modes,
seed=seed,
init=init,
svd=svd,
non_negative=non_negative,
mask=mask,
svd_mask_repeats=svd_mask_repeats,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_partial_tucker(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
rank: Union[Sequence[int], ivy.Container],
modes: Union[Sequence[int], ivy.Container],
/,
*,
n_iter_max: Optional[Union[int, ivy.Container]] = 100,
init: Optional[
Union[Literal["svd", "random"], ivy.TuckerTensor, ivy.Container]
] = "svd",
svd: Optional[Union[Literal["truncated_svd"], ivy.Container]] = "truncated_svd",
seed: Optional[Union[int, ivy.Container]] = None,
mask: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
svd_mask_repeats: Optional[Union[int, ivy.Container]] = 5,
tol: Optional[Union[float, ivy.Container]] = 10e-5,
verbose: Optional[Union[bool, ivy.Container]] = False,
return_errors: Optional[Union[bool, ivy.Container]] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, Sequence[ivy.Container]]:
"""ivy.Container static method variant of ivy.partial_tucker. This
method simply wraps the function, and so the docstring for
ivy.partial_tucker also applies to this method with minimal changes.
Parameters
----------
x
input tensor
rank
number of components
modes
modes to consider in the input tensor
seed
Used to create a random seed distribution
when init == 'random'
init
initialization scheme for tucker decomposition.
svd
function to use to compute the SVD
mask
array of booleans with the same shape as ``tensor`` should be 0 where
the values are missing and 1 everywhere else. Note: if tensor is
sparse, then mask should also be sparse with a fill value of 1 (or
True).
svd_mask_repeats
number of iterations for imputing the values in the SVD matrix when
mask is not None
Returns
-------
core
initialized core tensor
factors
list of factors
"""
return ContainerBase.cont_multi_map_in_function(
"partial_tucker",
x,
rank,
modes,
seed=seed,
init=init,
svd=svd,
n_iter_max=n_iter_max,
mask=mask,
svd_mask_repeats=svd_mask_repeats,
tol=tol,
verbose=verbose,
return_errors=return_errors,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def partial_tucker(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
rank: Union[Sequence[int], ivy.Container],
modes: Union[Sequence[int], ivy.Container],
/,
*,
n_iter_max: Optional[Union[int, ivy.Container]] = 100,
init: Optional[
Union[Literal["svd", "random"], ivy.TuckerTensor, ivy.Container]
] = "svd",
svd: Optional[Union[Literal["truncated_svd"], ivy.Container]] = "truncated_svd",
seed: Optional[Union[int, ivy.Container]] = None,
mask: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
svd_mask_repeats: Optional[Union[int, ivy.Container]] = 5,
tol: Optional[Union[float, ivy.Container]] = 10e-5,
verbose: Optional[Union[bool, ivy.Container]] = False,
return_errors: Optional[Union[bool, ivy.Container]] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, Sequence[ivy.Container]]:
"""ivy.Container static method variant of ivy.partial_tucker. This
method simply wraps the function, and so the docstring for
ivy.partial_tucker also applies to this method with minimal changes.
Parameters
----------
self
input tensor
rank
number of components
modes
modes to consider in the input tensor
seed
Used to create a random seed distribution
when init == 'random'
init
initialization scheme for tucker decomposition.
svd
function to use to compute the SVD
mask
array of booleans with the same shape as ``tensor`` should be 0 where
the values are missing and 1 everywhere else. Note: if tensor is
sparse, then mask should also be sparse with a fill value of 1 (or
True).
svd_mask_repeats
number of iterations for imputing the values in the SVD matrix when
mask is not None
Returns
-------
core
initialized core tensor
factors
list of factors
"""
return self.static_partial_tucker(
self,
rank,
modes,
n_iter_max=n_iter_max,
init=init,
svd=svd,
seed=seed,
mask=mask,
svd_mask_repeats=svd_mask_repeats,
tol=tol,
verbose=verbose,
return_errors=return_errors,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_tucker(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
rank: Union[Sequence[int], ivy.Container],
/,
*,
fixed_factors: Optional[Union[Sequence[int], ivy.Container]] = None,
n_iter_max: Optional[Union[int, ivy.Container]] = 100,
init: Optional[
Union[Literal["svd", "random"], ivy.TuckerTensor, ivy.Container]
] = "svd",
svd: Optional[Union[Literal["truncated_svd"], ivy.Container]] = "truncated_svd",
seed: Optional[Union[int, ivy.Container]] = None,
mask: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
svd_mask_repeats: Optional[Union[int, ivy.Container]] = 5,
tol: Optional[Union[float, ivy.Container]] = 10e-5,
verbose: Optional[Union[bool, ivy.Container]] = False,
return_errors: Optional[Union[bool, ivy.Container]] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, Sequence[ivy.Container]]:
"""ivy.Container static method variant of ivy.tucker. This method
simply wraps the function, and so the docstring for ivy.tucker also
applies to this method with minimal changes.
Parameters
----------
x
input tensor
rank
size of the core tensor, ``(len(ranks) == tensor.ndim)``
if int, the same rank is used for all modes
fixed_factors
if not None, list of modes for which to keep the factors fixed.
Only valid if a Tucker tensor is provided as init.
n_iter_max
maximum number of iteration
init
{'svd', 'random'}, or TuckerTensor optional
if a TuckerTensor is provided, this is used for initialization
svd
str, default is 'truncated_svd'
function to use to compute the SVD,
seed
Used to create a random seed distribution
when init == 'random'
mask
array of booleans with the same shape as ``tensor`` should be 0 where
the values are missing and 1 everywhere else. Note: if tensor is
sparse, then mask should also be sparse with a fill value of 1 (or
True).
svd_mask_repeats
number of iterations for imputing the values in the SVD matrix when
mask is not None
tol
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
verbose
if True, the reconstruction error is displayed at each iteration.
return_errors
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
Returns
-------
Container of ivy.TuckerTensors or ivy.TuckerTensors and
container of reconstruction errors if return_errors is True.
References
----------
.. [1] T.G. Kolda and B.W. Bader, "Tensor Decompositions and Applications",
SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
"""
return ContainerBase.cont_multi_map_in_function(
"tucker",
x,
rank,
fixed_factors=fixed_factors,
seed=seed,
init=init,
svd=svd,
n_iter_max=n_iter_max,
mask=mask,
svd_mask_repeats=svd_mask_repeats,
tol=tol,
verbose=verbose,
return_errors=return_errors,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def tucker(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
rank: Union[Sequence[int], ivy.Container],
/,
*,
fixed_factors: Optional[Union[Sequence[int], ivy.Container]] = None,
n_iter_max: Optional[Union[int, ivy.Container]] = 100,
init: Optional[
Union[Literal["svd", "random"], ivy.TuckerTensor, ivy.Container]
] = "svd",
svd: Optional[Union[Literal["truncated_svd"], ivy.Container]] = "truncated_svd",
seed: Optional[Union[int, ivy.Container]] = None,
mask: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
svd_mask_repeats: Optional[Union[int, ivy.Container]] = 5,
tol: Optional[Union[float, ivy.Container]] = 10e-5,
verbose: Optional[Union[bool, ivy.Container]] = False,
return_errors: Optional[Union[bool, ivy.Container]] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, Sequence[ivy.Container]]:
"""ivy.Container static method variant of ivy.tucker. This method
simply wraps the function, and so the docstring for ivy.tucker also
applies to this method with minimal changes.
Parameters
----------
x
input tensor
rank
size of the core tensor, ``(len(ranks) == tensor.ndim)``
if int, the same rank is used for all modes
fixed_factors
if not None, list of modes for which to keep the factors fixed.
Only valid if a Tucker tensor is provided as init.
n_iter_max
maximum number of iteration
init
{'svd', 'random'}, or TuckerTensor optional
if a TuckerTensor is provided, this is used for initialization
svd
str, default is 'truncated_svd'
function to use to compute the SVD,
seed
Used to create a random seed distribution
when init == 'random'
mask
array of booleans with the same shape as ``tensor`` should be 0 where
the values are missing and 1 everywhere else. Note: if tensor is
sparse, then mask should also be sparse with a fill value of 1 (or
True).
svd_mask_repeats
number of iterations for imputing the values in the SVD matrix when
mask is not None
tol
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
verbose
if True, the reconstruction error is displayed at each iteration.
return_errors
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
Returns
-------
Container of ivy.TuckerTensors or ivy.TuckerTensors and
container of reconstruction errors if return_errors is True.
References
----------
.. [1] T.G. Kolda and B.W. Bader, "Tensor Decompositions and Applications",
SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
"""
return self.static_tucker(
self,
rank,
fixed_factors=fixed_factors,
n_iter_max=n_iter_max,
init=init,
svd=svd,
seed=seed,
mask=mask,
svd_mask_repeats=svd_mask_repeats,
tol=tol,
verbose=verbose,
return_errors=return_errors,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_dot(
a: Union[ivy.Array, ivy.NativeArray, ivy.Container],
b: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Union[ivy.Array, ivy.Container]:
"""Compute the dot product between two arrays `a` and `b` using the
current backend's implementation. For 1-D arrays this is the sum of the
element-wise product of the inputs; for 2-D arrays it is equivalent to
matrix multiplication.
Parameters
----------
a
First input array.
b
Second input array.
out
Optional output array. If provided, the output array to store the result.
Returns
-------
ret
The dot product of the input arrays.
Examples
--------
With :class:`ivy.Array` inputs:
>>> a = ivy.array([1, 2, 3])
>>> b = ivy.array([4, 5, 6])
>>> result = ivy.dot(a, b)
>>> print(result)
ivy.array(32)
>>> a = ivy.array([[1, 2], [3, 4]])
>>> b = ivy.array([[5, 6], [7, 8]])
>>> c = ivy.empty_like(a)
>>> ivy.dot(a, b, out=c)
>>> print(c)
ivy.array([[19, 22],
[43, 50]])
>>> a = ivy.array([[1.1, 2.3, -3.6]])
>>> b = ivy.array([[-4.8], [5.2], [6.1]])
>>> c = ivy.zeros((1, 1))
>>> ivy.dot(a, b, out=c)
>>> print(c)
ivy.array([[-15.28]])
"""
return ContainerBase.cont_multi_map_in_function(
"dot",
a,
b,
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def dot(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
b: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Union[ivy.Array, ivy.Container]:
"""Compute the dot product between two arrays `a` and `b` using the
current backend's implementation. For 1-D arrays this is the sum of the
element-wise product of the inputs; for 2-D arrays it is equivalent to
matrix multiplication.
Parameters
----------
self
First input array.
b
Second input array.
out
Optional output array. If provided, the output array to store the result.
Returns
-------
ret
The dot product of the input arrays.
Examples
--------
With :class:`ivy.Array` inputs:
>>> a = ivy.array([1, 2, 3])
>>> b = ivy.array([4, 5, 6])
>>> result = ivy.dot(a, b)
>>> print(result)
ivy.array(32)
>>> a = ivy.array([[1, 2], [3, 4]])
>>> b = ivy.array([[5, 6], [7, 8]])
>>> c = ivy.empty_like(a)
>>> ivy.dot(a, b, out=c)
>>> print(c)
ivy.array([[19, 22],
[43, 50]])
>>> a = ivy.array([[1.1, 2.3, -3.6]])
>>> b = ivy.array([[-4.8], [5.2], [6.1]])
>>> c = ivy.zeros((1, 1))
>>> ivy.dot(a, b, out=c)
>>> print(c)
ivy.array([[-15.28]])
"""
return self.static_dot(
self,
b,
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_tt_matrix_to_tensor(
tt_matrix: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.tt_matrix_to_tensor. This
method simply wraps the function, and so the docstring for
ivy.tt_matrix_to_tensor also applies to this method with minimal
changes.
Parameters
----------
tt_matrix
array of 4D-arrays
TT-Matrix factors (known as core) of shape
(rank_k, left_dim_k, right_dim_k, rank_{k+1})
out
optional output container, for writing the result to.
Returns
-------
output_tensor
tensor whose TT-Matrix decomposition was given by 'factors'
Examples
--------
>>> x = ivy.Container(a=ivy.array([[[[[0.49671414],
... [-0.1382643]],
...
... [[0.64768857],
... [1.5230298]]]],
... [[[[-0.23415337],
... [-0.23413695]],
...
... [[1.57921278],
... [0.76743472]]]]]))
>>> y = ivy.Container.static_tt_matrix_to_tensor(x)
>>> print(y["a"])
ivy.array([[[[-0.1163073 , -0.11629914],
[ 0.03237505, 0.03237278]],
[[ 0.78441733, 0.38119566],
[-0.21834874, -0.10610882]]],
[[[-0.15165846, -0.15164782],
[-0.35662258, -0.35659757]],
[[ 1.02283812, 0.49705869],
[ 2.40518808, 1.16882598]]]])
"""
return ContainerBase.cont_multi_map_in_function(
"tt_matrix_to_tensor",
tt_matrix,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def tt_matrix_to_tensor(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.tt_matrix_to_tensor.
This method simply wraps the function, and so the docstring for
ivy.tt_matrix_to_tensor also applies to this method with minimal
changes.
Parameters
----------
tt_matrix
array of 4D-arrays
TT-Matrix factors (known as core) of shape
(rank_k, left_dim_k, right_dim_k, rank_{k+1})
out
optional output container, for writing the result to.
Returns
-------
output_tensor
tensor whose TT-Matrix decomposition was given by 'factors'
Examples
--------
>>> x = ivy.Container(a=ivy.array([[[[[0.49671414],
... [-0.1382643]],
...
... [[0.64768857],
... [1.5230298]]]],
... [[[[-0.23415337],
... [-0.23413695]],
...
... [[1.57921278],
... [0.76743472]]]]]))
>>> y = x.tt_matrix_to_tensor()
>>> print(y["a"])
ivy.array([[[[-0.1163073 , -0.11629914],
[ 0.03237505, 0.03237278]],
[[ 0.78441733, 0.38119566],
[-0.21834874, -0.10610882]]],
[[[-0.15165846, -0.15164782],
[-0.35662258, -0.35659757]],
[[ 1.02283812, 0.49705869],
[ 2.40518808, 1.16882598]]]])
"""
return self.static_tt_matrix_to_tensor(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_higher_order_moment(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
order: Union[Sequence[int], ivy.Container],
/,
*,
out: Optional[ivy.Array] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.higher_order_moment. This
method simply wraps the function, and so the docstring for
ivy.higher_order_moment also applies to this method with minimal
changes.
Parameters
----------
x
matrix of size (n_samples, n_features)
or tensor of size(n_samples, D1, ..., DN)
order
number of the higher-order moment to compute
Returns
-------
tensor
if tensor is a matrix of size (n_samples, n_features),
tensor of size (n_features, )*order
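Examples
--------
A minimal sketch mirroring the instance-method example below:
>>> a = ivy.Container(x=ivy.array([[1, 2], [3, 4]]))
>>> result = ivy.Container.static_higher_order_moment(a, 3)
>>> # result["x"] is expected to equal
>>> # ivy.array([[[14, 19], [19, 26]], [[19, 26], [26, 36]]])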
"""
return ContainerBase.cont_multi_map_in_function(
"higher_order_moment",
x,
order,
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def higher_order_moment(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
order: Union[Sequence[int], ivy.Container],
/,
*,
out: Optional[ivy.Array] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.higher_order_moment.
This method simply wraps the function, and so the docstring for
ivy.higher_order_moment also applies to this method with minimal
changes.
Parameters
----------
x
matrix of size (n_samples, n_features)
or tensor of size(n_samples, D1, ..., DN)
order
number of the higher-order moment to compute
Returns
-------
tensor
if tensor is a matrix of size (n_samples, n_features),
tensor of size (n_features, )*order
Examples
--------
>>> a = ivy.array([[1, 2], [3, 4]])
>>> result = ivy.higher_order_moment(a, 3)
>>> print(result)
ivy.array([[[14, 19],
[19, 26]],
[[19, 26],
[26, 36]]])
"""
return self.static_higher_order_moment(
self,
order,
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_batched_outer(
tensors: Sequence[Union[ivy.Array, ivy.NativeArray, ivy.Container]],
/,
*,
out: Optional[ivy.Array] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.batched_outer. This
method simply wraps the function, and so the docstring for
ivy.batched_outer also applies to this method with minimal changes.
Parameters
----------
tensors
list of tensors of shape (n_samples, J1, ..., JN) ,
(n_samples, K1, ..., KM) ...
Returns
-------
outer product of tensors
of shape (n_samples, J1, ..., JN, K1, ..., KM, ...)
Examples
--------
>>> a = ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
>>> b = ivy.array([[[.1, .2], [.3, .4]], [[.5, .6], [.7, .8]]])
>>> result = ivy.batched_outer(a, b)
>>> print(result)
ivy.array([[[[[0.1, 0.2],
[0.30000001, 0.40000001]],
[[0.2 , 0.40000001],
[0.60000002, 0.80000001]]],
[[[0.3 , 0.60000001],
[0.90000004, 1.20000002]],
[[0.40000001, 0.80000001],
[1.20000005, 1.60000002]]]],
[[[[2.5 , 3.00000012],
[3.49999994, 4.00000006]],
[[3. , 3.60000014],
[4.19999993, 4.80000007]]],
[[[3.5 , 4.20000017],
[4.89999992, 5.60000008]],
[[4. , 4.80000019],
[5.5999999 , 6.4000001 ]]]]])
"""
return ContainerBase.cont_multi_map_in_function(
"batched_outer",
tensors,
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def batched_outer(
self: ivy.Container,
tensors: Sequence[Union[ivy.Container, ivy.Array, ivy.NativeArray]],
/,
*,
out: Optional[ivy.Array] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.batched_outer. This
method simply wraps the function, and so the docstring for
ivy.batched_outer also applies to this method with minimal changes.
Parameters
----------
tensors
list of tensors of shape (n_samples, J1, ..., JN) ,
(n_samples, K1, ..., KM) ...
Returns
-------
outer product of tensors
of shape (n_samples, J1, ..., JN, K1, ..., KM, ...)
Examples
--------
>>> a = ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
>>> b = ivy.array([[[.1, .2], [.3, .4]], [[.5, .6], [.7, .8]]])
>>> result = ivy.batched_outer(a, b)
>>> print(result)
ivy.array([[[[[0.1, 0.2],
[0.30000001, 0.40000001]],
[[0.2 , 0.40000001],
[0.60000002, 0.80000001]]],
[[[0.3 , 0.60000001],
[0.90000004, 1.20000002]],
[[0.40000001, 0.80000001],
[1.20000005, 1.60000002]]]],
[[[[2.5 , 3.00000012],
[3.49999994, 4.00000006]],
[[3. , 3.60000014],
[4.19999993, 4.80000007]]],
[[[3.5 , 4.20000017],
[4.89999992, 5.60000008]],
[[4. , 4.80000019],
[5.5999999 , 6.4000001 ]]]]])
"""
return self.static_batched_outer(
(self, *tensors),
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
| ivy/ivy/data_classes/container/experimental/linear_algebra.py/0 | {
"file_path": "ivy/ivy/data_classes/container/experimental/linear_algebra.py",
"repo_id": "ivy",
"token_count": 43299
} | 12 |
# For Review
# global
from typing import (
Optional,
Union,
List,
Tuple,
Dict,
Iterable,
Sequence,
)
from numbers import Number
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithManipulation(ContainerBase):
@staticmethod
def _static_concat(
xs: Union[
Tuple[Union[ivy.Array, ivy.NativeArray, ivy.Container], ...],
List[Union[ivy.Array, ivy.NativeArray, ivy.Container]],
],
/,
*,
axis: Union[int, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.concat.
This method simply wraps the function, and so the docstring for
ivy.concat also applies to this method with minimal changes.
"""
return ContainerBase.cont_multi_map_in_function(
"concat",
xs,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def concat(
self: ivy.Container,
/,
xs: Union[
Tuple[Union[ivy.Array, ivy.NativeArray, ivy.Container], ...],
List[Union[ivy.Array, ivy.NativeArray, ivy.Container]],
],
*,
axis: Union[int, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.concat.
This method simply wraps the function, and so the docstring for
ivy.concat also applies to this method with minimal changes.
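Examples
--------
A minimal sketch (the expected result is given as a comment):
>>> x = ivy.Container(a=ivy.array([[0., 1.], [2., 3.]]))
>>> y = ivy.Container(a=ivy.array([[4., 5.]]))
>>> z = x.concat([y], axis=0)
>>> # z["a"] is expected to equal ivy.array([[0., 1.], [2., 3.], [4., 5.]])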
"""
new_xs = xs.cont_copy() if ivy.is_ivy_container(xs) else xs.copy()
new_xs.insert(0, self.cont_copy())
return self._static_concat(
new_xs,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_expand_dims(
x: ivy.Container,
/,
*,
copy: Optional[Union[bool, ivy.Container]] = None,
axis: Union[int, Sequence[int], ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.expand_dims. This method
simply wraps the function, and so the docstring for ivy.expand_dims
also applies to this method with minimal changes.
Parameters
----------
x
input container.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
axis
position where a new axis (dimension) of size one will be added. If an
element of the container has the rank of ``N``, then the ``axis`` needs
to be between ``[-N-1, N]``. Default: ``0``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
A container with the elements of ``x``, but with the dimensions of
its elements added by one in a given ``axis``.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1.]),
... b=ivy.array([3., 4.]),
... c=ivy.array([6., 7.]))
>>> y = ivy.Container.static_expand_dims(x, axis=1)
>>> print(y)
{
a: ivy.array([[0.],
[1.]]),
b: ivy.array([[3.],
[4.]]),
c: ivy.array([[6.],
[7.]])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]),
... c=ivy.array([6., 7., 8.]))
>>> container_axis = ivy.Container(a=0, b=-1, c=(0,1))
>>> y = ivy.Container.static_expand_dims(x, axis=container_axis)
>>> print(y)
{
a: ivy.array([[0., 1., 2.]]),
b: ivy.array([[3.],
[4.],
[5.]]),
c: ivy.array([[[6., 7., 8.]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"expand_dims",
x,
copy=copy,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def expand_dims(
self: ivy.Container,
/,
*,
copy: Optional[Union[bool, ivy.Container]] = None,
axis: Union[int, Sequence[int], ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.expand_dims. This
method simply wraps the function, and so the docstring for
ivy.expand_dims also applies to this method with minimal changes.
Parameters
----------
self
input container.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
axis
position where a new axis (dimension) of size one will be added. If an
element of the container has the rank of ``N``, the ``axis`` needs to
be between ``[-N-1, N]``. Default: ``0``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
A container with the elements of ``self``, but with the dimensions of
its elements added by one in a given ``axis``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[0., 1.],
... [2., 3.]]),
... b=ivy.array([[4., 5.],
... [6., 7.]]))
>>> y = x.expand_dims(axis=1)
>>> print(y)
{
a: ivy.array([[[0., 1.]],
[[2., 3.]]]),
b: ivy.array([[[4., 5.]],
[[6., 7.]]])
}
"""
return self._static_expand_dims(
self,
copy=copy,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_split(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[Union[bool, ivy.Container]] = None,
num_or_size_splits: Optional[
Union[int, Sequence[int], ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
axis: Union[int, ivy.Container] = 0,
with_remainder: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> List[ivy.Container]:
"""ivy.Container static method variant of ivy.split. This method simply
wraps the function, and so the docstring for ivy.split also applies to
this method with minimal changes.
Parameters
----------
x
array to be divided into sub-arrays.
num_or_size_splits
Number of equal arrays to divide the array into along the given axis if an
integer. The size of each split element if a sequence of integers
or 1-D array. Default is to divide into as many 1-dimensional arrays
as the axis dimension.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
axis
The axis along which to split, default is ``0``.
with_remainder
If the tensor does not split evenly, then store the last remainder entry.
Default is ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains will
be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied. Default
is False.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
list of containers of sub-arrays.
Examples
--------
>>> x = ivy.Container(a=ivy.array([2, 1, 5, 9]), b=ivy.array([3, 7, 2, 11]))
>>> y = ivy.Container.static_split(x, num_or_size_splits=2)
>>> print(y)
[{
a: ivy.array([2, 1]),
b: ivy.array([3, 7])
}, {
a: ivy.array([5, 9]),
b: ivy.array([2, 11])
}]
"""
return ContainerBase.cont_multi_map_in_function(
"split",
x,
copy=copy,
num_or_size_splits=num_or_size_splits,
axis=axis,
with_remainder=with_remainder,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def split(
self: ivy.Container,
/,
*,
copy: Optional[Union[bool, ivy.Container]] = None,
num_or_size_splits: Optional[
Union[int, Sequence[int], ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
axis: Union[int, ivy.Container] = 0,
with_remainder: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> List[ivy.Container]:
"""ivy.Container instance method variant of ivy.split. This method
simply wraps the function, and so the docstring for ivy.split also
applies to this method with minimal changes.
Parameters
----------
self
array to be divided into sub-arrays.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
num_or_size_splits
Number of equal arrays to divide the array into along the given axis if an
integer. The size of each split element if a sequence of integers
or 1-D array. Default is to divide into as many 1-dimensional arrays
as the axis dimension.
axis
The axis along which to split, default is ``0``.
with_remainder
If the tensor does not split evenly, then store the last remainder entry.
Default is ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains will
be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied. Default
is False.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
list of containers of sub-arrays.
Examples
--------
>>> x = ivy.Container(a=ivy.array([2, 1, 5, 9]), b=ivy.array([3, 7, 2, 11]))
>>> y = x.split(num_or_size_splits=2)
>>> print(y)
[{
a: ivy.array([2, 1]),
b: ivy.array([3, 7])
}, {
a: ivy.array([5, 9]),
b: ivy.array([2, 11])
}]
"""
return self._static_split(
self,
copy=copy,
num_or_size_splits=num_or_size_splits,
axis=axis,
with_remainder=with_remainder,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def _static_permute_dims(
x: ivy.Container,
/,
axes: Union[Tuple[int, ...], ivy.Container],
*,
copy: Optional[Union[bool, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.permute_dims. This method
simply wraps the function, and so the docstring for ivy.permute_dims
also applies to this method with minimal changes.
Parameters
----------
x
input container.
axes
tuple containing a permutation of (0, 1, ..., N-1) where N is the number
of axes (dimensions) of x.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
A container with the elements of ``self`` permuted along the given axes.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[0., 1., 2.]]), b=ivy.array([[3., 4., 5.]]))
>>> y = ivy.Container.static_permute_dims(x, axes=(1, 0))
>>> print(y)
{
a:ivy.array([[0.],[1.],[2.]]),
b:ivy.array([[3.],[4.],[5.]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"permute_dims",
x,
axes,
copy=copy,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def permute_dims(
self: ivy.Container,
/,
axes: Union[Tuple[int, ...], ivy.Container],
*,
copy: Optional[Union[bool, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.permute_dims. This
method simply wraps the function, and so the docstring for
ivy.permute_dims also applies to this method with minimal changes.
Parameters
----------
self
input container.
axes
tuple containing a permutation of (0, 1, ..., N-1) where N is the number
of axes (dimensions) of x.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
A container with the elements of ``self`` permuted along the given axes.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[0., 1., 2.]]), b=ivy.array([[3., 4., 5.]]))
>>> y = x.permute_dims(axes=(1, 0))
>>> print(y)
{
a:ivy.array([[0.],[1.],[2.]]),
b:ivy.array([[3.],[4.],[5.]])
}
"""
return self._static_permute_dims(
self,
axes,
copy=copy,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_flip(
x: ivy.Container,
/,
*,
copy: Optional[Union[bool, ivy.Container]] = None,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.flip. This method simply
wraps the function, and so the docstring for ivy.flip also applies to
this method with minimal changes.
Parameters
----------
x
input container.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
axis
axis (or axes) along which to flip. If axis is None,
all input array axes are flipped. If axis is negative,
            axis is counted from the last dimension. If more than one axis is
            provided, only the specified axes are flipped. Default: None.
key_chains
The key-chains to apply or not apply the method to. Default is None.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is True.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is False.
map_sequences
Whether to also map method to sequences (lists, tuples). Default is False.
out
optional output container, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
an output container having the same data type and
shape as ``x`` and whose elements, relative to ``x``, are reordered.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1, 0, 1]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container.static_flip(x)
>>> print(y)
{
a: ivy.array([1, 0, -1]),
b: ivy.array([4, 3, 2])
}
>>> x = ivy.Container(a=ivy.array([-1, 0, 1]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container.static_flip(x, axis=0)
>>> print(y)
{
a: ivy.array([1, 0, -1]),
b: ivy.array([4, 3, 2])
}
"""
return ContainerBase.cont_multi_map_in_function(
"flip",
x,
copy=copy,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def flip(
self: ivy.Container,
/,
*,
copy: Optional[Union[bool, ivy.Container]] = None,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.flip. This method
simply wraps the function, and so the docstring for ivy.flip also
applies to this method with minimal changes.
Parameters
----------
self
input container.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
axis
axis (or axes) along which to flip. If axis is None,
all input array axes are flipped. If axis is negative,
            axis is counted from the last dimension. If more than one axis is
            provided, only the specified axes are flipped. Default: None.
key_chains
The key-chains to apply or not apply the method to. Default is None.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is True.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is False.
map_sequences
Whether to also map method to sequences (lists, tuples). Default is False.
out
optional output container, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
an output container having the same data type and
shape as ``self`` and whose elements, relative to ``self``, are reordered.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1, 0, 1]),
... b=ivy.array([2, 3, 4]))
>>> y = x.flip()
>>> print(y)
{
a: ivy.array([1, 0, -1]),
b: ivy.array([4, 3, 2])
}
>>> x = ivy.Container(a=ivy.array([-1, 0, 1]),
... b=ivy.array([2, 3, 4]))
>>> y = x.flip(axis=0)
>>> print(y)
{
a: ivy.array([1, 0, -1]),
b: ivy.array([4, 3, 2])
}
"""
return self._static_flip(
self,
copy=copy,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_reshape(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
shape: Union[ivy.Shape, ivy.NativeShape, Sequence[int], ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
copy: Optional[Union[bool, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
order: Union[str, ivy.Container] = "C",
allowzero: Union[bool, ivy.Container] = True,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.reshape. This method
simply wraps the function, and so the docstring for ivy.reshape also
applies to this method with minimal changes.
Parameters
----------
x
input container.
shape
The new shape should be compatible with the original shape.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
order
Read the elements of x using this index order, and place the elements into
the reshaped array using this index order.
‘C’ means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first axis index
changing slowest.
‘F’ means to read / write the elements using Fortran-like index order, with
the first index changing fastest, and the last index changing slowest.
Note that the ‘C’ and ‘F’ options take no account of the memory layout
of the underlying array, and only refer to the order of indexing.
Default order is 'C'
Returns
-------
ret
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0, 1, 2, 3, 4, 5]),
... b=ivy.array([0, 1, 2, 3, 4, 5]))
>>> y = ivy.Container.static_reshape(x, (3,2))
>>> print(y)
{
a: ivy.array([[0, 1],
[2, 3],
[4, 5]]),
b: ivy.array([[0, 1],
[2, 3],
[4, 5]])
}
>>> x = ivy.Container(a=ivy.array([0, 1, 2, 3, 4, 5]),
... b=ivy.array([0, 1, 2, 3, 4, 5]))
>>> y = ivy.Container.static_reshape(x, (3,2), order='F')
>>> print(y)
{
a: ivy.array([[0, 3],
[1, 4],
[2, 5]]),
b: ivy.array([[0, 3],
[1, 4],
[2, 5]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"reshape",
x,
shape,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
copy=copy,
allowzero=allowzero,
out=out,
order=order,
)
def reshape(
self: ivy.Container,
/,
shape: Union[ivy.Shape, ivy.NativeShape, Sequence[int], ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
copy: Optional[Union[bool, ivy.Container]] = None,
order: Union[str, ivy.Container] = "C",
allowzero: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.reshape. This method
simply wraps the function, and so the docstring for ivy.reshape also
applies to this method with minimal changes.
Parameters
----------
self
input container.
shape
The new shape should be compatible with the original shape.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
order
Read the elements of the input container using this index order,
and place the elements into the reshaped array using this index order.
‘C’ means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first axis index
changing slowest.
‘F’ means to read / write the elements using Fortran-like index order, with
the first index changing fastest, and the last index changing slowest.
Note that the ‘C’ and ‘F’ options take no account of the memory layout
of the underlying array, and only refer to the order of indexing.
Default order is 'C'
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
an output container having the same data type as ``self``
and elements as ``self``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0, 1, 2, 3, 4, 5]),
... b=ivy.array([0, 1, 2, 3, 4, 5]))
>>> y = x.reshape((2,3))
>>> print(y)
{
a: ivy.array([[0, 1, 2],
[3, 4, 5]]),
b: ivy.array([[0, 1, 2],
[3, 4, 5]])
}
>>> x = ivy.Container(a=ivy.array([0, 1, 2, 3, 4, 5]),
... b=ivy.array([0, 1, 2, 3, 4, 5]))
>>> y = x.reshape((2,3), order='F')
>>> print(y)
{
a: ivy.array([[0, 2, 4],
[1, 3, 5]]),
b: ivy.array([[0, 2, 4],
[1, 3, 5]])
}
"""
return self._static_reshape(
self,
shape,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
copy=copy,
allowzero=allowzero,
out=out,
order=order,
)
@staticmethod
def _static_roll(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
shift: Union[int, Tuple[int, ...], ivy.Container],
*,
axis: Optional[Union[int, Tuple[int, ...], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.roll. This method simply
wraps the function, and so the docstring for ivy.roll also applies to
this method with minimal changes.
Parameters
----------
x
input container.
shift
number of places by which the elements are shifted. If ``shift`` is a tuple,
then ``axis`` must be a tuple of the same size, and each of the given axes
must be shifted by the corresponding element in ``shift``. If ``shift`` is
an ``int`` and ``axis`` a tuple, then the same ``shift`` must be used for
            all specified axes. If a shift is positive, then array elements must be
shifted positively (toward larger indices) along the dimension of ``axis``.
If a shift is negative, then array elements must be shifted negatively
(toward smaller indices) along the dimension of ``axis``.
axis
axis (or axes) along which elements to shift. If ``axis`` is ``None``, the
array must be flattened, shifted, and then restored to its original shape.
Default ``None``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
an output container having the same data type as ``x`` and whose elements,
relative to ``x``, are shifted.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> y = ivy.Container.static_roll(x, 1)
>>> print(y)
{
a: ivy.array([2., 0., 1.]),
b: ivy.array([5., 3., 4.])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> shift = ivy.Container(a=1, b=-1)
>>> y = ivy.Container.static_roll(x, shift)
>>> print(y)
{
a: ivy.array([2., 0., 1.]),
b: ivy.array([4., 5., 3.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"roll",
x,
shift,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def roll(
self: ivy.Container,
/,
shift: Union[int, Sequence[int], ivy.Container],
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.roll. This method
simply wraps the function, and so the docstring for ivy.roll also
applies to this method with minimal changes.
Parameters
----------
self
input container.
shift
number of places by which the elements are shifted. If ``shift`` is a tuple,
then ``axis`` must be a tuple of the same size, and each of the given axes
must be shifted by the corresponding element in ``shift``. If ``shift`` is
an ``int`` and ``axis`` a tuple, then the same ``shift`` must be used for
all specified axes. If a shift is positive, then array elements must be
shifted positively (toward larger indices) along the dimension of ``axis``.
If a shift is negative, then array elements must be shifted negatively
(toward smaller indices) along the dimension of ``axis``.
axis
axis (or axes) along which elements to shift. If ``axis`` is ``None``, the
array must be flattened, shifted, and then restored to its original shape.
Default ``None``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
an output container having the same data type as ``self`` and whose
elements, relative to ``self``, are shifted.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = x.roll(1)
>>> print(y)
{
a: ivy.array([2., 0., 1.]),
b: ivy.array([5., 3., 4.])
}
"""
return self._static_roll(
self,
shift,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_squeeze(
x: ivy.Container,
/,
axis: Union[int, Sequence[int], ivy.Container],
*,
copy: Optional[Union[bool, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.squeeze. This method
simply wraps the function, and so the docstring for ivy.squeeze also
applies to this method with minimal changes.
Parameters
----------
x
input container.
axis
axis (or axes) to squeeze.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
an output container with the results.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[[10.], [11.]]]),
... b=ivy.array([[[11.], [12.]]]))
>>> y = ivy.Container.static_squeeze(x, 0)
>>> print(y)
{
            a: ivy.array([[10.], [11.]]),
            b: ivy.array([[11.], [12.]])
}
>>> x = ivy.Container(a=ivy.array([[[10.], [11.]]]),
... b=ivy.array([[[11.], [12.]]]))
>>> y = ivy.Container.static_squeeze(x, [0, 2])
>>> print(y)
{
            a: ivy.array([10., 11.]),
            b: ivy.array([11., 12.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"squeeze",
x,
axis=axis,
copy=copy,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def squeeze(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]],
copy: Optional[Union[bool, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.squeeze. This method
simply wraps the function, and so the docstring for ivy.squeeze also
applies to this method with minimal changes.
Parameters
----------
self
input container.
axis
axis (or axes) to squeeze.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
an output container with the results.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[[10.], [11.]]]),
... b=ivy.array([[[11.], [12.]]]))
>>> y = x.squeeze(axis=2)
>>> print(y)
{
a: ivy.array([[10., 11.]]),
b: ivy.array([[11., 12.]])
}
>>> x = ivy.Container(a=ivy.array([[[10.], [11.]]]),
... b=ivy.array([[[11.], [12.]]]))
>>> y = x.squeeze(axis=0)
>>> print(y)
{
a: ivy.array([[10.],
[11.]]),
b: ivy.array([[11.],
[12.]])
}
"""
return self._static_squeeze(
self,
axis=axis,
copy=copy,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_stack(
xs: Union[
Tuple[Union[ivy.Array, ivy.NativeArray, ivy.Container]],
List[Union[ivy.Array, ivy.NativeArray, ivy.Container]],
],
/,
*,
axis: Union[int, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.stack. This method simply
wraps the function, and so the docstring for ivy.stack also applies to
this method with minimal changes.
Parameters
----------
xs
            Container with leaves to join. Each array leaf must have the same shape.
axis
axis along which the array leaves will be joined. More details can be found
in the docstring for ivy.stack.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
an output container with the results.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[0, 1], [2,3]]), b=ivy.array([[4, 5]]))
>>> z = ivy.Container.static_stack(x,axis = 1)
>>> print(z)
{
a: ivy.array([[0, 2],
[1, 3]]),
b: ivy.array([[4],
[5]])
}
>>> x = ivy.Container(a=ivy.array([[0, 1], [2,3]]), b=ivy.array([[4, 5]]))
>>> y = ivy.Container(a=ivy.array([[3, 2], [1,0]]), b=ivy.array([[1, 0]]))
>>> z = ivy.Container.static_stack([x,y])
>>> print(z)
{
a: ivy.array([[[0, 1],
[2, 3]],
[[3, 2],
[1, 0]]]),
b: ivy.array([[[4, 5]],
[[1, 0]]])
}
>>> x = ivy.Container(a=ivy.array([[0, 1], [2,3]]), b=ivy.array([[4, 5]]))
>>> y = ivy.Container(a=ivy.array([[3, 2], [1,0]]), b=ivy.array([[1, 0]]))
>>> z = ivy.Container.static_stack([x,y],axis=1)
>>> print(z)
{
a: ivy.array([[[0, 1],
[3, 2]],
[[2, 3],
[1, 0]]]),
b: ivy.array([[[4, 5],
[1, 0]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"stack",
xs,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def stack(
self: ivy.Container,
/,
xs: Union[
Tuple[Union[ivy.Array, ivy.NativeArray, ivy.Container]],
List[Union[ivy.Array, ivy.NativeArray, ivy.Container]],
],
*,
axis: Union[int, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.stack. This method
simply wraps the function, and so the docstring for ivy.stack also
applies to this method with minimal changes.
Parameters
----------
self
Container with leaves to join with leaves of other arrays/containers.
            Each array leaf must have the same shape.
xs
Container with other leaves to join.
            Each array leaf must have the same shape.
axis
axis along which the array leaves will be joined. More details can be found
in the docstring for ivy.stack.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
an output container with the results.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[0, 1], [2,3]]), b=ivy.array([[4, 5]]))
>>> y = ivy.Container(a=ivy.array([[3, 2], [1,0]]), b=ivy.array([[1, 0]]))
>>> x.stack([y])
{
a: ivy.array([[[0, 1],
[2, 3]],
[[3, 2],
[1, 0]]]),
b: ivy.array([[[4, 5]],
[[1, 0]]])
}
"""
new_xs = xs.cont_copy() if ivy.is_ivy_container(xs) else xs.copy()
new_xs.insert(0, self.cont_copy())
return self._static_stack(
new_xs,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_repeat(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
repeats: Union[int, Iterable[int], ivy.Container],
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.repeat. This method
simply wraps the function, and so the docstring for ivy.repeat also
applies to this method with minimal changes.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
        >>> y = ivy.Container.static_repeat(x, 2)
>>> print(y)
{
a: ivy.array([0., 0., 1., 1., 2., 2.]),
b: ivy.array([3., 3., 4., 4., 5., 5.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"repeat",
x,
repeats,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def repeat(
self: ivy.Container,
/,
repeats: Union[int, Iterable[int], ivy.Container],
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.repeat. This method
simply wraps the function, and so the docstring for ivy.repeat also
applies to this method with minimal changes.
Parameters
----------
        self
Input container.
repeats
The number of repetitions for each element. repeats is broadcast to fit the
shape of the given axis.
axis
The axis along which to repeat values. By default, use the flattened input
array, and return a flat output array.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
            The output container with repeated leaves.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = x.repeat(2)
>>> print(y)
{
a: ivy.array([0., 0., 1., 1., 2., 2.]),
b: ivy.array([3., 3., 4., 4., 5., 5.])
}
"""
return self._static_repeat(
self,
repeats,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_tile(
x: ivy.Container,
/,
repeats: Union[Iterable[int], ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.tile. This method simply
wraps the function, and so the docstring for ivy.tile also applies to
this method with minimal changes.
Parameters
----------
x
Input Container.
repeats
The number of repetitions of x along each axis.
out
optional output array, for writing the result to. It must have
a shape that the inputs broadcast to.
Returns
-------
ret
The container output with tiled leaves.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[0, 1], [2,3]]), b=ivy.array([[4, 5]]))
        >>> y = ivy.Container.static_tile(x, (2,3))
>>> print(y)
{
a: ivy.array([[0,1,0,1,0,1],
[2,3,2,3,2,3],
[0,1,0,1,0,1],
[2,3,2,3,2,3]]),
b: ivy.array([[4,5,4,5,4,5],
[4,5,4,5,4,5]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"tile",
x,
repeats,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def tile(
self: ivy.Container,
/,
repeats: Union[Iterable[int], ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.tile. This method
simply wraps the function, and so the docstring for ivy.tile also
applies to this method with minimal changes.
Parameters
----------
self
Input container.
repeats
The number of repetitions of x along each axis.
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
The container output with tiled leaves.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[0, 1], [2,3]]), b=ivy.array([[4, 5]]))
>>> y = x.tile((2,3))
>>> print(y)
{
a: (<class ivy.data_classes.array.array.Array> shape=[4, 6]),
b: (<class ivy.data_classes.array.array.Array> shape=[2, 6])
}
"""
return self._static_tile(
self,
repeats,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_constant_pad(
x: ivy.Container,
/,
pad_width: Union[Iterable[Tuple[int]], ivy.Container],
*,
value: Union[Number, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.constant_pad. This method
simply wraps the function, and so the docstring for ivy.constant_pad
also applies to this method with minimal changes.
Parameters
----------
x
Input container with leaves to pad.
pad_width
Number of values padded to the edges of each axis.
Specified as ((before_1, after_1), … (before_N, after_N)), where N
is number of axes of x.
value
The constant value to pad the array with.
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
Output container with padded array leaves of rank equal to x with
shape increased according to pad_width.
Examples
--------
>>> x = ivy.Container(a = ivy.array([1, 2, 3]), b = ivy.array([4, 5, 6]))
>>> y = ivy.Container.static_constant_pad(x, pad_width = [[2, 3]])
>>> print(y)
{
a: ivy.array([0, 0, 1, 2, 3, 0, 0, 0]),
b: ivy.array([0, 0, 4, 5, 6, 0, 0, 0])
}
"""
return ContainerBase.cont_multi_map_in_function(
"constant_pad",
x,
pad_width,
value=value,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def constant_pad(
self: ivy.Container,
/,
pad_width: Union[Iterable[Tuple[int]], ivy.Container],
*,
value: Union[Number, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.constant_pad. This
method simply wraps the function, and so the docstring for
ivy.constant_pad also applies to this method with minimal changes.
Parameters
----------
self
Input container with leaves to pad.
pad_width
Number of values padded to the edges of each axis.
Specified as ((before_1, after_1), … (before_N, after_N)), where N
is number of axes of x.
value
The constant value to pad the array with.
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
Output container with padded array leaves of rank equal to x with
shape increased according to pad_width.
Examples
--------
>>> x = ivy.Container(a = ivy.array([1, 2, 3]), b = ivy.array([4, 5, 6]))
>>> y = x.constant_pad(pad_width = [[2, 3]])
>>> print(y)
{
a: ivy.array([0, 0, 1, 2, 3, 0, 0, 0]),
b: ivy.array([0, 0, 4, 5, 6, 0, 0, 0])
}
"""
return self._static_constant_pad(
self,
pad_width,
value=value,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_zero_pad(
x: ivy.Container,
/,
pad_width: Union[Iterable[Tuple[int]], ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.zero_pad. This method
simply wraps the function, and so the docstring for ivy.zero_pad also
applies to this method with minimal changes.
Parameters
----------
x
Input array to pad.
pad_width
Number of values padded to the edges of each axis. Specified as
((before_1, after_1), … (before_N, after_N)),
where N is number of axes of x.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
Padded array of rank equal to x with shape increased according to pad_width.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a = ivy.array([1., 2., 3.]), b = ivy.array([3., 4., 5.]))
>>> y = ivy.zero_pad(x, pad_width = [[2, 3]])
>>> print(y)
{
a: ivy.array([0., 0., 1., 2., 3., 0., 0., 0.]),
b: ivy.array([0., 0., 3., 4., 5., 0., 0., 0.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"zero_pad",
x,
pad_width,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def zero_pad(
self: ivy.Container,
/,
pad_width: Union[Iterable[Tuple[int]], ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.zero_pad. This method
simply wraps the function, and so the docstring for ivy.zero_pad also
applies to this method with minimal changes.
Parameters
----------
self
Input array to pad.
pad_width
Number of values padded to the edges of each axis. Specified as
((before_1, after_1), … (before_N, after_N)),
where N is number of axes of x.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
Padded array of rank equal to x with shape increased according to pad_width.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a = ivy.array([1., 2., 3.]), b = ivy.array([3., 4., 5.]))
>>> y = x.zero_pad(pad_width = [[2, 3]])
>>> print(y)
{
a: ivy.array([0., 0., 1., 2., 3., 0., 0., 0.]),
b: ivy.array([0., 0., 3., 4., 5., 0., 0., 0.])
}
"""
return self._static_zero_pad(
self,
pad_width,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_swapaxes(
x: ivy.Container,
axis0: Union[int, ivy.Container],
axis1: Union[int, ivy.Container],
/,
*,
copy: Optional[Union[bool, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.swapaxes. This method
simply wraps the function, and so the docstring for ivy.swapaxes also
applies to this method with minimal changes.
Parameters
----------
x
Input container
axis0
First axis to be swapped.
axis1
Second axis to be swapped.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
x with its axes permuted.
        Examples
        --------
        >>> a = ivy.array([[1, 2, 3], [4, 5, 6]])
>>> b = ivy.array([[7, 8, 9], [10, 11, 12]])
>>> x = ivy.Container(a = a, b = b)
>>> y = x.swapaxes(0, 1)
>>> print(y)
{
a: ivy.array([[1, 4],
[2, 5],
[3, 6]]),
b: ivy.array([[7, 10],
[8, 11],
[9, 12]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"swapaxes",
x,
axis0,
axis1,
copy=copy,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def swapaxes(
self: ivy.Container,
axis0: Union[int, ivy.Container],
axis1: Union[int, ivy.Container],
/,
*,
copy: Optional[Union[bool, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.swapaxes. This method
simply wraps the function, and so the docstring for ivy.swapaxes also
applies to this method with minimal changes.
Parameters
----------
self
Input container.
axis0
First axis to be swapped.
axis1
Second axis to be swapped.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
x with its axes permuted.
Examples
--------
>>> a = ivy.array([[1, 2, 3], [4, 5, 6]])
>>> b = ivy.array([[7, 8, 9], [10, 11, 12]])
>>> x = ivy.Container(a = a, b = b)
>>> y = x.swapaxes(0, 1)
>>> print(y)
{
a: ivy.array([[1, 4],
[2, 5],
[3, 6]]),
b: ivy.array([[7, 10],
[8, 11],
[9, 12]])
}
"""
return self._static_swapaxes(
self,
axis0,
axis1,
copy=copy,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_unstack(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
copy: Optional[Union[bool, ivy.Container]] = None,
axis: Union[int, ivy.Container] = 0,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.unstack. This method
simply wraps the function, and so the docstring for ivy.unstack also
applies to this method with minimal changes.
Parameters
----------
x
Input array or container to unstack.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
axis
Axis for which to unpack the array.
keepdims
Whether to keep dimension 1 in the unstack dimensions. Default is ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
List of arrays, unpacked along specified dimensions, or containers
with arrays unpacked at leaves
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),
        ...                   b=ivy.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]]))
>>> y = ivy.Container.static_unstack(x, axis=0)
>>> print(y)
[{
a: ivy.array([[1, 2],
[3, 4]]),
b: ivy.array([[9, 10],
[11, 12]])
}, {
a: ivy.array([[5, 6],
[7, 8]]),
b: ivy.array([[13, 14],
[15, 16]])
}]
>>> x = ivy.Container(a=ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),
        ...                   b=ivy.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]]))
>>> y = ivy.Container.static_unstack(x, axis=1, keepdims=True)
>>> print(y)
[{
a: ivy.array([[[1, 2]],
[[5, 6]]]),
b: ivy.array([[[9, 10]],
[[13, 14]]])
}, {
a: ivy.array([[[3, 4]],
[[7, 8]]]),
b: ivy.array([[[11, 12]],
[[15, 16]]])
}]
"""
return ContainerBase.cont_multi_map_in_function(
"unstack",
x,
copy=copy,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def unstack(
self: ivy.Container,
/,
*,
copy: Optional[Union[bool, ivy.Container]] = None,
axis: Union[int, ivy.Container] = 0,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.unstack. This method
simply wraps the function, and so the docstring for ivy.unstack also
applies to this method with minimal changes.
Parameters
----------
self
Input container to unstack at leaves.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
axis
Axis for which to unpack the array.
keepdims
Whether to keep dimension 1 in the unstack dimensions. Default is ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
Containers with arrays unpacked at leaves
Examples
--------
With one :class:`ivy.Container` instances:
>>> x = ivy.Container(a=ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),
        ...                   b=ivy.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]]))
>>> x.unstack(axis=0)
[{
a: ivy.array([[1, 2],
[3, 4]]),
b: ivy.array([[9, 10],
[11, 12]])
}, {
a: ivy.array([[5, 6],
[7, 8]]),
b: ivy.array([[13, 14],
[15, 16]])
}]
"""
return self._static_unstack(
self,
copy=copy,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def _static_clip(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x_min: Optional[
Union[Number, ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
x_max: Optional[
Union[Number, ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.clip. This method simply
wraps the function, and so the docstring for ivy.clip also applies to
this method with minimal changes.
Parameters
----------
x
Input array or container containing elements to clip.
x_min
Minimum value.
x_max
Maximum value.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
A container with the elements of x, but where values < x_min are replaced
with x_min, and those > x_max with x_max.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> y = ivy.Container.static_clip(x, 1., 5.)
>>> print(y)
{
a: ivy.array([1., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> x_min = ivy.Container(a=0, b=0)
>>> x_max = ivy.Container(a=1, b=1)
>>> y = ivy.Container.static_clip(x, x_min, x_max)
>>> print(y)
{
a: ivy.array([0., 1., 1.]),
b: ivy.array([1., 1., 1.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"clip",
x,
x_min,
x_max,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def clip(
self: ivy.Container,
/,
x_min: Optional[
Union[Number, ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
x_max: Optional[
Union[Number, ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.clip. This method
simply wraps the function, and so the docstring for ivy.clip also
applies to this method with minimal changes.
Parameters
----------
self
Input container containing elements to clip.
x_min
Minimum value.
x_max
Maximum value.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
A container with the elements of x, but where values < x_min are replaced
with x_min, and those > x_max with x_max.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = x.clip(1,2)
>>> print(y)
{
a: ivy.array([1., 1., 2.]),
b: ivy.array([2., 2., 2.])
}
"""
return self._static_clip(
self,
x_min,
x_max,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
| ivy/ivy/data_classes/container/manipulation.py/0 | {
"file_path": "ivy/ivy/data_classes/container/manipulation.py",
"repo_id": "ivy",
"token_count": 42277
} | 13 |
# flake8: noqa
# local
from .nested_array import NestedArray, NestedArrayBase # noqa
| ivy/ivy/data_classes/nested_array/__init__.py/0 | {
"file_path": "ivy/ivy/data_classes/nested_array/__init__.py",
"repo_id": "ivy",
"token_count": 31
} | 14 |
use super::{Literal, PjRtBuffer};
use crate::{c_lib, Result};
use pyo3::prelude::*;
#[derive(Clone)]
#[pyclass(unsendable)]
pub struct PjRtLoadedExecutable {
pub(super) exe: c_lib::pjrt_loaded_executable,
pub(super) client: super::PjRtClient,
}
impl PjRtLoadedExecutable {
/// The client that owns this executable.
pub fn client(&self) -> &super::PjRtClient {
&self.client
}
fn process_execute_outputs(
&self,
outputs: *mut *mut c_lib::pjrt_buffer,
) -> Vec<Vec<PjRtBuffer>> {
unsafe {
let mut vec = vec![];
loop {
let outputs = *outputs.add(vec.len());
if outputs.is_null() {
break;
}
let mut replica_vec = vec![];
loop {
let buffer = *outputs.add(replica_vec.len());
if buffer.is_null() {
break;
}
replica_vec.push(PjRtBuffer { buffer, client: self.client.clone() });
}
libc::free(outputs as *mut libc::c_void);
vec.push(replica_vec);
}
libc::free(outputs as *mut libc::c_void);
vec
}
}
pub fn execute<L: std::borrow::Borrow<Literal>>(
&self,
args: &[L],
) -> Result<Vec<Vec<PjRtBuffer>>> {
let mut outputs = std::ptr::null_mut();
let args: Vec<_> = args.iter().map(|x| x.borrow().0).collect();
let status =
unsafe { c_lib::execute(self.exe, args.as_ptr(), args.len() as i32, &mut outputs) };
super::handle_status(status)?;
Ok(self.process_execute_outputs(outputs))
}
pub fn execute_b<L: std::borrow::Borrow<PjRtBuffer>>(
&self,
args: &[L],
) -> Result<Vec<Vec<PjRtBuffer>>> {
let mut outputs = std::ptr::null_mut();
let args: Vec<_> = args.iter().map(|x| x.borrow().buffer).collect();
let status =
unsafe { c_lib::execute_b(self.exe, args.as_ptr(), args.len() as i32, &mut outputs) };
super::handle_status(status)?;
Ok(self.process_execute_outputs(outputs))
}
}
impl Drop for PjRtLoadedExecutable {
fn drop(&mut self) {
unsafe { c_lib::pjrt_loaded_executable_free(self.exe) }
}
}
| ivy/ivy/engines/XLA/rust_api/src/wrappers/pjrt_loaded_executable.rs/0 | {
"file_path": "ivy/ivy/engines/XLA/rust_api/src/wrappers/pjrt_loaded_executable.rs",
"repo_id": "ivy",
"token_count": 1228
} | 15 |
"""Collection of Jax device functions, wrapped to fit Ivy syntax and
signature."""
# global
import os
import jax
from typing import Union, Optional
import jaxlib.xla_extension
# local
import ivy
from ivy.functional.backends.jax import JaxArray
from ivy.functional.ivy.device import (
_shift_native_arrays_on_default_device,
Profiler as BaseProfiler,
)
# Helpers #
# --------#
def _to_array(x):
if isinstance(x, jax.interpreters.ad.JVPTracer):
return _to_array(x.primal)
elif isinstance(x, jax.interpreters.partial_eval.DynamicJaxprTracer):
return _to_array(x.aval)
elif isinstance(x, jax.interpreters.batching.BatchTracer):
return _to_array(x.val)
return x
# API #
# ----#
def dev(
x: JaxArray,
/,
*,
as_native: bool = False,
) -> Union[ivy.Device, jaxlib.xla_extension.Device]:
if isinstance(x, jax.interpreters.partial_eval.DynamicJaxprTracer):
return ""
if hasattr(x, "device_buffer"):
dv = _to_array(x).device_buffer.device()
else:
dv = jax.devices()[0]
return dv if as_native else as_ivy_dev(dv)
def to_device(
x: JaxArray,
device: jaxlib.xla_extension.Device,
/,
*,
stream: Optional[int] = None,
out: Optional[JaxArray] = None,
):
if device is not None:
cur_dev = as_native_dev(dev(x))
if cur_dev != device:
x = jax.device_put(x, as_native_dev(device))
return x
# non-wrapped helper used internally to place JAX arrays on their target devices;
# the public, wrapped ``to_device`` would return an ivy.Array, which is not desirable here
def _to_device(x, device=None):
if device is not None:
cur_dev = as_native_dev(dev(x))
if cur_dev != device:
x = jax.device_put(x, as_native_dev(device))
return x
def as_ivy_dev(device, /):
if isinstance(device, str):
return ivy.Device(device)
if device is None:
return None
p, dev_id = (device.platform, device.id)
if p == "cpu":
return ivy.Device(p)
return ivy.Device(p + ":" + str(dev_id))
def as_native_dev(device, /):
if not isinstance(device, str):
return device
dev_split = ivy.Device(device).split(":")
device = dev_split[0]
if len(dev_split) > 1:
idx = int(dev_split[1])
else:
idx = 0
return jax.devices(device)[idx]
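# Illustrative sketch (added for exposition, not part of the original module): shows a
# round trip between an ivy device string and a native JAX device using the two helpers
# above. The "cpu" target is an assumption; "gpu:0" would behave analogously on a GPU host.
def _example_device_round_trip():
    native = as_native_dev("cpu")  # -> jax.devices("cpu")[0]
    ivy_dev = as_ivy_dev(native)  # -> ivy.Device("cpu")
    return native, ivy_dev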
def handle_soft_device_variable(*args, fn, **kwargs):
args, kwargs, device_shifting_dev = _shift_native_arrays_on_default_device(
*args, **kwargs
)
with jax.default_device(device_shifting_dev):
return fn(*args, **kwargs)
def clear_cached_mem_on_dev(device: str, /):
return None
def _dev_is_available(base_dev):
try:
jax.devices(base_dev)
return True
except RuntimeError:
return False
def gpu_is_available() -> bool:
return _dev_is_available("gpu")
def num_gpus() -> int:
try:
return len(jax.devices("gpu"))
except RuntimeError:
return 0
def tpu_is_available() -> bool:
return _dev_is_available("tpu")
# noinspection PyMethodMayBeStatic
class Profiler(BaseProfiler):
def __init__(self, save_dir: str):
super().__init__(save_dir)
self._save_dir = os.path.join(self._save_dir, "profile")
def start(self):
jax.profiler.start_trace(self._save_dir)
def stop(self):
jax.profiler.stop_trace()
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
| ivy/ivy/functional/backends/jax/device.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/device.py",
"repo_id": "ivy",
"token_count": 1539
} | 16 |
# global
from typing import Optional, Union, Sequence
import jax.numpy as jnp
import jax
import jaxlib.xla_extension
# local
import ivy
from ivy.functional.backends.jax import JaxArray
from ivy.functional.backends.jax.random import RNG, _setRNG, _getRNG # noqa
from ivy.functional.ivy.random import (
_check_bounds_and_get_shape,
_check_shapes_broadcastable,
)
from ivy.func_wrapper import with_unsupported_dtypes
from .. import backend_version
# Extra #
# ----- #
# dirichlet
def dirichlet(
alpha: Union[JaxArray, float, Sequence[float]],
/,
*,
size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
dtype: Optional[jnp.dtype] = None,
seed: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if seed is not None:
rng_input = jax.random.PRNGKey(seed)
else:
RNG_, rng_input = jax.random.split(_getRNG())
_setRNG(RNG_)
return jax.random.dirichlet(rng_input, alpha, shape=size, dtype=dtype)
def beta(
a: Union[float, JaxArray],
b: Union[float, JaxArray],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[jaxlib.xla_extension.Device] = None,
dtype: Optional[jnp.dtype] = None,
seed: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
    shape = _check_bounds_and_get_shape(a, b, shape).shape
    if seed is not None:
        # an explicit seed gives a deterministic key; otherwise split and advance
        # the module-level RNG
        rng_input = jax.random.PRNGKey(seed)
    else:
        RNG_, rng_input = jax.random.split(_getRNG())
        _setRNG(RNG_)
    return jax.random.beta(rng_input, a, b, shape, dtype)
@with_unsupported_dtypes({"0.4.24 and below": ("bfloat16",)}, backend_version)
def gamma(
alpha: Union[float, JaxArray],
beta: Union[float, JaxArray],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[jaxlib.xla_extension.Device] = None,
dtype: Optional[jnp.dtype] = None,
seed: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
    shape = _check_bounds_and_get_shape(alpha, beta, shape).shape
    if seed is not None:
        # an explicit seed gives a deterministic key; otherwise split and advance
        # the module-level RNG
        rng_input = jax.random.PRNGKey(seed)
    else:
        RNG_, rng_input = jax.random.split(_getRNG())
        _setRNG(RNG_)
    return jax.random.gamma(rng_input, alpha, shape, dtype) / beta
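# Illustrative sketch (added for exposition, not part of the original module): with an
# explicit ``seed`` the samplers above are deterministic, otherwise the module-level RNG
# is split and advanced. The parameter values, shape and dtype below are assumptions.
def _example_seeded_beta():
    import jax.numpy as jnp

    first = beta(2.0, 3.0, shape=(2,), dtype=jnp.float32, seed=0)
    second = beta(2.0, 3.0, shape=(2,), dtype=jnp.float32, seed=0)
    return jnp.allclose(first, second)  # True: the same seed reproduces the same draws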
def poisson(
lam: Union[float, JaxArray],
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[jaxlib.xla_extension.Device] = None,
dtype: Optional[jnp.dtype] = None,
seed: Optional[int] = None,
fill_value: Optional[Union[float, int]] = 0,
out: Optional[JaxArray] = None,
) -> JaxArray:
lam = jnp.array(lam)
if seed:
rng_input = jax.random.PRNGKey(seed)
else:
RNG_, rng_input = jax.random.split(_getRNG())
_setRNG(RNG_)
if shape is not None:
shape = jnp.array(shape)
list_shape = shape.tolist()
_check_shapes_broadcastable(lam.shape, list_shape)
else:
list_shape = None
if jnp.any(lam < 0):
pos_lam = jnp.where(lam < 0, 0, lam)
ret = jax.random.poisson(rng_input, pos_lam, shape=list_shape).astype(dtype)
ret = jnp.where(lam < 0, fill_value, ret)
else:
ret = jax.random.poisson(rng_input, lam, shape=list_shape).astype(dtype)
return ret
def bernoulli(
probs: Union[float, JaxArray],
*,
logits: Optional[Union[float, JaxArray]] = None,
shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None,
device: Optional[jaxlib.xla_extension.Device] = None,
dtype: Optional[jnp.dtype] = None,
seed: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
dtype = dtype if dtype is not None else probs.dtype
if seed:
rng_input = jax.random.PRNGKey(seed)
else:
RNG_, rng_input = jax.random.split(_getRNG())
_setRNG(RNG_)
if logits is not None:
probs = jax.nn.softmax(logits, axis=-1)
if hasattr(probs, "shape") and not _check_shapes_broadcastable(shape, probs.shape):
shape = probs.shape
return jax.random.bernoulli(rng_input, probs, shape=shape).astype(dtype)
| ivy/ivy/functional/backends/jax/experimental/random.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/experimental/random.py",
"repo_id": "ivy",
"token_count": 1810
} | 17 |
# global
import jax.numpy as jnp
from typing import Optional, Literal, Union, List
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.backends.jax import JaxArray
from . import backend_version
def argsort(
x: JaxArray,
/,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
out: Optional[JaxArray] = None,
) -> JaxArray:
kind = "stable" if stable else "quicksort"
return (
jnp.argsort(-x, axis=axis, kind=kind)
if descending
else jnp.argsort(x, axis=axis, kind=kind)
)
def sort(
x: JaxArray,
/,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
out: Optional[JaxArray] = None,
) -> JaxArray:
kind = "stable" if stable else "quicksort"
ret = jnp.asarray(jnp.sort(x, axis=axis, kind=kind))
if descending:
ret = jnp.asarray(jnp.flip(ret, axis=axis))
return ret
def searchsorted(
x: JaxArray,
v: JaxArray,
/,
*,
side: Literal["left", "right"] = "left",
sorter: Optional[Union[JaxArray, List[int]]] = None,
ret_dtype: jnp.dtype = jnp.int64,
out: Optional[JaxArray] = None,
) -> JaxArray:
assert ivy.is_int_dtype(ret_dtype), ValueError(
"only Integer data types are supported for ret_dtype."
)
if sorter is not None:
assert ivy.is_int_dtype(sorter.dtype), TypeError(
f"Only signed integer data type for sorter is allowed, got {sorter.dtype}."
)
x = jnp.take_along_axis(x, sorter, axis=-1)
if x.ndim != 1:
assert x.shape[:-1] == v.shape[:-1], RuntimeError(
"the first N-1 dimensions of x array and v array "
f"must match, got {x.shape} and {v.shape}"
)
original_shape = v.shape
out_array = [] # JAX arrays are immutable.
x = x.reshape(-1, x.shape[-1])
v = v.reshape(-1, v.shape[-1])
for i in range(x.shape[0]):
out_array.append(jnp.searchsorted(x[i], v[i], side=side))
ret = jnp.array(out_array).reshape(original_shape)
else:
ret = jnp.searchsorted(x, v, side=side)
return ret.astype(ret_dtype)
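# Illustrative sketch (added for exposition, not part of the original module): the
# batched branch above runs one ``jnp.searchsorted`` per leading row, since JAX arrays
# are immutable and cannot be filled in place. Values below are assumptions.
def _example_batched_searchsorted():
    sorted_rows = jnp.array([[1.0, 3.0, 5.0], [2.0, 4.0, 6.0]])
    queries = jnp.array([[2.0], [5.0]])
    # row 0: 2.0 inserts at index 1; row 1: 5.0 inserts at index 2
    return searchsorted(sorted_rows, queries, side="left")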
# msort
@with_unsupported_dtypes({"0.4.24 and below": ("complex",)}, backend_version)
def msort(
a: Union[JaxArray, list, tuple],
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.sort(a, axis=0, kind="mergesort")
| ivy/ivy/functional/backends/jax/sorting.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/sorting.py",
"repo_id": "ivy",
"token_count": 1113
} | 18 |
from typing import Union, Optional, Tuple, List, Sequence
from numbers import Number
import mxnet as mx
from ivy.utils.exceptions import IvyNotImplementedException
from ivy.func_wrapper import with_supported_dtypes
from .. import backend_version
def amax(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def amin(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
@with_supported_dtypes(
{"1.9.1 and below": ("float16", "float32", "float64")},
backend_version,
)
def lgamma(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
return mx.np.log(mx.npx.gamma(x))
def sinc(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def fmax(
x1: Union[(None, mx.ndarray.NDArray)],
x2: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def float_power(
x1: Union[(None, mx.ndarray.NDArray, float, list, tuple)],
x2: Union[(None, mx.ndarray.NDArray, float, list, tuple)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def copysign(
x1: Union[(None, mx.ndarray.NDArray, Number)],
x2: Union[(None, mx.ndarray.NDArray, Number)],
/,
*,
out: Optional[None] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def count_nonzero(
a: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[(int, Tuple[(int, ...)])]] = None,
keepdims: bool = False,
dtype: Optional[None] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def nansum(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[(Tuple[(int, ...)], int)]] = None,
dtype: Optional[None] = None,
keepdims: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def gcd(
x1: Union[(None, mx.ndarray.NDArray, int, list, tuple)],
x2: Union[(None, mx.ndarray.NDArray, float, list, tuple)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def isclose(
a: Union[(None, mx.ndarray.NDArray)],
b: Union[(None, mx.ndarray.NDArray)],
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def signbit(
x: Union[(None, mx.ndarray.NDArray, float, int, list, tuple)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def hypot(
x1: Union[(None, mx.ndarray.NDArray)],
x2: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def allclose(
x1: Union[(None, mx.ndarray.NDArray)],
x2: Union[(None, mx.ndarray.NDArray)],
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> bool:
raise IvyNotImplementedException()
def fix(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def nextafter(
x1: Union[(None, mx.ndarray.NDArray)],
x2: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def diff(
x: Union[(None, mx.ndarray.NDArray, list, tuple)],
/,
*,
n: int = 1,
axis: int = (-1),
prepend: Optional[
Union[(None, mx.ndarray.NDArray, int, float, list, tuple)]
] = None,
append: Optional[Union[(None, mx.ndarray.NDArray, int, float, list, tuple)]] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def zeta(
x: Union[(None, mx.ndarray.NDArray)],
q: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def gradient(
x: None,
/,
*,
spacing: Union[(int, list, tuple)] = 1,
axis: Optional[Union[(int, list, tuple)]] = None,
edge_order: int = 1,
) -> Union[(None, List[None])]:
raise IvyNotImplementedException()
def xlogy(
x: Union[(None, mx.ndarray.NDArray)],
y: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def conj(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def ldexp(
x1: Union[(None, mx.ndarray.NDArray)],
x2: Union[(None, mx.ndarray.NDArray, int)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def frexp(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[
Union[(Tuple[(None, None)], Tuple[(mx.ndarray.NDArray, mx.ndarray.NDArray)])]
] = None,
) -> Union[(Tuple[(None, None)], Tuple[(mx.ndarray.NDArray, mx.ndarray.NDArray)])]:
raise IvyNotImplementedException()
def modf(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/experimental/elementwise.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/experimental/elementwise.py",
"repo_id": "ivy",
"token_count": 3079
} | 19 |
"""Collection of MXNet network layers, wrapped to fit Ivy syntax and
signature."""
# global
import mxnet as mx
from typing import Optional, Tuple, Union, Sequence
import ivy
# local
from ivy.utils.exceptions import IvyNotImplementedException
def conv1d(
x: Union[(None, mx.ndarray.NDArray)],
filters: Union[(None, mx.ndarray.NDArray)],
strides: Union[(int, Tuple[int])],
padding: Union[(str, int, Sequence[Tuple[(int, int)]])],
/,
*,
data_format: str = "NWC",
dilations: Union[(int, Tuple[int])] = 1,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def conv1d_transpose(
x: Union[(None, mx.ndarray.NDArray)],
filters: Union[(None, mx.ndarray.NDArray)],
strides: Union[(int, Tuple[int])],
padding: str,
/,
*,
output_shape: Optional[Union[(ivy.NativeShape, Sequence[int])]] = None,
filter_format: str = "channel_last",
data_format: str = "NWC",
dilations: Union[(int, Tuple[int])] = 1,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
):
raise IvyNotImplementedException()
def conv2d(
x: Union[(None, mx.ndarray.NDArray)],
filters: Union[(None, mx.ndarray.NDArray)],
strides: Union[(int, Tuple[(int, int)])],
padding: Union[(str, int, Sequence[Tuple[(int, int)]])],
/,
*,
data_format: str = "NHWC",
dilations: Union[(int, Tuple[(int, int)])] = 1,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def conv2d_transpose(
x: Union[(None, mx.ndarray.NDArray)],
filters: Union[(None, mx.ndarray.NDArray)],
strides: Union[(int, Tuple[(int, int)])],
padding: str,
/,
*,
output_shape: Optional[Union[(ivy.NativeShape, Sequence[int])]] = None,
filter_format: str = "channel_last",
data_format: str = "NHWC",
dilations: Union[(int, Tuple[(int, int)])] = 1,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
):
raise IvyNotImplementedException()
def depthwise_conv2d(
x: Union[(None, mx.ndarray.NDArray)],
filters: Union[(None, mx.ndarray.NDArray)],
strides: Union[(int, Tuple[(int, int)])],
padding: Union[(str, int, Sequence[Tuple[(int, int)]])],
/,
*,
data_format: str = "NHWC",
dilations: Union[(int, Tuple[(int, int)])] = 1,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def conv3d(
x: Union[(None, mx.ndarray.NDArray)],
filters: Union[(None, mx.ndarray.NDArray)],
strides: Union[(int, Tuple[(int, int, int)])],
padding: Union[(str, int, Sequence[Tuple[(int, int)]])],
/,
*,
data_format: str = "NDHWC",
dilations: Union[(int, Tuple[(int, int, int)])] = 1,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
):
raise IvyNotImplementedException()
def conv3d_transpose(
x: None,
filters: None,
strides: Union[(int, Tuple[(int, int, int)])],
padding: str,
/,
*,
output_shape: Optional[Union[(ivy.NativeShape, Sequence[int])]] = None,
filter_format: str = "channel_last",
data_format: str = "NDHWC",
dilations: Union[(int, Tuple[(int, int, int)])] = 1,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> None:
raise IvyNotImplementedException()
def conv_general_dilated(
x: Union[(None, mx.ndarray.NDArray)],
filters: Union[(None, mx.ndarray.NDArray)],
strides: Union[(int, Tuple[int], Tuple[(int, int)], Tuple[(int, int, int)])],
padding: Union[(str, int, Sequence[Tuple[(int, int)]])],
/,
*,
dims: int = 2,
data_format: str = "channel_last",
feature_group_count: int = 1,
x_dilations: Union[
(int, Tuple[int], Tuple[(int, int)], Tuple[(int, int, int)])
] = 1,
dilations: Union[(int, Tuple[int], Tuple[(int, int)], Tuple[(int, int, int)])] = 1,
bias: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def conv_general_transpose(
x: Union[(None, mx.ndarray.NDArray)],
filters: Union[(None, mx.ndarray.NDArray)],
strides: Union[(int, Tuple[(int, int)])],
padding: str,
/,
*,
dims: int = 2,
filter_format: str = "channel_last",
data_format: str = "channel_last",
output_shape: Optional[Union[(ivy.NativeShape, Sequence[int])]] = None,
dilations: Union[(int, Tuple[int], Tuple[(int, int)], Tuple[(int, int, int)])] = 1,
feature_group_count: int = 1,
bias: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/layers.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/layers.py",
"repo_id": "ivy",
"token_count": 2124
} | 20 |
"""Collection of Numpy general functions, wrapped to fit Ivy syntax and
signature."""
# global
import os
import time
import logging
import numpy as np
from typing import Union, Optional, Any
# local
import ivy
from ivy.functional.ivy.device import Profiler as BaseProfiler
def dev(x: np.ndarray, /, *, as_native: bool = False) -> Union[ivy.Device, str]:
if as_native:
return "cpu"
return as_ivy_dev("cpu")
def as_ivy_dev(device: str, /):
if "gpu" in device:
logging.warning(
"Native Numpy does not support GPU placement, consider using Jax instead"
)
return ivy.Device("cpu")
def as_native_dev(device: str, /):
if "gpu" in device:
logging.warning(
"Native Numpy does not support GPU placement, consider using Jax instead"
)
return "cpu"
def clear_cached_mem_on_dev(device: str, /):
return None
def tpu_is_available() -> bool:
return False
def num_gpus() -> int:
return 0
def gpu_is_available() -> bool:
return False
def to_device(
x: np.ndarray,
device: str,
/,
*,
stream: Optional[Union[int, Any]] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if device is not None:
device = as_native_dev(device)
return x
def handle_soft_device_variable(*args, fn, **kwargs):
return fn(*args, **kwargs)
class Profiler(BaseProfiler):
def __init__(self, save_dir: str):
        # TODO: add proper numpy profiler
super().__init__(save_dir)
os.makedirs(save_dir, exist_ok=True)
self._start_time = None
def start(self):
self._start_time = time.perf_counter()
def stop(self):
time_taken = time.perf_counter() - self._start_time
with open(os.path.join(self._save_dir, "profile.log"), "w+") as f:
f.write(f"took {time_taken} seconds to complete")
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
| ivy/ivy/functional/backends/numpy/device.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/device.py",
"repo_id": "ivy",
"token_count": 831
} | 21 |
# global
from typing import Optional, Union, Sequence
import numpy as np
# local
import ivy
from ivy.functional.ivy.random import (
_check_bounds_and_get_shape,
_check_shapes_broadcastable,
)
# dirichlet
def dirichlet(
alpha: Union[np.ndarray, float, Sequence[float]],
/,
*,
size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
dtype: Optional[np.dtype] = None,
seed: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
size = size if size is not None else len(alpha)
dtype = dtype if dtype is not None else np.float64
if seed is not None:
np.random.seed(seed)
return np.asarray(np.random.dirichlet(alpha, size=size), dtype=dtype)
dirichlet.support_native_out = False
def beta(
alpha: Union[float, np.ndarray],
beta: Union[float, np.ndarray],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[str] = None,
dtype: Optional[np.dtype] = None,
seed: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
shape = _check_bounds_and_get_shape(alpha, beta, shape).shape
if seed is not None:
np.random.seed(seed)
return np.asarray(np.random.beta(alpha, beta, shape), dtype=dtype)
def gamma(
alpha: Union[float, np.ndarray],
beta: Union[float, np.ndarray],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[str] = None,
dtype: Optional[np.dtype] = None,
seed: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
shape = _check_bounds_and_get_shape(alpha, beta, shape).shape
if seed is not None:
np.random.seed(seed)
return np.asarray(np.random.gamma(alpha, beta, shape), dtype=dtype)
def poisson(
lam: Union[float, np.ndarray],
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[str] = None,
dtype: Optional[np.dtype] = None,
seed: Optional[int] = None,
fill_value: Optional[Union[float, int]] = 0,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
lam = np.array(lam)
if seed:
np.random.seed(seed)
if shape is not None:
_check_shapes_broadcastable(lam.shape, shape)
if np.any(lam < 0):
pos_lam = np.where(lam < 0, 0, lam)
ret = np.random.poisson(pos_lam, shape)
ret = np.where(lam < 0, fill_value, ret)
else:
ret = np.random.poisson(lam, shape)
return np.asarray(ret, dtype=dtype)
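# Illustrative sketch (added for exposition, not part of the original module): negative
# rates are invalid for a Poisson draw, so the branch above samples with the rate
# clamped to zero and then writes ``fill_value`` at those positions. Values are assumptions.
def _example_poisson_fill_value():
    lam = np.array([3.0, -1.0])
    draws = poisson(lam, shape=(2,), dtype=np.int64, seed=1, fill_value=-999)
    # draws[1] == -999 because its rate was negative
    return draws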
def bernoulli(
probs: Union[float, np.ndarray],
*,
logits: Optional[Union[float, np.ndarray]] = None,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[str] = None,
dtype: Optional[np.dtype] = None,
seed: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
dtype = dtype if dtype is not None else probs.dtype
if seed is not None:
np.random.seed(seed)
if logits is not None:
probs = np.asarray(ivy.softmax(logits), dtype=dtype)
if not _check_shapes_broadcastable(shape, probs.shape):
shape = probs.shape
return np.asarray(np.random.binomial(1, p=probs, size=shape), dtype=dtype)
| ivy/ivy/functional/backends/numpy/experimental/random.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/experimental/random.py",
"repo_id": "ivy",
"token_count": 1371
} | 22 |
# global
import numpy as np
from typing import Tuple, Optional
from collections import namedtuple
from packaging import version
# local
import ivy
def unique_all(
x: np.ndarray,
/,
*,
axis: Optional[int] = None,
by_value: bool = True,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
Results = namedtuple(
"Results",
["values", "indices", "inverse_indices", "counts"],
)
values, indices, inverse_indices, counts = np.unique(
x,
return_index=True,
return_counts=True,
return_inverse=True,
axis=axis,
)
nan_count = np.sum(np.isnan(x)).item()
if (nan_count > 1) & (np.sum(np.isnan(values)).item() == 1):
counts[np.where(np.isnan(values))[0]] = 1
counts = np.append(counts, np.full(fill_value=1, shape=(nan_count - 1,)))
values = np.append(
values, np.full(fill_value=np.nan, shape=(nan_count - 1,)), axis=0
)
nan_idx = np.where(np.isnan(x.flatten()))[0]
indices = np.concatenate((indices[:-1], nan_idx), axis=0)
if not by_value:
sort_idx = np.argsort(indices)
values = np.take(values, sort_idx, axis=axis)
counts = np.take(counts, sort_idx)
indices = np.take(indices, sort_idx)
inv_sort_idx = ivy.current_backend().invert_permutation(sort_idx)
inverse_indices = np.vectorize(lambda y: np.take(inv_sort_idx, y))(
inverse_indices
)
return Results(
values.astype(x.dtype),
indices,
inverse_indices,
counts,
)
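# Illustrative sketch (added for exposition, not part of the original module): NaN never
# compares equal to itself, so the block above re-expands NaNs that ``np.unique``
# collapsed into a single entry. Input values are assumptions; the exact behaviour of
# ``np.unique`` with NaNs also depends on the NumPy version.
def _example_unique_all_nans():
    x = np.array([np.nan, 1.0, np.nan, 1.0])
    res = unique_all(x)
    # res.values keeps two separate NaN entries, each reported with a count of 1
    return res.values, res.counts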
def unique_counts(
x: np.ndarray,
/,
) -> Tuple[np.ndarray, np.ndarray]:
v, c = np.unique(x, return_counts=True)
nan_count = np.count_nonzero(np.isnan(x))
if nan_count > 1:
nan_idx = np.where(np.isnan(v))
c[nan_idx] = 1
v = np.append(v, np.full(nan_count - 1, np.nan)).astype(x.dtype)
c = np.append(c, np.full(nan_count - 1, 1)).astype("int32")
Results = namedtuple("Results", ["values", "counts"])
return Results(v, c)
def unique_inverse(
x: np.ndarray,
/,
*,
axis: Optional[int] = None,
) -> Tuple[np.ndarray, np.ndarray]:
Results = namedtuple("Results", ["values", "inverse_indices"])
values, inverse_indices = np.unique(x, return_inverse=True, axis=axis)
nan_count = np.count_nonzero(np.isnan(x))
if nan_count > 1:
values = np.append(values, np.full(nan_count - 1, np.nan), axis=axis).astype(
x.dtype
)
inverse_indices = np.reshape(inverse_indices, x.shape)
return Results(values, inverse_indices)
def unique_values(x: np.ndarray, /, *, out: Optional[np.ndarray] = None) -> np.ndarray:
nan_count = np.count_nonzero(np.isnan(x))
if version.parse(np.__version__) >= version.parse("1.21.0") and nan_count > 1:
unique = np.append(
np.unique(x.flatten()), np.full(nan_count - 1, np.nan)
).astype(x.dtype)
else:
unique = np.unique(x.flatten()).astype(x.dtype)
return unique
| ivy/ivy/functional/backends/numpy/set.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/set.py",
"repo_id": "ivy",
"token_count": 1435
} | 23 |
# global
from typing import Optional, Tuple, Union
import math
import paddle
import ivy.functional.backends.paddle as paddle_backend
from paddle.device import core
from ivy.func_wrapper import (
with_supported_dtypes,
with_unsupported_device_and_dtypes,
)
# local
import ivy
from .. import backend_version
# noinspection PyProtectedMember
# Helpers for calculating Window Functions
# ----------------------------------------
# Code from cephes for i0
def _kaiser_window(window_length, beta):
n = paddle.arange(0, window_length)
alpha = (window_length - 1) / 2.0
return paddle_backend.i0(
beta * paddle.sqrt(1 - paddle_backend.divide((n - alpha), alpha) ** 2.0)
) / paddle_backend.i0(beta)
# Array API Standard #
# -------------------#
def kaiser_window(
window_length: int,
periodic: bool = True,
beta: float = 12.0,
*,
dtype: Optional[paddle.dtype] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if window_length < 2:
return paddle.ones([window_length], dtype=dtype)
if periodic is False:
return _kaiser_window(window_length, beta).cast(dtype)
else:
return _kaiser_window(window_length + 1, beta)[:-1].cast(dtype)
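# Illustrative sketch (added for exposition, not part of the original module): the helper
# above evaluates w[n] = I0(beta * sqrt(1 - ((n - alpha) / alpha) ** 2)) / I0(beta) with
# alpha = (N - 1) / 2, so the window peaks at 1 in the centre and tapers towards the
# edges. The length, beta and dtype below are assumptions.
def _example_kaiser_window():
    win = kaiser_window(5, periodic=False, beta=14.0, dtype="float32")
    # the centre sample is 1.0 and the edge samples are close to 0
    return win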
def vorbis_window(
window_length: paddle.Tensor,
*,
dtype: Optional[paddle.dtype] = paddle.float32,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if window_length == 0:
return paddle.to_tensor([], dtype=dtype)
i = paddle_backend.arange(1, window_length * 2, 2)
pi = paddle.full(shape=i.shape, fill_value=math.pi)
return paddle.sin((pi / 2) * (paddle.sin(pi * i / (window_length * 2)) ** 2)).cast(
dtype
)
def hann_window(
size: int,
/,
*,
periodic: Optional[bool] = True,
dtype: Optional[paddle.dtype] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if size < 2:
return paddle.ones([size], dtype=dtype)
if periodic:
count = paddle.arange(size) / size
else:
count = paddle.linspace(start=0, stop=size, num=size)
return (0.5 - 0.5 * paddle.cos(2 * math.pi * count)).cast(dtype)
def tril_indices(
n_rows: int,
n_cols: Optional[int] = None,
k: Optional[int] = 0,
/,
*,
device: core.Place = None,
) -> Tuple[paddle.Tensor, ...]:
# special case due to inconsistent behavior when n_cols=1 and n_rows=0
if not (n_cols and n_rows):
return paddle.to_tensor([], dtype="int64"), paddle.to_tensor([], dtype="int64")
return tuple(paddle.tril_indices(n_rows, col=n_cols, offset=k, dtype="int64"))
@with_supported_dtypes(
{"2.4.2 and below": ("float64", "float32", "int32", "int64")},
backend_version,
)
def unsorted_segment_min(
data: paddle.Tensor,
segment_ids: paddle.Tensor,
num_segments: Union[int, paddle.Tensor],
) -> paddle.Tensor:
ivy.utils.assertions.check_unsorted_segment_valid_params(
data, segment_ids, num_segments
)
if data.dtype == paddle.float32:
init_val = 3.4028234663852886e38 # float32 max
elif data.dtype == paddle.float64:
init_val = 1.7976931348623157e308 # float64 max
elif data.dtype == paddle.int32:
init_val = 2147483647
elif data.dtype == paddle.int64:
init_val = 9223372036854775807
else:
raise TypeError("Unsupported data type")
    # paddle.full overflows for int64 initial values, so allocate with empty and fill
res = paddle.empty((num_segments,) + tuple(data.shape[1:]), dtype=data.dtype)
res[:] = init_val
for i in range(num_segments):
mask_index = segment_ids == i
if paddle.any(mask_index):
res[i] = paddle.min(data[mask_index], 0)
return res
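# Illustrative sketch (added for exposition, not part of the original module): each
# output row holds the minimum of the data rows whose segment id matches it, and
# segments that receive no rows keep the dtype-maximum initial value. Values below are
# assumptions.
def _example_unsorted_segment_min():
    data = paddle.to_tensor([4.0, 1.0, 7.0, 2.0], dtype="float32")
    segment_ids = paddle.to_tensor([0, 0, 1, 1], dtype="int32")
    # -> [1.0, 2.0]
    return unsorted_segment_min(data, segment_ids, 2)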
def blackman_window(
size: int,
/,
*,
periodic: Optional[bool] = True,
dtype: Optional[paddle.dtype] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if size < 2:
return paddle.ones([size], dtype=dtype)
if periodic:
count = paddle.arange(size) / size
else:
count = paddle.linspace(start=0, stop=size, num=size)
return (
(0.42 - 0.5 * paddle.cos(2 * math.pi * count))
+ (0.08 * paddle.cos(2 * math.pi * 2 * count))
).cast(dtype)
def unsorted_segment_sum(
data: paddle.Tensor,
segment_ids: paddle.Tensor,
num_segments: Union[int, paddle.Tensor],
) -> paddle.Tensor:
    # Reuse the same parameter validation as unsorted_segment_min, since the
    # requirements on data, segment_ids and num_segments are identical
ivy.utils.assertions.check_unsorted_segment_valid_params(
data, segment_ids, num_segments
)
# Sum computation in paddle does not support int32, so needs to
# be converted to float32
needs_conv = False
if data.dtype == paddle.int32:
data = paddle.cast(data, "float32")
needs_conv = True
res = paddle.zeros((num_segments,) + tuple(data.shape[1:]), dtype=data.dtype)
for i in range(num_segments):
mask_index = segment_ids == i
if paddle.any(mask_index):
res[i] = paddle.sum(data[mask_index], axis=0)
# condition for converting float32 back to int32
if needs_conv is True:
res = paddle.cast(res, "int32")
return res
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"uint8",
"complex",
)
}
},
backend_version,
)
def trilu(
x: paddle.Tensor,
/,
*,
k: int = 0,
upper: bool = True,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if upper:
return paddle.triu(x=x, diagonal=k)
return paddle.tril(x=x, diagonal=k)
def mel_weight_matrix(
num_mel_bins: int,
dft_length: int,
sample_rate: int,
lower_edge_hertz: float = 0.0,
upper_edge_hertz: float = 3000.0,
):
n_fft = (dft_length - 1) * 2
mel_mat = paddle.audio.functional.compute_fbank_matrix(
sample_rate,
n_fft,
num_mel_bins,
lower_edge_hertz,
upper_edge_hertz,
)
return paddle.transpose(mel_mat, (1, 0))
def unsorted_segment_mean(
data: paddle.Tensor,
segment_ids: paddle.Tensor,
num_segments: Union[int, paddle.Tensor],
) -> paddle.Tensor:
ivy.utils.assertions.check_unsorted_segment_valid_params(
data, segment_ids, num_segments
)
# Sum computation in paddle does not support int32, so needs to
# be converted to float32
needs_conv = False
if data.dtype == paddle.int32:
data = paddle.cast(data, "float32")
needs_conv = True
res = paddle.zeros((num_segments,) + tuple(data.shape[1:]), dtype=data.dtype)
count = paddle.bincount(segment_ids)
count = paddle.where(count > 0, count, paddle.to_tensor([1], dtype="int32"))
res = unsorted_segment_sum(data, segment_ids, num_segments)
res = res / paddle.reshape(count, (-1, 1))
# condition for converting float32 back to int32
if needs_conv is True:
res = paddle.cast(res, "int32")
return res
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("float16", "int8", "int16", "uint8", "complex", "bool")
}
},
backend_version,
)
def polyval(
coeffs: paddle.Tensor,
x: paddle.Tensor,
) -> paddle.Tensor:
with ivy.PreciseMode(True):
promoted_type = ivy.promote_types(ivy.dtype(coeffs[0]), ivy.dtype(x[0]))
coeffs, x = ivy.promote_types_of_inputs(coeffs, x)
y = paddle.zeros_like(x)
for coeff in coeffs:
y = y * x + coeff
y = paddle.to_tensor(y)
y = y.astype(promoted_type)
return y
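# Illustrative sketch (added for exposition, not part of the original module): the loop
# above is Horner's rule, evaluating coeffs[0] * x**(n-1) + ... + coeffs[n-1] with n-1
# multiply-adds. The coefficient and input values below are assumptions.
def _example_polyval_horner():
    coeffs = paddle.to_tensor([2.0, 0.0, 1.0])  # 2*x**2 + 1
    x = paddle.to_tensor([3.0])
    # ((0*3 + 2)*3 + 0)*3 + 1 = 19
    return polyval(coeffs, x)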
| ivy/ivy/functional/backends/paddle/experimental/creation.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/experimental/creation.py",
"repo_id": "ivy",
"token_count": 3384
} | 24 |
# global
from typing import Optional, Union, Tuple, Sequence, Any
import paddle
import ivy.functional.backends.paddle as paddle_backend
import ivy
from copy import deepcopy
# local
from ivy.func_wrapper import (
with_unsupported_device_and_dtypes,
with_supported_dtypes,
)
from . import backend_version
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64", "int32", "int64")},
backend_version,
)
def median(
input: paddle.Tensor,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: Optional[bool] = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if paddle.is_complex(input):
ret = paddle.complex(
paddle.median(input.real(), axis=axis, keepdim=True),
paddle.median(input.imag(), axis=axis, keepdim=True),
)
else:
ret = paddle.median(input, axis=axis, keepdim=True)
    # keepdim is forced to True because, in Paddle versions up to 2.6.0, reducing over
    # an axis that is the tensor's only axis mishandled the result, so the squeeze is
    # applied manually below
if not keepdims:
ret = paddle_backend.squeeze(ret, axis=axis)
# The following code is to simulate other frameworks
# output shapes behaviour since min output dim is 1 in paddle
if isinstance(axis, Sequence):
if len(axis) == input.ndim:
axis = None
if (input.ndim == 1 or axis is None) and not keepdims:
ret = ret.squeeze()
return ret.astype(input.dtype)
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64", "int64")}, backend_version
)
def nanmean(
a: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: Optional[bool] = False,
dtype: Optional[paddle.dtype] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
ret_dtype = dtype if dtype is not None else a.dtype
a = a.cast(ret_dtype)
if paddle.is_complex(a):
ret = paddle.complex(
paddle.nanmean(a.real(), axis=axis, keepdim=keepdims),
paddle.nanmean(a.imag(), axis=axis, keepdim=keepdims),
)
else:
ret = paddle.nanmean(a, axis=axis, keepdim=keepdims)
# The following code is to simulate other frameworks
# output shapes behavior since min output dim is 1 in paddle
if isinstance(axis, Sequence):
if len(axis) == a.ndim:
axis = None
if (a.ndim == 1 or axis is None) and not keepdims:
ret = ret.squeeze()
return ret.astype(ret_dtype)
def _infer_dtype(dtype: paddle.dtype):
default_dtype = ivy.infer_default_dtype(dtype)
if ivy.dtype_bits(dtype) < ivy.dtype_bits(default_dtype):
return default_dtype
return dtype
def _validate_quantile(q):
if isinstance(q, float):
q = paddle.to_tensor(q)
if q.ndim == 1 and q.size < 10:
for i in range(q.size):
if not (0.0 <= q[i] <= 1.0):
return False
else:
if not (paddle.all(q >= 0) and paddle.all(q <= 1)):
return False
return True
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"uint8",
"float16",
"bfloat16",
"complex64",
"complex128",
)
}
},
backend_version,
)
def nanmin(
a: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: Optional[bool] = False,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[paddle.Tensor] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
nan_mask = paddle.isnan(a)
if where is not None:
nan_mask = paddle.logical_or(nan_mask, paddle.logical_not(where))
a_copy = a.clone()
a_copy = paddle.where(nan_mask, paddle.full_like(a_copy, float("inf")), a_copy)
if axis is None:
result = paddle.min(a_copy, keepdim=keepdims)
else:
result = paddle.min(a_copy, axis=axis, keepdim=keepdims)
if initial is not None:
initial = paddle.to_tensor(initial, dtype=a.dtype)
result = paddle.minimum(result, initial)
return result
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, backend_version)
def nanprod(
a: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: Optional[bool] = False,
dtype: Optional[paddle.dtype] = None,
out: Optional[paddle.Tensor] = None,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
dtype = ivy.as_native_dtype(dtype)
if dtype is None:
dtype = _infer_dtype(a.dtype)
a = a.cast(dtype)
if initial is None:
initial = 1
a = paddle.nan_to_num(a, nan=1.0)
ret = paddle.prod(a, axis=axis, keepdim=keepdims) * initial
if isinstance(axis, Sequence):
if len(axis) == a.ndim:
axis = None
if (a.ndim == 1 or axis is None) and not keepdims:
ret = ret.squeeze()
return ret.cast(dtype)
def _to_positive_axis(axis, ndim):
if not isinstance(axis, (list, tuple)):
axis = [axis]
if len(axis) == 0:
raise ValueError("Axis can't be empty!")
if len(set(axis)) != len(axis):
raise ValueError("Duplicated axis!")
for i in range(len(axis)):
if not (isinstance(axis[i], int) and (ndim > axis[i] >= -ndim)):
raise ValueError("Axis must be int in range [-rank(x), rank(x))")
if axis[i] < 0:
axis[i] += ndim
return axis
def _handle_axis(a, q, fn, keepdims=False, axis=None, interpolation="nearest"):
nd = a.ndim
axis_arg = deepcopy(axis)
if axis is not None:
axis = _to_positive_axis(axis, nd)
if len(axis) == 1:
axis_arg = axis[0]
else:
keep = set(range(nd)) - set(axis)
nkeep = len(keep)
for i, s in enumerate(sorted(keep)):
a = a.moveaxis(s, i)
a = a.reshape(
a.shape[:nkeep]
+ [
-1,
]
)
axis_arg = -1
ret = fn(a, q, axis=axis_arg, interpolation=interpolation)
if keepdims:
if axis is None:
index_ret = (None,) * nd
else:
index_ret = tuple(None if i in axis else slice(None) for i in range(nd))
ret = ret[(Ellipsis,) + index_ret]
# if keepdims:
# axis = axis if axis is not None else list(range(a.ndim))
# ret = ret.unsqueeze(axis)
return ret
def _quantile(a, q, axis=None, interpolation="nearest"):
if isinstance(q, float):
q = paddle.to_tensor(q)
ret_dtype = a.dtype
if q.ndim > 1:
raise ValueError("q argument must be a scalar or 1-dimensional!")
if axis is None:
axis = 0
a = paddle.flatten(a)
elif axis != 0:
a = a.moveaxis(axis, 0)
axis = 0
n = a.shape[axis]
indices = q * (n - 1)
a = paddle.sort(a, axis)
if interpolation == "lower":
indices = paddle.floor(indices)
elif interpolation == "higher":
indices = paddle.ceil(indices)
elif interpolation == "nearest":
indices = paddle.round(indices)
elif interpolation == "midpoint":
index_floor = paddle.floor(indices)
index_ceil = paddle.ceil(indices)
indices = (index_ceil + index_floor) / 2
indices_below = paddle.floor(indices).astype(paddle.int32)
indices_upper = paddle.ceil(indices).astype(paddle.int32)
weights = indices - indices_below.astype(paddle.float64)
if interpolation == "nearest_jax":
indices_below = paddle.clip(indices_below, 0, n - 1)
indices_upper = paddle.clip(indices_upper, 0, n - 1)
tensor_upper = paddle.gather(a, indices_upper, axis=axis)
tensor_below = paddle.gather(a, indices_below, axis=axis)
pred = weights <= 0.5
out = paddle.where(pred, tensor_below, tensor_upper)
else:
tensor_upper = paddle.gather(a, indices_upper, axis=axis)
tensor_below = paddle.gather(a, indices_below, axis=axis)
out = paddle.lerp(
tensor_below.astype(paddle.float64),
tensor_upper.astype(paddle.float64),
weights.astype(paddle.float64),
)
return out.astype(ret_dtype)
def _compute_quantile_wrapper(
x,
q,
axis=None,
keepdims=False,
interpolation="linear",
):
if not _validate_quantile(q):
raise ValueError("Quantiles must be in the range [0, 1]")
if interpolation not in [
"linear",
"lower",
"higher",
"midpoint",
"nearest",
"nearest_jax",
]:
        raise ValueError(
            "Interpolation must be 'linear', 'lower', 'higher', 'midpoint', "
            "'nearest' or 'nearest_jax'"
        )
return _handle_axis(
x,
q,
_quantile,
keepdims=keepdims,
axis=axis,
interpolation=interpolation,
)
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"uint8",
"float16",
"bfloat16",
"complex64",
"complex128",
)
}
},
backend_version,
)
def quantile(
a: paddle.Tensor,
q: Union[paddle.Tensor, float],
/,
*,
axis: Optional[Union[Sequence[int], int]] = None,
keepdims: Optional[bool] = False,
interpolation: Optional[str] = "linear",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
# added the nearest_jax mode to enable jax-like calculations for method="nearest"
return _compute_quantile_wrapper(
x=a,
q=q,
axis=axis,
keepdims=keepdims,
interpolation=interpolation,
)
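# Illustrative sketch (added for exposition, not part of the original module): on a
# sorted axis of length n, a quantile q targets the fractional index q * (n - 1);
# "linear" interpolates between the two neighbouring samples while "lower" and "higher"
# round that index down or up. Values below are assumptions.
def _example_quantile_interpolation():
    x = paddle.to_tensor([0.0, 10.0, 20.0, 30.0])
    # index = 0.25 * 3 = 0.75 -> "linear" gives 7.5, "lower" gives 0.0, "higher" gives 10.0
    return (
        quantile(x, 0.25, interpolation="linear"),
        quantile(x, 0.25, interpolation="lower"),
        quantile(x, 0.25, interpolation="higher"),
    )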
def corrcoef(
x: paddle.Tensor,
/,
*,
y: Optional[paddle.Tensor] = None,
rowvar: Optional[bool] = True,
name: Optional[str] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.linalg.corrcoef(
x=x,
rowvar=rowvar,
name=name,
)
def histogram(
a: paddle.Tensor,
/,
*,
bins: Optional[Union[int, paddle.Tensor]] = None,
axis: Optional[int] = None,
extend_lower_interval: Optional[bool] = False,
extend_upper_interval: Optional[bool] = False,
dtype: Optional[paddle.Tensor] = None,
range: Optional[Tuple[float]] = None,
weights: Optional[paddle.Tensor] = None,
density: Optional[bool] = False,
out: Optional[paddle.Tensor] = None,
) -> Tuple[paddle.Tensor]:
if range is None:
min_range = 0
max_range = 0
else:
min_range = range[0]
max_range = range[1]
return paddle.histogram(a, bins=bins, min=min_range, max=max_range)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, backend_version
)
def nanmedian(
input: paddle.Tensor,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: Optional[bool] = False,
dtype: Optional[paddle.dtype] = None,
overwrite_input: Optional[bool] = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if dtype is None:
dtype = input.dtype
return paddle.nanmedian(x=input, axis=axis, keepdim=keepdims)
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"uint8",
"float16",
"bool",
)
}
},
backend_version,
)
def unravel_index(
indices: paddle.Tensor,
shape: Tuple[int],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> Tuple[Any, ...]:
if indices.ndim == 0:
indices = indices.unsqueeze(0)
coord = []
indices = indices
for dim in reversed(shape):
coord.append((indices % dim).astype("int32"))
indices = paddle.floor(indices / dim)
return tuple(reversed(coord))
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"uint8",
"float16",
"float32",
"float64",
"complex64",
"complex128",
"bool",
)
}
},
backend_version,
)
def bincount(
x: paddle.Tensor,
/,
*,
weights: Optional[paddle.Tensor] = None,
minlength: int = 0,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.bincount(x, weights=weights, minlength=minlength).cast(
x.dtype if weights is None else weights.dtype
)
def igamma(
a: paddle.Tensor,
/,
*,
x: paddle.Tensor,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
results = []
ret_dtype = a.dtype if out is None else out.dtype
if paddle.float16 in [a.dtype, x.dtype]:
a = a.astype("float32")
x = x.astype("float32")
for ai, xi in zip(a.flatten(), x.flatten()):
ai = ai.astype("float64")
xi = xi.astype("float64")
def integrand(t):
return paddle.exp(-t) * paddle.pow(t, ai - 1)
intervals = paddle.linspace(0, xi, 10001).astype("float64")
interval_width = xi / 10000
values = integrand(intervals)
integral = paddle.multiply((values[:-1] + values[1:]) / 2, interval_width)
result = paddle.divide(paddle.sum(integral), paddle.exp(paddle.lgamma(ai)))
results.append(result)
return paddle.to_tensor(results, dtype=ret_dtype).reshape(a.shape)
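# Illustrative sketch (added for exposition, not part of the original module): the loop
# above approximates the regularised lower incomplete gamma
# P(a, x) = (1 / Gamma(a)) * integral_0^x t**(a-1) * exp(-t) dt with a 10000-interval
# trapezoid rule. For a = 1 the integrand is exp(-t), so P(1, 1) should come out close
# to 1 - exp(-1) ~= 0.632. Values below are assumptions.
def _example_igamma_check():
    a = paddle.to_tensor([1.0], dtype="float32")
    x = paddle.to_tensor([1.0], dtype="float32")
    return igamma(a, x=x)  # ~0.632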
def cov(
x1: paddle.Tensor,
x2: paddle.Tensor = None,
/,
*,
rowVar: bool = True,
bias: bool = False,
ddof: Optional[int] = None,
fweights: Optional[paddle.Tensor] = None,
aweights: Optional[paddle.Tensor] = None,
dtype: Optional[paddle.dtype] = None,
) -> paddle.Tensor:
if fweights is not None:
fweights = fweights.astype("float64")
if aweights is not None:
aweights = aweights.astype("float64")
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be an integer")
if len(x1.shape) > 2:
raise ValueError("x1 has more than 2 dimensions")
if x2 is not None:
if len(x2.shape) > 2:
raise ValueError("x2 has more than 2 dimensions")
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
if dtype is None:
x1 = x1.astype("float64")
if x2 is not None:
x2 = x2.astype("float64")
else:
x1 = x1.astype(dtype)
if x2 is not None:
x2 = x2.astype(dtype)
X = x1
if not rowVar and X.shape[0] != 1:
X = paddle.transpose(X, perm=tuple(range(len(X.shape) - 1, -1, -1)))
if x2 is not None:
if not rowVar and x2.shape[0] != 1:
x2 = paddle.transpose(x2, perm=tuple(range(len(x2.shape) - 1, -1, -1)))
if len(x2.shape) > 1:
X = paddle.concat([X, x2], axis=0)
else:
X = paddle.stack([X, x2], axis=0)
if not rowVar:
X = paddle.transpose(X, perm=tuple(range(len(X.shape) - 1, -1, -1)))
return paddle.linalg.cov(
X, rowvar=rowVar, ddof=ddof, fweights=fweights, aweights=aweights
)
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "bool", "float32", "float64")},
backend_version,
)
def cummax(
x: paddle.Tensor,
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[paddle.dtype] = None,
out: Optional[paddle.Tensor] = None,
) -> Tuple[paddle.Tensor, paddle.Tensor]:
if x.dtype in (paddle.complex128, paddle.complex64):
x = x.real()
if not (exclusive or reverse):
return __find_cummax(x, axis=axis)
elif exclusive and reverse:
x, indices = __find_cummax(ivy.flip(x, axis=(axis,)), axis=axis)
x, indices = ivy.swapaxes(x, axis, -1), ivy.swapaxes(indices, axis, -1)
x = ivy.concat((ivy.zeros_like(x[..., -1:]), x[..., :-1]), axis=-1)
indices = ivy.concat(
(ivy.zeros_like(indices[..., -1:]), indices[..., :-1]), axis=-1
)
x, indices = ivy.swapaxes(x, axis, -1), ivy.swapaxes(indices, axis, -1)
return ivy.flip(x, axis=(axis,)), ivy.flip(indices, axis=(axis,))
elif exclusive:
x = ivy.swapaxes(x, axis, -1)
x = ivy.concat((ivy.zeros_like(x[..., -1:]), x[..., :-1]), axis=-1)
x = ivy.swapaxes(x, axis, -1)
x, indices = __find_cummax(x, axis=axis)
return x, indices
else:
x, indices = __find_cummax(ivy.flip(x, axis=(axis,)), axis=axis)
return ivy.flip(x, axis=axis), ivy.flip(indices, axis=axis)
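# Illustrative sketch (added for exposition, not part of the original module): the
# running maximum is paired with the index of the element providing that maximum;
# ``exclusive`` shifts the scan so position i ignores x[i], and ``reverse`` scans from
# the end of the axis. Values below are assumptions.
def _example_cummax():
    x = paddle.to_tensor([1.0, 3.0, 2.0])
    values, indices = cummax(x)  # values -> [1., 3., 3.], indices -> [0, 1, 1]
    return values, indices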
def __find_cummax(
x: paddle.Tensor, axis: int = 0, dtype: Optional[paddle.dtype] = None
) -> Tuple[paddle.Tensor, paddle.Tensor]:
indices = []
values = []
x_dtype = x.dtype if dtype is None else dtype
if (
isinstance(x.tolist()[0], list)
and len(x[0].shape) >= 1
and (isinstance(x[0], (paddle.Tensor, ivy.Array)))
):
if axis >= 1:
if not isinstance(x, list):
x = x.tolist()
for ret1 in x:
value, indice = __find_cummax(
paddle.to_tensor(ret1, dtype=x_dtype), axis=axis - 1, dtype=x_dtype
)
indices.append(indice)
values.append(value)
else:
x_list = x.numpy()
z_list = __get_index(x_list.tolist())
indices, values, n1 = x_list.copy(), x_list.copy(), {}
indices.fill(0)
values.fill(0)
z_list = sorted(z_list, key=lambda i: i[1])
for y, y_index in z_list:
multi_index = y_index
if tuple(multi_index[1:]) not in n1:
n1[tuple(multi_index[1:])] = multi_index[0]
indices[y_index] = multi_index[0]
values[y_index] = y
elif (
y
>= x_list[
tuple([n1[tuple(multi_index[1:])]] + list(multi_index[1:]))
]
):
n1[tuple(multi_index[1:])] = multi_index[0]
indices[y_index] = multi_index[0]
values[y_index] = y
else:
indices[y_index] = n1[tuple(multi_index[1:])]
values[y_index] = x_list[
tuple([n1[tuple(multi_index[1:])]] + list(multi_index[1:]))
]
else:
if not isinstance(x, list):
x = x.tolist()
n = 0
for idx, y in enumerate(x):
if x[n] > y:
values.append(x[n])
elif x[n] <= y or idx == 0:
n = idx
values.append(y)
indices.append(n)
if isinstance(x, paddle.Tensor):
return paddle.to_tensor(values, dtype=x.dtype), paddle.to_tensor(
indices, dtype="int64"
)
else:
return ivy.array(values, dtype=x_dtype), ivy.array(indices, dtype="int64")
def __get_index(lst, indices=None, prefix=None):
if indices is None:
indices = []
if prefix is None:
prefix = []
if isinstance(lst, list):
for i, sub_lst in enumerate(lst):
sub_indices = prefix + [i]
__get_index(sub_lst, indices, sub_indices)
else:
indices.append((lst, tuple(prefix)))
return indices
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("uint8", "int8", "int16")}},
backend_version,
)
def cummin(
x: paddle.Tensor,
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[paddle.dtype] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
dtype = dtype if dtype is not None else x.dtype
if reverse:
x = paddle.flip(x, axis=[axis])
x_unstacked = paddle.unbind(x, axis=axis)
cummin_x_unstacked = []
cummin_x_unstacked.append(x_unstacked[0])
for i, x_sub in enumerate(x_unstacked[1:]):
cummin_x_sub = paddle.minimum(cummin_x_unstacked[i], x_sub)
cummin_x_unstacked.append(cummin_x_sub)
cummin_x = paddle.stack(cummin_x_unstacked, axis=axis)
if reverse:
cummin_x = paddle.flip(cummin_x, axis=[axis])
return cummin_x.cast(dtype)
| ivy/ivy/functional/backends/paddle/experimental/statistical.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/experimental/statistical.py",
"repo_id": "ivy",
"token_count": 10201
} | 25 |
"""TensorFlow activation functions.
Collection of TensorFlow activation functions, wrapped to fit Ivy syntax
and signature.
"""
from typing import Optional, Union, Literal
# global
import tensorflow as tf
from tensorflow.python.types.core import Tensor
# local
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from . import backend_version
import ivy.functional.backends.tensorflow as tf_backend
def gelu(
x: Tensor,
/,
*,
approximate: bool = False,
complex_mode="jax",
out: Optional[Tensor] = None,
) -> Tensor:
if x.dtype in [tf.complex64, tf.complex128]:
return 0.5 * x * (1 + tf.math.tanh(0.7978845608 * (x + 0.044715 * x * x * x)))
return tf.nn.gelu(x, approximate)
def leaky_relu(
x: Tensor,
/,
*,
alpha: float = 0.2,
complex_mode="jax",
out: Optional[Tensor] = None,
) -> Tensor:
return tf.nn.leaky_relu(x, alpha)
@with_supported_dtypes(
{
"2.15.0 and below": (
"float",
"int",
"complex",
)
},
backend_version,
)
def relu(x: Tensor, /, *, complex_mode="jax", out: Optional[Tensor] = None) -> Tensor:
return tf.nn.relu(x)
def sigmoid(
x: Tensor, /, *, complex_mode="jax", out: Optional[Tensor] = None
) -> Tensor:
return 1 / (1 + tf.exp(-x))
def softmax(
x: Tensor, /, *, axis: Optional[int] = None, out: Optional[Tensor] = None
) -> Tensor:
if axis is None:
axis = -1
dtype = x.dtype
if "complex" in str(dtype):
amax = tf_backend.max(x, axis=axis, keepdims=True)
normalized = tf.exp(tf.subtract(x, amax))
return tf.divide(
normalized, tf.reduce_sum(normalized, axis=axis, keepdims=True)
)
return tf.nn.softmax(x, axis)
@with_supported_dtypes(
{
"2.15.0 and below": (
"float16",
"bfloat16",
"float32",
"float64",
"complex64",
"complex128",
)
},
backend_version,
)
def softplus(
x: Tensor,
/,
*,
beta: Optional[Union[int, float]] = None,
threshold: Optional[Union[int, float]] = None,
complex_mode="jax",
out: Optional[Tensor] = None,
) -> Tensor:
if beta is not None and beta != 1:
x_beta = x * beta
res = (tf.nn.softplus(x_beta)) / beta
else:
x_beta = x
res = tf.nn.softplus(x)
if threshold is not None:
return tf.where(x_beta > threshold, x, res)
return res
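# Illustrative sketch (added for exposition, not part of the original module): with
# ``beta`` the function computes softplus(beta * x) / beta, and with ``threshold`` it
# falls back to the identity wherever beta * x exceeds the threshold (the saturated
# region). Values below are assumptions.
def _example_softplus_beta_threshold():
    x = tf.constant([-1.0, 0.0, 30.0])
    y = softplus(x, beta=2.0, threshold=20.0)
    # y[2] == 30.0 exactly because 2 * 30 > 20; the other entries follow the smooth curve
    return y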
# Softsign
@with_supported_dtypes(
{
"2.15.0 and below": (
"float16",
"bfloat16",
"float32",
"float64",
"complex64",
"complex128",
)
},
backend_version,
)
def softsign(x: tf.Tensor, /, out: Optional[tf.Tensor] = None) -> tf.Tensor:
return tf.nn.softsign(x)
def log_softmax(
x: Tensor,
/,
*,
axis: Optional[int] = -1,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[Tensor] = None,
):
if "complex" in str(x.dtype):
x_max = tf_backend.max(x, axis=axis, keepdims=True)
sub_temp = tf.subtract(x, x_max)
ret = tf.reduce_sum(tf.exp(sub_temp), axis=axis, keepdims=True)
ret = tf.math.log(ret)
return tf.subtract(sub_temp, ret)
return tf.nn.log_softmax(x, axis)
def mish(
x: Tensor,
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[Tensor] = None,
) -> Tensor:
if "complex" in str(x.dtype):
x_norm = tf.math.log1p(tf.exp(x))
else:
x_norm = tf.math.softplus(x)
return tf.multiply(x, tf.math.tanh(x_norm))
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def hardswish(
x: Tensor,
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[Tensor] = None,
) -> Tensor:
return x * tf.nn.relu6(x + 3) / 6
| ivy/ivy/functional/backends/tensorflow/activations.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/activations.py",
"repo_id": "ivy",
"token_count": 1905
} | 26 |
import tensorflow as tf
import math
from typing import Optional
from ivy.func_wrapper import (
with_unsupported_dtypes,
with_supported_device_and_dtypes,
)
from . import backend_version
@with_unsupported_dtypes({"2.15.0 and below": "bool"}, backend_version)
def huber_loss(
input: tf.Tensor,
target: tf.Tensor,
/,
*,
delta: Optional[float] = 1.0,
reduction: Optional[str] = "mean",
) -> tf.Tensor:
abs_diff = tf.abs(input - target)
quadratic_loss = 0.5 * (abs_diff**2)
linear_loss = delta * (abs_diff - 0.5 * delta)
loss = tf.where(abs_diff <= delta, quadratic_loss, linear_loss)
    if reduction == "sum":
        return tf.reduce_sum(loss)
    elif reduction == "mean":
        return tf.reduce_mean(loss)
else:
return loss
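# Illustrative usage sketch (hypothetical helper, never called at import time): with
# the default delta=1.0, |0.5| falls in the quadratic branch (0.5 * 0.25 = 0.125)
# while |2.0| uses the linear branch (2.0 - 0.5 = 1.5), so the "mean" reduction
# gives (0.125 + 1.5) / 2 = 0.8125.
def _huber_loss_example():
    return huber_loss(
        tf.constant([0.5, 2.0]), tf.constant([0.0, 0.0]), reduction="mean"
    )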
@with_unsupported_dtypes({"2.15.0 and below": "bool"}, backend_version)
def smooth_l1_loss(
input: tf.Tensor,
target: tf.Tensor,
/,
*,
beta: Optional[float] = 1.0,
reduction: Optional[str] = "mean",
) -> tf.Tensor:
diff = tf.abs(input - target)
loss = tf.where(diff < beta, 0.5 * diff**2 / beta, diff - 0.5 * beta)
if reduction == "mean":
return tf.reduce_mean(loss)
elif reduction == "sum":
return tf.reduce_sum(loss)
else:
return loss
@with_unsupported_dtypes({"2.15.0 and below": "bool"}, backend_version)
def soft_margin_loss(
input: tf.Tensor,
target: tf.Tensor,
/,
*,
reduction: Optional[str] = "mean",
) -> tf.Tensor:
    loss = tf.reduce_sum(tf.math.log1p(tf.exp(-input * target))) / tf.cast(
        tf.size(input), input.dtype
    )
if reduction == "sum":
return tf.reduce_sum(loss)
elif reduction == "mean":
return tf.reduce_mean(loss)
else:
return loss
def _apply_loss_reduction(loss: tf.Tensor, reduction: str) -> tf.Tensor:
if reduction == "sum":
return tf.math.reduce_sum(loss)
elif reduction == "mean":
return tf.reduce_mean(loss)
else: # reduction == "none"
return loss
def _validate_poisson_nll_params(
input,
label,
epsilon,
reduction,
allowed_dtypes=[tf.float32, tf.float64],
):
# Validate dtypes
for parameter, name in zip([input, label], ["input", "label"]):
if parameter.dtype not in allowed_dtypes:
raise TypeError(
f"The dtype of '{name}' in poisson_nll_loss should be one of"
f" {allowed_dtypes}, but received {parameter.dtype}."
)
# Validate epsilon
if epsilon <= 0:
raise ValueError(
"The value of `epsilon` in poisson_nll_loss should be positive, but"
f" received {epsilon}, which is not allowed."
)
# Validate reduction
if reduction not in ["sum", "mean", "none"]:
raise ValueError(
"The value of 'reduction' in poisson_nll_loss should be 'sum', 'mean' or"
f" 'none', but received {reduction}, which is not allowed."
)
# Validate shape
if input.shape != label.shape:
raise ValueError(
f"The shape of 'input' ({input.shape}) must be the same as the shape of"
f" 'label' ({label.shape})."
)
return True
@with_supported_device_and_dtypes(
{
"2.15.0 and below": {
"cpu": ("float32", "float64"),
"gpu": ("float32", "float64"),
}
},
backend_version,
)
def poisson_nll_loss(
input: tf.Tensor,
target: tf.Tensor,
*,
log_input: bool = True,
full: bool = False,
eps: float = 1e-8,
reduction: str = "mean",
) -> tf.Tensor:
input_tensor = tf.constant(input, dtype=input.dtype)
target_tensor = tf.constant(target, dtype=input.dtype)
_validate_poisson_nll_params(input_tensor, target_tensor, eps, reduction)
if log_input:
loss = tf.math.exp(input_tensor) - target_tensor * input_tensor
else:
loss = input_tensor - target_tensor * tf.math.log(input_tensor + eps)
if full:
point_five = tf.constant(0.5, dtype=target_tensor.dtype)
two_pi = tf.constant(2 * math.pi, dtype=target_tensor.dtype)
stirling_approx = (
(target_tensor * tf.math.log(target_tensor))
- target_tensor
+ (point_five * tf.math.log(two_pi * target_tensor))
)
zeros = tf.zeros_like(target_tensor, dtype=target_tensor.dtype)
ones = tf.ones_like(target_tensor, dtype=target_tensor.dtype)
cond = tf.math.logical_and(target_tensor >= zeros, target_tensor <= ones)
loss = loss + tf.where(cond, zeros, stirling_approx)
return _apply_loss_reduction(loss, reduction)
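# Illustrative usage sketch (hypothetical helper, never called at import time): with
# log_input=True and full=False the per-element loss is exp(x) - y * x, so for
# x = [0, 0] and y = [1, 0] every element is exp(0) = 1 and the mean is 1.0.
def _poisson_nll_loss_example():
    return poisson_nll_loss(tf.constant([0.0, 0.0]), tf.constant([1.0, 0.0]))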
@with_supported_device_and_dtypes(
{
"2.14.0 and below": {
"cpu": ("float32", "float64"),
"gpu": ("float32", "float64"),
}
},
backend_version,
)
def hinge_embedding_loss(
input: tf.Tensor,
target: tf.Tensor,
*,
margin: float = 1.0,
reduction: str = "mean",
) -> tf.Tensor:
zero_ = tf.zeros([1], dtype=input.dtype)
relu_part = tf.math.maximum(margin - input, 0)
loss = tf.where(tf.equal(target, 1.0), input, zero_) + tf.where(
tf.equal(target, -1.0), relu_part, zero_
)
return _apply_loss_reduction(loss, reduction)
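# Illustrative usage sketch (hypothetical helper, never called at import time):
# positive targets contribute the raw input and negative targets contribute
# max(margin - x, 0), so inputs [0.5, 0.3] with targets [1, -1] give per-element
# losses [0.5, 0.7] and a mean of 0.6.
def _hinge_embedding_loss_example():
    return hinge_embedding_loss(
        tf.constant([0.5, 0.3]), tf.constant([1.0, -1.0]), reduction="mean"
    )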
| ivy/ivy/functional/backends/tensorflow/experimental/losses.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/experimental/losses.py",
"repo_id": "ivy",
"token_count": 2364
} | 27 |
"""TensorFlow random functions.
Collection of TensorFlow random functions, wrapped to fit Ivy syntax and
signature.
"""
from typing import Optional, Union, Sequence
# global
import tensorflow as tf
from tensorflow.python.framework.dtypes import DType
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.functional.ivy.random import (
_check_bounds_and_get_shape,
_randint_check_dtype_and_bound,
_check_valid_scale,
)
from . import backend_version
# Extra #
# ------#
@with_supported_dtypes(
{"2.15.0 and below": ("float", "int32", "int64")}, backend_version
)
def random_uniform(
*,
low: Union[float, tf.Tensor, tf.Variable] = 0.0,
high: Union[float, tf.Tensor, tf.Variable] = 1.0,
shape: Optional[Union[ivy.NativeShape, Sequence[int], tf.Tensor]] = None,
dtype: DType,
device: Optional[str] = None,
seed: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
shape = _check_bounds_and_get_shape(low, high, shape).shape
low = tf.cast(low, dtype)
high = tf.cast(high, dtype)
if seed:
tf.random.set_seed(seed)
return tf.random.uniform(shape, low, high, dtype=dtype, seed=seed)
def random_normal(
*,
mean: Union[float, tf.Tensor, tf.Variable] = 0.0,
std: Union[float, tf.Tensor, tf.Variable] = 1.0,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
dtype: DType,
seed: Optional[int] = None,
device: Optional[str] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
_check_valid_scale(std)
shape = _check_bounds_and_get_shape(mean, std, shape).shape
mean = tf.cast(mean, dtype)
std = tf.cast(std, dtype)
if seed:
tf.random.set_seed(seed)
return tf.random.normal(shape, mean, std, dtype=dtype, seed=seed)
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16",)}, backend_version)
def multinomial(
population_size: int,
num_samples: int,
/,
*,
batch_size: int = 1,
probs: Optional[Union[tf.Tensor, tf.Variable]] = None,
replace: bool = True,
device: Optional[str] = None,
seed: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if probs is None:
probs = (
tf.ones(
(
batch_size,
population_size,
)
)
/ population_size
)
# We set the global seed, but not the operation seeds below. In this way, we
# get different results for every random op call but the same sequence for
# every re-run of the program
if seed:
tf.random.set_seed(seed)
if not replace:
orig_probs_shape = list(probs.shape)
probs_flat = tf.reshape(probs, (-1, orig_probs_shape[-1]))
probs_flat = probs_flat / tf.math.reduce_sum(probs_flat, axis=-1, keepdims=True)
probs_stack = tf.split(probs_flat, probs_flat.shape[0])
samples_stack = []
for prob in probs_stack:
logits = tf.dtypes.cast(tf.math.log(prob), tf.float64)
# Gumbel-max trick
# https://github.com/tensorflow/tensorflow/issues/9260
z = tf.dtypes.cast(
-tf.math.log(-tf.math.log(tf.random.uniform(tf.shape(logits), 0, 1))),
tf.float64,
)
_, indices = tf.nn.top_k(logits + z, k=num_samples)
samples_stack.append(indices)
samples_flat = tf.stack(samples_stack)
return tf.convert_to_tensor(
tf.reshape(samples_flat, orig_probs_shape[:-1] + [num_samples])
)
else:
if len(probs.numpy().shape) == 1:
probs = tf.expand_dims(probs, axis=0)
return tf.random.categorical(tf.math.log(probs), num_samples)
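# Illustrative usage sketch (hypothetical helper, never called at import time): with
# replace=False the Gumbel-max trick above adds -log(-log(U)) noise to the
# log-probabilities and keeps the top-k indices, yielding k *distinct* category
# indices per batch row.
def _multinomial_without_replacement_example():
    probs = tf.constant([[0.1, 0.2, 0.3, 0.4]])
    return multinomial(4, 2, batch_size=1, probs=probs, replace=False, seed=1)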
def randint(
low: Union[float, tf.Tensor, tf.Variable],
high: Union[float, tf.Tensor, tf.Variable],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[str] = None,
dtype: Optional[Union[DType, ivy.Dtype]] = None,
seed: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if not dtype:
dtype = ivy.default_int_dtype()
dtype = ivy.as_native_dtype(dtype)
_randint_check_dtype_and_bound(low, high, dtype)
shape = _check_bounds_and_get_shape(low, high, shape).shape
low = tf.cast(low, "float32")
high = tf.cast(high, "float32")
if seed:
tf.random.set_seed(seed)
return tf.cast(tf.random.uniform(shape, low, high, "float32", seed=seed), dtype)
def seed(*, seed_value: int = 0):
tf.random.set_seed(seed_value)
return
def shuffle(
x: Union[tf.Tensor, tf.Variable],
axis: Optional[int] = 0,
/,
*,
seed: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if seed:
tf.random.set_seed(seed)
return tf.random.shuffle(x, seed=seed)
| ivy/ivy/functional/backends/tensorflow/random.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/random.py",
"repo_id": "ivy",
"token_count": 2289
} | 28 |
"""Collection of PyTorch general functions, wrapped to fit Ivy syntax and
signature."""
import inspect
# global
import os
import importlib
import torch
from typing import Optional, Union
from torch.profiler import ProfilerActivity
from torch.profiler import profile
# local
import ivy
from ivy.functional.ivy.device import (
_shift_native_arrays_on_default_device,
Profiler as BaseProfiler,
)
torch_scatter = None
# API #
# ----#
def dev(
x: torch.Tensor, /, *, as_native: bool = False
) -> Union[ivy.Device, torch.device]:
dv = x.device
if as_native:
if isinstance(dv, torch.device):
dv = dv.type
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
return torch.device(dv.replace("gpu", "mps"))
return torch.device(dv.replace("gpu", "cuda"))
return as_ivy_dev(dv)
def to_device(
x: torch.Tensor,
device: torch.device,
/,
*,
stream: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if device is None:
return x
ret = x.to(as_native_dev(device))
if isinstance(x, torch.nn.Parameter):
return torch.nn.Parameter(ret)
return ret
def as_ivy_dev(device: torch.device, /):
if isinstance(device, str):
return ivy.Device(device)
dev_type, dev_idx = (device.type, device.index)
if dev_type == "cpu":
return ivy.Device(dev_type)
elif dev_type == "mps":
return ivy.Device(
dev_type.replace("mps", "gpu")
+ (":" + (str(dev_idx) if dev_idx is not None else "0"))
)
return ivy.Device(
dev_type.replace("cuda", "gpu")
+ (":" + (str(dev_idx) if dev_idx is not None else "0"))
)
def as_native_dev(
device: Optional[Union[ivy.Device, torch.device]] = None,
/,
) -> Optional[torch.device]:
if not isinstance(device, str):
return device
if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
return torch.device(ivy.Device(device).replace("gpu", "mps"))
return torch.device(ivy.Device(device).replace("gpu", "cuda"))
def clear_cached_mem_on_dev(device: Union[ivy.Device, torch.device], /) -> None:
torch_dev = as_native_dev(device)
if torch_dev.type == "cuda":
torch.cuda.empty_cache()
elif torch_dev.type == "mps":
from torch import mps
mps.empty_cache()
def num_gpus() -> int:
if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
return 1
return torch.cuda.device_count()
def gpu_is_available() -> bool:
return (
hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
) or torch.cuda.is_available()
# noinspection PyUnresolvedReferences
def tpu_is_available() -> bool:
if importlib.util.find_spec("torch_xla") is not None:
return True
return False
def handle_soft_device_variable(*args, fn, **kwargs):
args, kwargs, device_shifting_dev = _shift_native_arrays_on_default_device(
*args, **kwargs
)
# checking if this function accepts `device` argument
# must be handled in the backend
if "device" in inspect.signature(fn).parameters:
kwargs["device"] = device_shifting_dev
return fn(*args, **kwargs)
class Profiler(BaseProfiler):
def __init__(self, save_dir: str):
super().__init__(save_dir)
self._prof = profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], with_stack=True
)
def start(self):
self._prof.__enter__()
def stop(self):
self._prof.__exit__(None, None, None)
self._prof.export_chrome_trace(os.path.join(self._save_dir, "trace.json"))
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
| ivy/ivy/functional/backends/torch/device.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/device.py",
"repo_id": "ivy",
"token_count": 1622
} | 29 |
# global
from typing import Optional, Union, Sequence
import torch
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from .. import backend_version
from ivy.functional.ivy.random import (
_check_bounds_and_get_shape,
_check_shapes_broadcastable,
)
# dirichlet
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def dirichlet(
alpha: Union[torch.tensor, float, Sequence[float]],
/,
*,
size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
out: Optional[torch.Tensor] = None,
seed: Optional[int] = None,
dtype: Optional[torch.dtype] = None,
) -> torch.Tensor:
size = size if size is not None else len(alpha)
if seed is not None:
torch.manual_seed(seed)
return torch.tensor(
torch.distributions.dirichlet.Dirichlet(alpha).rsample(sample_shape=size),
dtype=dtype,
)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, backend_version)
def beta(
alpha: Union[float, torch.Tensor],
beta: Union[float, torch.Tensor],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
dtype: Optional[Union[torch.dtype, ivy.Dtype]] = None,
device: torch.device = None,
seed: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
shape = _check_bounds_and_get_shape(alpha, beta, shape).shape
if seed is not None:
torch.manual_seed(seed)
ret = torch.distributions.beta.Beta(alpha, beta).sample(shape)
if device is not None:
return ret.to(device)
return ret
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, backend_version)
def gamma(
alpha: Union[float, torch.Tensor],
beta: Union[float, torch.Tensor],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
dtype: Optional[Union[torch.dtype, ivy.Dtype]] = None,
device: torch.device = None,
seed: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
shape = _check_bounds_and_get_shape(alpha, beta, shape).shape
if seed is not None:
torch.manual_seed(seed)
ret = torch.distributions.gamma.Gamma(alpha, beta).sample(shape)
if device is not None:
return ret.to(device)
return ret
def _poisson_with_neg_lam(lam, fill_value, device, dtype):
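    # torch.poisson rejects negative rates, so sample with the negative entries
    # clamped to zero and then overwrite those positions with `fill_value`.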
if torch.any(lam < 0):
pos_lam = torch.where(lam < 0, 0, lam)
ret = torch.poisson(pos_lam).type(dtype).to(device)
ret = torch.where(lam < 0, fill_value, ret)
else:
ret = torch.poisson(lam).type(dtype).to(device)
return ret
def poisson(
lam: Union[float, torch.Tensor],
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: torch.device = None,
dtype: torch.dtype,
seed: Optional[int] = None,
fill_value: Optional[Union[float, int]] = 0,
out: Optional[torch.Tensor] = None,
):
lam = torch.tensor(lam, device=device, dtype=torch.float32)
if seed:
torch.manual_seed(seed)
if shape is None:
return _poisson_with_neg_lam(lam, fill_value, device, dtype)
shape = torch.tensor(shape, device=device, dtype=torch.int32)
list_shape = shape.tolist()
_check_shapes_broadcastable(lam.shape, list_shape)
lam = torch.broadcast_to(lam, list_shape)
return _poisson_with_neg_lam(lam, fill_value, device, dtype)
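# Illustrative usage sketch (hypothetical helper, never called at import time):
# negative entries of `lam` are not valid Poisson rates, so their samples are
# replaced by `fill_value`; the second output element below is therefore always 0.0.
def _poisson_negative_rate_example():
    return poisson([1.0, -2.0], dtype=torch.float32)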
def bernoulli(
probs: Union[float, torch.Tensor],
*,
logits: Union[float, torch.Tensor] = None,
shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None,
device: torch.device = None,
dtype: torch.dtype,
seed: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
dtype = dtype if dtype is not None else probs.dtype
if seed:
torch.manual_seed(seed)
if logits is not None:
probs = torch.nn.functional.softmax(logits, -1)
if not _check_shapes_broadcastable(shape, probs.shape):
shape = probs.shape
return torch.bernoulli(probs, out=out).to(device, dtype).broadcast_to(shape)
bernoulli.support_native_out = True
| ivy/ivy/functional/backends/torch/experimental/random.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/experimental/random.py",
"repo_id": "ivy",
"token_count": 1687
} | 30 |
# global
import torch
from typing import Tuple, Optional
from collections import namedtuple
# local
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
import ivy
@with_unsupported_dtypes(
{
"2.2 and below": ("complex", "float16"),
},
backend_version,
)
def unique_all(
x: torch.Tensor,
/,
*,
axis: Optional[int] = None,
by_value: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
Results = namedtuple(
"Results",
["values", "indices", "inverse_indices", "counts"],
)
if axis is None:
x = torch.flatten(x)
axis = 0
values, inverse_indices, counts = torch.unique(
x,
sorted=True,
return_inverse=True,
return_counts=True,
dim=axis,
)
unique_nan = torch.isnan(values)
idx_dtype = inverse_indices.dtype
if torch.any(unique_nan):
nan_index = torch.where(torch.isnan(x))
non_nan_index = [
x.tolist().index(val) for val in values if not torch.isnan(val)
]
indices = values.clone().to(idx_dtype)
indices[unique_nan] = nan_index[0]
inverse_indices[torch.isnan(x)] = torch.where(unique_nan)[0][0]
counts[unique_nan] = 1
indices[~unique_nan] = torch.tensor(non_nan_index, dtype=idx_dtype)
else:
decimals = torch.arange(inverse_indices.numel()) / inverse_indices.numel()
inv_sorted = (inverse_indices + decimals).argsort()
tot_counts = torch.cat((counts.new_zeros(1), counts.cumsum(dim=0)))[:-1]
indices = inv_sorted[tot_counts].to(idx_dtype)
if not by_value:
sort_idx = torch.argsort(indices)
else:
values_ = torch.moveaxis(values, axis, 0)
values_ = torch.reshape(values_, (values_.shape[0], -1))
sort_idx = torch.tensor(
[i[0] for i in sorted(enumerate(values_), key=lambda x: tuple(x[1]))]
)
ivy_torch = ivy.current_backend()
values = values.index_select(dim=axis, index=sort_idx)
counts = ivy_torch.gather(counts, sort_idx)
indices = ivy_torch.gather(indices, sort_idx)
inv_sort_idx = ivy_torch.invert_permutation(sort_idx)
inverse_indices = torch.vmap(lambda y: torch.gather(inv_sort_idx, 0, y))(
inverse_indices
)
return Results(
values.to(x.dtype),
indices,
inverse_indices,
counts,
)
@with_unsupported_dtypes(
{
"2.2 and below": ("float16",),
},
backend_version,
)
def unique_counts(x: torch.Tensor, /) -> Tuple[torch.Tensor, torch.Tensor]:
v, c = torch.unique(torch.reshape(x, [-1]), return_counts=True)
nan_idx = torch.where(torch.isnan(v))
c[nan_idx] = 1
Results = namedtuple("Results", ["values", "counts"])
return Results(v, c)
@with_unsupported_dtypes(
{
"2.2 and below": ("float16",),
},
backend_version,
)
def unique_inverse(
x: torch.Tensor,
/,
*,
axis: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
Results = namedtuple("Results", ["values", "inverse_indices"])
if axis is None:
x = torch.flatten(x)
axis = 0
values, inverse_indices = torch.unique(x, return_inverse=True, dim=axis)
nan_idx = torch.isnan(x)
if nan_idx.any():
inverse_indices[nan_idx] = torch.where(torch.isnan(values))[0][0]
inverse_indices = inverse_indices.reshape(x.shape)
return Results(values, inverse_indices)
@with_unsupported_dtypes(
{
"2.2 and below": ("float16", "complex"),
},
backend_version,
)
def unique_values(
x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None
) -> torch.Tensor:
return torch.unique(x)
| ivy/ivy/functional/backends/torch/set.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/set.py",
"repo_id": "ivy",
"token_count": 1721
} | 31 |
def tree_leaves(tree, is_leaf=None):
# todo: is_leaf
if isinstance(tree, (tuple, list)):
new_struc = []
for child in tree:
new_struc += tree_leaves(child)
return new_struc
elif isinstance(tree, dict):
new_struc = []
for key in sorted(tree):
new_struc += tree_leaves(tree[key])
return new_struc
return [tree]
def tree_map(f, tree, *rest, is_leaf=None):
# todo: is_leaf
is_tuple = isinstance(tree, tuple)
if is_tuple:
tree = list(tree)
if isinstance(tree, list):
for idx, elem in enumerate(tree):
curr_r = [r[idx] for r in rest] if rest else []
tree[idx] = tree_map(f, tree[idx], *curr_r, is_leaf=is_leaf)
return tuple(tree) if is_tuple else tree
elif isinstance(tree, dict):
for key in sorted(tree):
curr_r = [r[key] for r in rest] if rest else []
tree[key] = tree_map(f, tree[key], *curr_r, is_leaf=is_leaf)
return tree
return f(tree, *rest) if rest else f(tree)
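# Illustrative usage sketch (hypothetical helper, never called at import time): dict
# keys are visited in sorted order, so the leaves of {"b": 3, "a": [1, 2]} come out
# as [1, 2, 3], and tree_map applies the function to every leaf while preserving the
# container structure ({"a": [2, 4], "b": 6} here).
def _tree_util_example():
    leaves = tree_leaves({"b": 3, "a": [1, 2]})
    doubled = tree_map(lambda v: v * 2, {"b": 3, "a": [1, 2]})
    return leaves, doubled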
| ivy/ivy/functional/frontends/jax/_src/tree_util.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/_src/tree_util.py",
"repo_id": "ivy",
"token_count": 523
} | 32 |
# local
import ivy
from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_unsupported_dtypes
@to_ivy_arrays_and_back
def fft(a, n=None, axis=-1, norm=None):
if norm is None:
norm = "backward"
return ivy.fft(a, axis, norm=norm, n=n)
@to_ivy_arrays_and_back
def fft2(a, s=None, axes=(-2, -1), norm=None):
if norm is None:
norm = "backward"
return ivy.array(ivy.fft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
def fftfreq(n, d=1.0, *, dtype=None):
if not isinstance(
n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))
):
raise TypeError("n should be an integer")
dtype = ivy.float64 if dtype is None else ivy.as_ivy_dtype(dtype)
N = (n - 1) // 2 + 1
val = 1.0 / (n * d)
results = ivy.zeros((n,), dtype=dtype)
results[:N] = ivy.arange(0, N, dtype=dtype)
results[N:] = ivy.arange(-(n // 2), 0, dtype=dtype)
return results * val
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
def fftshift(x, axes=None, name=None):
shape = x.shape
if axes is None:
axes = tuple(range(x.ndim))
shifts = [(dim // 2) for dim in shape]
elif isinstance(axes, int):
shifts = shape[axes] // 2
else:
shifts = [shape[ax] // 2 for ax in axes]
roll = ivy.roll(x, shifts, axis=axes)
return roll
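# Illustrative usage sketch (hypothetical helper, never called at import time): every
# axis is rolled by dim // 2, which moves the zero-frequency term to the centre,
# e.g. [0, 1, 2, 3, 4, 5] -> [3, 4, 5, 0, 1, 2].
def _fftshift_example():
    return fftshift(ivy.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]))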
@to_ivy_arrays_and_back
def ifft(a, n=None, axis=-1, norm=None):
if norm is None:
norm = "backward"
return ivy.ifft(a, axis, norm=norm, n=n)
@to_ivy_arrays_and_back
def ifft2(a, s=None, axes=(-2, -1), norm=None):
if norm is None:
norm = "backward"
return ivy.array(ivy.ifft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"1.25.2 and below": ("float16", "bfloat16")}, "numpy")
def rfft(a, n=None, axis=-1, norm=None):
if n is None:
n = a.shape[axis]
if norm is None:
norm = "backward"
result = ivy.dft(
a, axis=axis, inverse=False, onesided=False, dft_length=n, norm=norm
)
    slices = [slice(0, dim) for dim in result.shape]
slices[axis] = slice(0, int(ivy.shape(result, as_array=True)[axis] // 2 + 1))
result = result[tuple(slices)]
return result
| ivy/ivy/functional/frontends/jax/numpy/fft.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/numpy/fft.py",
"repo_id": "ivy",
"token_count": 1154
} | 33 |
from . import nn_func
from .nn_func import *
| ivy/ivy/functional/frontends/mindspore/ops/function/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/mindspore/ops/function/__init__.py",
"repo_id": "ivy",
"token_count": 16
} | 34 |
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
class broadcast:
@to_ivy_arrays_and_back
def __init__(self, *args):
data = ivy.broadcast_arrays(*map(ivy.array, args))
self._shape = data[0].shape
self._ndim = data[0].ndim
self._index = 0
self._numiter = len(data)
self._size = data[0].size
self._data = (*zip(*(ivy.flatten(i) for i in data)),)
self._iters = tuple(iter(ivy.flatten(i)) for i in data)
@property
def shape(self):
return self._shape
@property
def ndim(self):
return self._ndim
@property
def nd(self):
return self._ndim
@property
def numiter(self):
return self._numiter
@property
def size(self):
return self._size
@property
def iters(self):
return self._iters
@property
def index(self):
return self._index
def __next__(self):
if self.index < self.size:
self._index += 1
return self._data[self.index - 1]
raise StopIteration
def __iter__(self):
return self
def reset(self):
self._index = 0
| ivy/ivy/functional/frontends/numpy/broadcast/methods.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/broadcast/methods.py",
"repo_id": "ivy",
"token_count": 561
} | 35 |
from . import generating_index_arrays
from .generating_index_arrays import *
from . import indexing_like_operations
from .indexing_like_operations import *
from . import inserting_data_into_arrays
from .inserting_data_into_arrays import *
from . import lib
| ivy/ivy/functional/frontends/numpy/indexing_routines/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/indexing_routines/__init__.py",
"repo_id": "ivy",
"token_count": 77
} | 36 |
# global
import ivy
# local
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
inputs_to_ivy_arrays,
handle_numpy_casting,
handle_numpy_dtype,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
# --- Helpers --- #
# --------------- #
@handle_numpy_out
@to_ivy_arrays_and_back
@handle_numpy_dtype
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _equal(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.equal(x1, x2, out=out)
if ivy.is_array(where):
where = ivy.asarray(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@to_ivy_arrays_and_back
@handle_numpy_dtype
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _greater(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.greater(x1, x2, out=out)
if ivy.is_array(where):
where = ivy.asarray(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@to_ivy_arrays_and_back
@handle_numpy_dtype
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _greater_equal(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.greater_equal(x1, x2, out=out)
if ivy.is_array(where):
where = ivy.asarray(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@to_ivy_arrays_and_back
@handle_numpy_dtype
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _less(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.less(x1, x2, out=out)
if ivy.is_array(where):
where = ivy.asarray(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@to_ivy_arrays_and_back
@handle_numpy_dtype
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _less_equal(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.less_equal(x1, x2, out=out)
if ivy.is_array(where):
where = ivy.asarray(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@to_ivy_arrays_and_back
@handle_numpy_dtype
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _not_equal(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.not_equal(x1, x2, out=out)
if ivy.is_array(where):
where = ivy.asarray(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def array_equal(a1, a2, equal_nan=False):
if not equal_nan:
return ivy.array(ivy.array_equal(a1, a2))
a1nan, a2nan = ivy.isnan(a1), ivy.isnan(a2)
if not (a1nan == a2nan).all():
return False
return ivy.array(ivy.array_equal(a1 * ~a1nan, a2 * ~a2nan))
@inputs_to_ivy_arrays
@from_zero_dim_arrays_to_scalar
def array_equiv(a1, a2):
if len(ivy.shape(a1)) < len(ivy.shape(a2)):
a1 = ivy.broadcast_to(a1, ivy.shape(a2))
else:
a2 = ivy.broadcast_to(a2, ivy.shape(a1))
return ivy.array_equal(a1, a2)
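# Illustrative usage sketch (hypothetical helper, never called at import time): the
# smaller input is broadcast against the larger one before comparing, so a single
# row repeated down a matrix still counts as equivalent (the call below is True).
def _array_equiv_example():
    return array_equiv(ivy.array([1, 2]), ivy.array([[1, 2], [1, 2]]))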
| ivy/ivy/functional/frontends/numpy/logic/comparison.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/logic/comparison.py",
"repo_id": "ivy",
"token_count": 1965
} | 37 |
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_casting,
handle_numpy_dtype,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
import ivy.functional.frontends.numpy as np_frontend
@to_ivy_arrays_and_back
def column_stack(tup):
out_dtype = ivy.dtype(tup[0])
for i in tup:
out_dtype = ivy.as_ivy_dtype(
np_frontend.promote_numpy_dtypes(i.dtype, out_dtype)
)
return ivy.column_stack(tup)
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def concatenate(arrays, /, *, axis=0, out=None, dtype=None, casting="same_kind"):
if dtype is not None:
out_dtype = ivy.as_ivy_dtype(dtype)
else:
out_dtype = ivy.dtype(arrays[0])
for i in arrays:
out_dtype = ivy.as_ivy_dtype(
np_frontend.promote_numpy_dtypes(i.dtype, out_dtype)
)
return ivy.concat(arrays, axis=axis, out=out).astype(out_dtype, copy=False)
@to_ivy_arrays_and_back
def hstack(tup):
out_dtype = ivy.dtype(tup[0])
for i in tup:
out_dtype = ivy.as_ivy_dtype(
np_frontend.promote_numpy_dtypes(i.dtype, out_dtype)
)
return ivy.hstack(tup)
@handle_numpy_out
@to_ivy_arrays_and_back
def stack(arrays, /, *, axis=0, out=None):
out_dtype = ivy.dtype(arrays[0])
for i in arrays:
out_dtype = ivy.as_ivy_dtype(
np_frontend.promote_numpy_dtypes(i.dtype, out_dtype)
)
return ivy.stack(arrays, axis=axis, out=out).astype(out_dtype, copy=False)
@to_ivy_arrays_and_back
def vstack(tup):
out_dtype = ivy.dtype(tup[0])
for i in tup:
out_dtype = ivy.as_ivy_dtype(
np_frontend.promote_numpy_dtypes(i.dtype, out_dtype)
)
return ivy.vstack(tup)
row_stack = vstack
| ivy/ivy/functional/frontends/numpy/manipulation_routines/joining_arrays.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/manipulation_routines/joining_arrays.py",
"repo_id": "ivy",
"token_count": 984
} | 38 |
# global
import ivy
# local
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_casting,
handle_numpy_dtype,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
# --- Helpers --- #
# --------------- #
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _ceil(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.ceil(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _floor(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.floor(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _rint(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.round(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, x), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _trunc(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.trunc(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
# --- Main --- #
# ------------ #
@handle_numpy_out
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def around(a, decimals=0, out=None):
return ivy.round(a, decimals=decimals, out=out)
@handle_numpy_out
@to_ivy_arrays_and_back
def fix(
x,
/,
out=None,
):
where = ivy.greater_equal(x, 0)
return ivy.where(where, ivy.floor(x, out=out), ivy.ceil(x, out=out), out=out)
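# Illustrative usage sketch (hypothetical helper, never called at import time): `fix`
# rounds toward zero, taking the floor of non-negative entries and the ceiling of
# negative ones, e.g. [-1.7, -0.2, 0.2, 1.7] -> [-1., -0., 0., 1.].
def _fix_example():
    return fix(ivy.array([-1.7, -0.2, 0.2, 1.7]))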
@handle_numpy_out
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def round(a, decimals=0, out=None):
return ivy.round(a, decimals=decimals, out=out)
| ivy/ivy/functional/frontends/numpy/mathematical_functions/rounding.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/mathematical_functions/rounding.py",
"repo_id": "ivy",
"token_count": 1218
} | 39 |
# global
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
from_zero_dim_arrays_to_scalar,
)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def count_nonzero(a, axis=None, *, keepdims=False):
x = ivy.array(a)
zero = ivy.zeros(ivy.shape(x), dtype=x.dtype)
return ivy.sum(
ivy.astype(ivy.not_equal(x, zero), ivy.int64),
axis=axis,
keepdims=keepdims,
)
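# Illustrative usage sketch (hypothetical helper, never called at import time):
# counting down the columns of [[0, 1, 7], [0, 0, 3]] gives [0, 1, 2]; with
# axis=None the total count of 3 is returned instead.
def _count_nonzero_example():
    return count_nonzero(ivy.array([[0, 1, 7], [0, 0, 3]]), axis=0)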
| ivy/ivy/functional/frontends/numpy/sorting_searching_counting/counting.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/sorting_searching_counting/counting.py",
"repo_id": "ivy",
"token_count": 227
} | 40 |
# global
# local
import ivy
# import ivy.functional.frontends.onnx as onnx_frontend
class Tensor:
def __init__(self, array):
self._ivy_array = (
ivy.array(array) if not isinstance(array, ivy.Array) else array
)
def __len__(self):
return len(self._ivy_array)
def __repr__(self):
return str(self.ivy_array.__repr__()).replace(
"ivy.array", "ivy.frontends.onnx.Tensor"
)
# Properties #
# ---------- #
@property
def ivy_array(self):
return self._ivy_array
@property
def device(self):
return self.ivy_array.device
@property
def dtype(self):
return self.ivy_array.dtype
@property
def shape(self):
return self.ivy_array.shape
@property
def ndim(self):
return self.ivy_array.ndim
# Setters #
# --------#
@ivy_array.setter
def ivy_array(self, array):
self._ivy_array = (
ivy.array(array) if not isinstance(array, ivy.Array) else array
)
| ivy/ivy/functional/frontends/onnx/tensor.py/0 | {
"file_path": "ivy/ivy/functional/frontends/onnx/tensor.py",
"repo_id": "ivy",
"token_count": 497
} | 41 |
# global
from ..random import * # noqa: F401
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
# NOTE:
# Only inplace functions are to be added in this file.
# Please add non-inplace counterparts to `/frontends/paddle/random.py`.
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def exponential_(x, lam=1.0, name=None):
return ivy.multiply(lam, ivy.exp(ivy.multiply(-lam, x)))
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
x = ivy.array(x)
return ivy.random_uniform(
low=min, high=max, shape=x.shape, dtype=x.dtype, seed=seed
)
| ivy/ivy/functional/frontends/paddle/tensor/random.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/tensor/random.py",
"repo_id": "ivy",
"token_count": 358
} | 42 |
from .constants import *
| ivy/ivy/functional/frontends/scipy/constants/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/scipy/constants/__init__.py",
"repo_id": "ivy",
"token_count": 7
} | 43 |
from .odr import *
| ivy/ivy/functional/frontends/scipy/odr/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/scipy/odr/__init__.py",
"repo_id": "ivy",
"token_count": 7
} | 44 |
from . import _label
from ._label import *
| ivy/ivy/functional/frontends/sklearn/preprocessing/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/sklearn/preprocessing/__init__.py",
"repo_id": "ivy",
"token_count": 12
} | 45 |
# local
import ivy
import ivy.functional.frontends.tensorflow as tf_frontend
import ivy.functional.frontends.numpy as np_frontend
from ivy.functional.frontends.tensorflow.func_wrapper import (
to_ivy_arrays_and_back,
handle_tf_dtype,
)
class DType:
def __init__(self, dtype_int):
self._ivy_dtype = tf_frontend.tensorflow_enum_to_type[dtype_int]
def __repr__(self):
return "ivy.frontends.tensorflow." + self._ivy_dtype
@property
def ivy_dtype(self):
return self._ivy_dtype
@property
def as_datatype_enum(self):
return tf_frontend.tensorflow_type_to_enum[self._ivy_dtype]
@property
def as_numpy_dtype(self):
return np_frontend.dtype(self._ivy_dtype)
@property
def base_dtype(self):
return self
@property
def is_bool(self):
return self._ivy_dtype.is_bool_dtype
@property
def is_complex(self):
return "complex" in self._ivy_dtype
@property
def is_floating(self):
return self._ivy_dtype.is_float_dtype
@property
def is_integer(self):
return self._ivy_dtype.is_int_dtype
@property
def is_numpy_compatible(self):
return self._ivy_dtype in np_frontend.numpy_type_to_str_and_num_table
@property
def is_unsigned(self):
return self._ivy_dtype.is_uint_dtype
@property
def limits(self):
if self._ivy_dtype is ivy.bool:
return False, True
if self._ivy_dtype.is_int_dtype:
return 0, self._ivy_dtype.info.max
if self._ivy_dtype.is_float_dtype:
return 0, 1
else:
raise ivy.utils.exceptions.IvyException(
f"{self._ivy_dtype} does not have defined limits"
)
@property
def max(self):
if self._ivy_dtype in (ivy.bool, ivy.complex128, ivy.complex64):
raise ivy.utils.exceptions.IvyException(
f"Cannot find maximum value of {self._ivy_dtype}"
)
if self._ivy_dtype is ivy.bfloat16:
return float.fromhex("0x1.FEp127")
return self._ivy_dtype.info.max
@property
def min(self):
if self._ivy_dtype in (ivy.bool, ivy.complex128, ivy.complex64):
            raise ivy.utils.exceptions.IvyException(
                f"Cannot find minimum value of {self._ivy_dtype}"
)
if self._ivy_dtype is ivy.bfloat16:
return float.fromhex("-0x1.FEp127")
return self._ivy_dtype.info.min
@property
def real_dtype(self):
if self._ivy_dtype is ivy.complex64:
return DType(1)
if self._ivy_dtype is ivy.complex128:
return DType(2)
else:
return self
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, DType):
try:
other = as_dtype(other)
except ivy.utils.exceptions.IvyException:
return False
return self._ivy_dtype == other._ivy_dtype
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(repr(self))
def as_dtype(type_value):
if isinstance(type_value, DType):
return type_value
if ivy.is_native_dtype(type_value):
return DType(tf_frontend.tensorflow_type_to_enum[ivy.as_ivy_dtype(type_value)])
if type_value in tf_frontend.tensorflow_enum_to_type:
return DType(type_value)
if type_value in tf_frontend.tensorflow_type_to_enum:
return DType(tf_frontend.tensorflow_type_to_enum[type_value])
if type_value is float:
return DType(1)
if type_value is bool:
return DType(10)
if isinstance(type_value, np_frontend.dtype):
return DType(tf_frontend.tensorflow_type_to_enum[type_value.ivy_dtype])
if issubclass(type_value, np_frontend.generic):
return DType(
tf_frontend.tensorflow_type_to_enum[
np_frontend.numpy_scalar_to_dtype[type_value]
]
)
raise ivy.utils.exceptions.IvyException(
f"Cannot convert the argument 'type_value': {type_value!r} "
"to a TensorFlow Dtype"
)
@handle_tf_dtype
@to_ivy_arrays_and_back
def cast(x, dtype, name=None):
return ivy.astype(x, dtype, copy=False)
| ivy/ivy/functional/frontends/tensorflow/dtypes.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/dtypes.py",
"repo_id": "ivy",
"token_count": 2100
} | 46 |
import ivy
from ivy.functional.frontends.torch.func_wrapper import (
to_ivy_arrays_and_back,
outputs_to_native_arrays,
)
from ivy.func_wrapper import outputs_to_ivy_arrays
def vmap(func, in_dims=0, out_dims=0, randomness="error", *, chunk_size=None):
fun = outputs_to_native_arrays(func)
return to_ivy_arrays_and_back(
outputs_to_ivy_arrays(ivy.vmap(fun, in_axes=in_dims, out_axes=out_dims))
)
| ivy/ivy/functional/frontends/torch/func.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/func.py",
"repo_id": "ivy",
"token_count": 191
} | 47 |
# global
from functools import reduce
# local
import ivy
from ivy import with_unsupported_dtypes
from ivy.functional.frontends.torch.func_wrapper import (
to_ivy_arrays_and_back,
)
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"float16",
)
},
"torch",
)
@to_ivy_arrays_and_back
def adaptive_avg_pool1d(input, output_size):
return ivy.adaptive_avg_pool1d(input, output_size)
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
"torch",
)
@to_ivy_arrays_and_back
def adaptive_avg_pool2d(input, output_size):
return ivy.adaptive_avg_pool2d(input, output_size, data_format="NCHW")
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"float16",
)
},
"torch",
)
@to_ivy_arrays_and_back
def adaptive_max_pool2d(
input,
output_size,
return_indices=False,
):
# ToDo: Add return_indices once superset is implemented
return ivy.adaptive_max_pool2d(input, output_size)
@with_unsupported_dtypes(
{
"2.2 and below": (
"bfloat16",
"float16",
)
},
"torch",
)
@to_ivy_arrays_and_back
def adaptive_max_pool3d(
input,
output_size,
return_indices=False,
):
return ivy.adaptive_max_pool3d(input, output_size)
@with_unsupported_dtypes(
{"2.2 and below": ("float16",)},
"torch",
)
@to_ivy_arrays_and_back
def avg_pool1d(
input,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
count_include_pad=True,
):
if not isinstance(padding, int):
padding = [(pad, pad) for pad in padding]
return ivy.avg_pool1d(
input,
kernel_size,
stride if stride is not None else kernel_size,
padding,
data_format="NCW",
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
)
@with_unsupported_dtypes(
{"2.2 and below": ("float16",)},
"torch",
)
@to_ivy_arrays_and_back
def avg_pool2d(
input,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
count_include_pad=True,
divisor_override=None,
):
if not isinstance(padding, int):
padding = [(pad, pad) for pad in padding]
return ivy.avg_pool2d(
input,
kernel_size,
stride if stride is not None else kernel_size,
padding,
data_format="NCHW",
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
divisor_override=divisor_override,
)
@with_unsupported_dtypes(
{"2.2 and below": ("float16", "bfloat16")},
"torch",
)
@to_ivy_arrays_and_back
def avg_pool3d(
input,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
count_include_pad=True,
divisor_override=None,
):
if not isinstance(padding, int):
padding = [(pad, pad) for pad in padding]
return ivy.avg_pool3d(
input,
kernel_size,
stride if stride is not None else kernel_size,
padding,
data_format="NCDHW",
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
divisor_override=divisor_override,
)
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
"torch",
)
@to_ivy_arrays_and_back
def lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
data_format = "NCW"
padding = "VALID"
if stride is None:
stride = kernel_size
if not isinstance(kernel_size, int):
kernel_mul = reduce(lambda x, y: x * y, kernel_size)
else:
kernel_mul = kernel_size
out = ivy.avg_pool1d(
ivy.pow(input, norm_type),
kernel_size,
stride,
padding,
data_format=data_format,
ceil_mode=ceil_mode,
)
p = 1.0 / norm_type if norm_type != 0 else 1.0
return ivy.pow(ivy.multiply(out, kernel_mul), p)
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
"torch",
)
@to_ivy_arrays_and_back
def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
data_format = "NCHW"
padding = "VALID"
if stride is None:
stride = kernel_size
out = ivy.avg_pool2d(
ivy.pow(input, norm_type),
kernel_size,
stride,
padding,
data_format=data_format,
ceil_mode=ceil_mode,
)
if not isinstance(kernel_size, int):
kernel_mul = reduce(lambda x, y: x * y, kernel_size)
else:
kernel_mul = kernel_size
p = ivy.divide(1.0, norm_type) if norm_type != 0 else 1.0
return ivy.pow(ivy.multiply(out, kernel_mul), p).astype(input.dtype)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def max_pool1d(
input,
kernel_size,
stride=None,
padding=0,
dilation=1,
ceil_mode=False,
return_indices=False,
):
if stride is None:
stride = kernel_size
if not isinstance(padding, int):
padding = [(pad, pad) for pad in padding]
return ivy.max_pool1d(
input,
kernel_size,
stride,
padding,
data_format="NCW",
dilation=dilation,
ceil_mode=ceil_mode,
)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def max_pool2d(
input,
kernel_size,
stride=None,
padding=0,
dilation=1,
ceil_mode=False,
return_indices=False,
):
if stride is None:
stride = kernel_size
if not isinstance(padding, int):
padding = [(pad, pad) for pad in padding]
return ivy.max_pool2d(
input,
kernel_size,
stride,
padding,
data_format="NCHW",
dilation=dilation,
ceil_mode=ceil_mode,
)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def max_pool3d(
input,
kernel_size,
stride=None,
padding=0,
dilation=1,
ceil_mode=False,
return_indices=False,
):
if stride is None:
stride = kernel_size
if not isinstance(padding, int):
padding = [(pad, pad) for pad in padding]
return ivy.max_pool3d(
input,
kernel_size,
stride,
padding,
data_format="NCDHW",
dilation=dilation,
ceil_mode=ceil_mode,
)
| ivy/ivy/functional/frontends/torch/nn/functional/pooling_functions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/nn/functional/pooling_functions.py",
"repo_id": "ivy",
"token_count": 3230
} | 48 |
import ivy
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_supported_dtypes, with_unsupported_device_and_dtypes
@to_ivy_arrays_and_back
def batched_nms(boxes, scores, idxs, iou_threshold):
if boxes.size == 0:
return ivy.array([], dtype=ivy.int64)
else:
max_coordinate = boxes.max()
boxes_dtype = boxes.dtype
offsets = idxs.astype(boxes_dtype) * (
max_coordinate + ivy.array(1, dtype=boxes_dtype)
)
boxes_for_nms = boxes + offsets[:, None]
keep = nms(boxes_for_nms, scores, iou_threshold)
return keep
@to_ivy_arrays_and_back
def box_area(boxes):
return ivy.prod(boxes[..., 2:] - boxes[..., :2], axis=-1)
@to_ivy_arrays_and_back
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = ivy.maximum(boxes1[:, None, :2], boxes2[:, :2])
rb = ivy.minimum(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clip(x_min=0)
inter = wh[:, :, 0] * wh[:, :, 1]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou
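# Illustrative usage sketch (hypothetical helper, never called at import time): two
# 2x2 boxes offset by one unit each have area 4 and overlap in a 1x1 region, so
# their IoU is 1 / (4 + 4 - 1) = 1/7.
def _box_iou_example():
    boxes1 = ivy.array([[0.0, 0.0, 2.0, 2.0]])
    boxes2 = ivy.array([[1.0, 1.0, 3.0, 3.0]])
    return box_iou(boxes1, boxes2)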
@with_unsupported_device_and_dtypes(
{
"2.2 and below": {
"cpu": ("float16",),
}
},
"torch",
)
@to_ivy_arrays_and_back
def clip_boxes_to_image(boxes, size):
height, width = size
boxes_x = boxes[..., 0::2].clip(0, width)
boxes_y = boxes[..., 1::2].clip(0, height)
clipped_boxes = ivy.stack([boxes_x, boxes_y], axis=-1)
return clipped_boxes.reshape(boxes.shape).astype(boxes.dtype)
@to_ivy_arrays_and_back
def nms(boxes, scores, iou_threshold):
return ivy.nms(boxes, scores, iou_threshold)
@to_ivy_arrays_and_back
def remove_small_boxes(boxes, min_size):
w, h = boxes[..., 2] - boxes[..., 0], boxes[..., 3] - boxes[..., 1]
return ivy.nonzero((w >= min_size) & (h >= min_size))[0]
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, "torch")
@to_ivy_arrays_and_back
def roi_align(
input, boxes, output_size, spatial_scale=1.0, sampling_ratio=1, aligned=False
):
return ivy.roi_align(
input, boxes, output_size, spatial_scale, sampling_ratio, aligned
)
| ivy/ivy/functional/frontends/torchvision/ops.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torchvision/ops.py",
"repo_id": "ivy",
"token_count": 1005
} | 49 |
# global
from __future__ import annotations
import functools
from numbers import Number
from typing import (
Union,
Tuple,
Optional,
List,
Sequence,
Callable,
Protocol,
TypeVar,
Iterable,
)
import numpy as np
# local
import ivy
from ivy import to_ivy
from ivy.utils.exceptions import handle_exceptions
from ivy.utils.backend import current_backend
from ivy.func_wrapper import (
handle_array_function,
infer_dtype,
handle_out_argument,
outputs_to_ivy_arrays,
inputs_to_native_arrays,
inputs_to_native_shapes,
to_native_arrays_and_back,
handle_nestable,
handle_array_like_without_promotion,
handle_device,
handle_backend_invalid,
temp_asarray_wrapper,
)
# Helpers #
# --------#
def _asarray_handle_nestable(fn: Callable) -> Callable:
fn_name = fn.__name__
@functools.wraps(fn)
def _asarray_handle_nestable_wrapper(*args, **kwargs):
"""Call `fn` with the *nestable* property of the function correctly
handled. This means mapping the function to the container leaves if any
containers are passed in the input.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with the nestable property handled correctly.
"""
# This decorator should only be applied to ivy.asarray, so we know where
# the container must be if there is one.
cont_fn = getattr(ivy.Container, f"static_{fn_name}")
if isinstance(args[0], ivy.Container):
return cont_fn(*args, **kwargs)
        # if the passed arguments do not contain a container, call the function
        # with the passed arguments, returning an ivy or a native array.
return fn(*args, **kwargs)
_asarray_handle_nestable_wrapper.handle_nestable = True
return _asarray_handle_nestable_wrapper
def _ivy_to_native(x):
# checks the first element of the leaf list and
# converts it to a native array if it is an ivy array
# assumes that either all elements in a leaf list are ivy arrays
# or none of them are
if isinstance(x, (list, tuple)) and len(x) != 0 and isinstance(x[0], (list, tuple)):
for i, item in enumerate(x):
x = list(x) if isinstance(x, tuple) else x
x[i] = _ivy_to_native(item)
elif (isinstance(x, (list, tuple)) and len(x) > 0) and ivy.is_ivy_array(x[0]):
x = ivy.to_native(x, nested=True)
elif ivy.is_ivy_array(x):
x = ivy.to_native(x)
return x
def _shape_to_native(x: Iterable) -> Tuple[int]:
    # checks the first element of the leaf list and
    # converts it to a native shape if it is an ivy.Shape
# This function is to be used with the nested_map function
# it was a lambda function before but was replaced with the defined function below
def nested_map_shape_fn(x: Iterable) -> List:
return x.shape if isinstance(x, ivy.Shape) else x
if isinstance(x, (list, tuple)) and len(x) != 0 and isinstance(x[0], (list, tuple)):
for i, item in enumerate(x):
x = list(x) if isinstance(x, tuple) else x
x[i] = _shape_to_native(item)
else:
if (isinstance(x, (list, tuple)) and len(x) > 0) and (
isinstance(x[0], ivy.Shape) and ivy.array_mode
):
            x = ivy.nested_map(nested_map_shape_fn, x)
elif isinstance(x, ivy.Shape) and ivy.array_mode:
x = x.shape
return x
def _flatten_nest(xs):
for x in xs:
if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
yield from _flatten_nest(x)
else:
yield x
def _remove_np_bfloat16(obj):
# unlike other frameworks, torch and paddle do not support creating tensors
# from numpy arrays that have bfloat16 dtype using any extension because
# bfloat16 in not supported natively by numpy (as of version <=1.25)
if isinstance(obj, np.ndarray) and obj.dtype.name == "bfloat16":
return obj.tolist()
return obj
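# Illustrative usage sketch (hypothetical helper, never called at import time): only
# ndarrays whose dtype is the extension-provided bfloat16 are converted to nested
# Python lists; anything else is returned untouched, so the check below is True.
def _remove_np_bfloat16_example():
    arr = np.ones((2, 2), dtype="float32")
    return _remove_np_bfloat16(arr) is arr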
def _asarray_to_native_arrays_and_back(fn: Callable) -> Callable:
@functools.wraps(fn)
def _asarray_to_native_arrays_and_back_wrapper(*args, dtype=None, **kwargs):
"""Wrap `fn` so that input arrays are all converted to
`ivy.NativeArray` instances and return arrays are all converted to
`ivy.Array` instances.
This wrapper is specifically for the backend implementations of
asarray.
It assumes either all the elements in a leaf list are ivy arrays
        or none of them are. It checks the first element of each leaf
        list; if it is an ivy array, it converts all the elements in the
        leaf list to native, otherwise it skips that leaf list.
"""
new_arg = _ivy_to_native(args[0])
new_args = (new_arg,) + args[1:]
if dtype is not None:
dtype = ivy.default_dtype(dtype=dtype, as_native=True)
return to_ivy(fn(*new_args, dtype=dtype, **kwargs))
_asarray_to_native_arrays_and_back_wrapper._asarray_to_native_arrays_and_back = True
return _asarray_to_native_arrays_and_back_wrapper
def _asarray_infer_dtype(fn: Callable) -> Callable:
@functools.wraps(fn)
def _asarray_infer_dtype_wrapper(*args, dtype=None, **kwargs):
"""Determine the correct `dtype`, and then calls the function with the
`dtype` passed explicitly. This wrapper is specifically for the backend
implementations of asarray.
Parameters
----------
args
The arguments to be passed to the function.
dtype
The dtype for the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with `dtype` passed explicitly.
"""
def _infer_dtype(obj):
if isinstance(obj, ivy.NativeShape):
obj = list(obj)
if hasattr(obj, "dtype"):
return obj.dtype.name if isinstance(obj, np.ndarray) else obj.dtype
else:
return ivy.default_dtype(item=obj)
if not ivy.exists(dtype):
arr = args[0]
# get default dtypes for all elements
dtype_list = [ivy.nested_map(lambda x: _infer_dtype(x), arr, shallow=False)]
# flatten the nested structure
dtype_list = _flatten_nest(dtype_list)
# keep unique dtypes
dtype_list = list(set(dtype_list))
if len(dtype_list) != 0: # handle the case of empty input
# promote all dtypes to a single dtype
dtype = dtype_list[0]
# we disable precise mode to avoid wider than necessary casting
# that might result from the mixing of int32 and float32
with ivy.PreciseMode(False):
for dt in dtype_list[1:]:
dtype = ivy.promote_types(dtype, dt)
else:
dtype = ivy.default_float_dtype()
dtype = ivy.as_native_dtype(dtype)
# call the function with dtype provided explicitly
return fn(*args, dtype=dtype, **kwargs)
_asarray_infer_dtype_wrapper.infer_dtype = True
return _asarray_infer_dtype_wrapper
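# Illustrative usage sketch (hypothetical helper, never called at import time): when
# no dtype is given, the default dtype of every leaf is collected and promoted to a
# single dtype (with precise mode disabled), so mixing Python ints and floats in
# ivy.asarray([1, 2.0]) produces a floating-point array.
def _asarray_dtype_inference_example():
    return ivy.asarray([1, 2.0]).dtype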
def _asarray_infer_device(fn: Callable) -> Callable:
@functools.wraps(fn)
    def _asarray_infer_device_wrapper(*args, device=None, **kwargs):
        """Determine the correct `device`, and then call the function with the
`device` passed explicitly. This wrapper is specifically for the
backend implementations of asarray.
Parameters
----------
args
The arguments to be passed to the function.
device
The device for the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with `device` passed explicitly.
"""
if isinstance(args[0], list):
return fn(
*args, device=ivy.default_device(device, as_native=True), **kwargs
)
# find the first array argument, if required
arr = None if ivy.exists(device) else args[0]
# infer the correct device
device = ivy.default_device(device, item=arr, as_native=True)
# call the function with device provided explicitly
return fn(*args, device=device, **kwargs)
_asarray_infer_device_wrapper.infer_device = True
return _asarray_infer_device_wrapper
def _asarray_inputs_to_native_shapes(fn: Callable) -> Callable:
@functools.wraps(fn)
def _inputs_to_native_shapes(*args, **kwargs):
new_arg = _shape_to_native(args[0])
new_args = (new_arg,) + args[1:]
return fn(*new_args, **kwargs)
_inputs_to_native_shapes.inputs_to_native_shapes = True
return _inputs_to_native_shapes
# Type hints #
# -----------#
SupportsBufferProtocol = TypeVar("SupportsBufferProtocol")
_T_co = TypeVar("_T_co", covariant=True)
class NestedSequence(Protocol[_T_co]):
def __getitem__(self, key: int, /) -> Union[_T_co, NestedSequence[_T_co]]: ...
def __len__(self, /) -> int: ...
# Array API Standard #
# -------------------#
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@outputs_to_ivy_arrays
@handle_array_function
@handle_device
def arange(
start: Number,
/,
stop: Optional[Number] = None,
step: Number = 1,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return evenly spaced values within a given interval, with the spacing
being specified.
Values are generated within the half-open interval [start, stop) (in other words,
the interval including start but excluding stop). For integer arguments the function
    is equivalent to the Python built-in range function, but returns an array in the
    chosen ML framework rather than a list.
    See :func:`linspace` for a certain number of evenly spaced values in an interval.
Parameters
----------
start
if stop is specified, the start of interval (inclusive); otherwise, the end of
the interval (exclusive). If stop is not specified, the default starting value
is 0.
stop
the end of the interval. Default: ``None``.
step
the distance between two adjacent elements (out[i+1] - out[i]). Must not be 0;
may be negative; this results in an empty array if stop >= start. Default: 1.
dtype
output array data type. If dtype is None, the output array data type must be
inferred from start, stop and step. If those are all integers, the output array
dtype must be the default integer dtype; if one or more have type float, then
the output array dtype must be the default floating-point data type. Default:
None.
device
device on which to place the created array. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
a one-dimensional array containing evenly spaced values. The length of the
output array must be ceil((stop-start)/step) if stop - start and step have the
same sign, and length 0 otherwise.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.arange.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
>>> stop = 5
>>> x = ivy.arange(stop)
>>> print(x)
ivy.array([0, 1, 2, 3, 4])
>>> start = 1
>>> stop = 5
>>> x = ivy.arange(start, stop)
>>> print(x)
ivy.array([1, 2, 3, 4])
>>> start = 1
>>> stop = 10
>>> step = 2
>>> x = ivy.arange(start, stop, step)
>>> print(x)
ivy.array([1, 3, 5, 7, 9])
>>> start = 1
>>> stop = 10
>>> step = 2
>>> dtype = "float64"
>>> device = "cpu"
>>> x = ivy.arange(start, stop, step, dtype=dtype, device=device)
>>> print(x, x.dtype, x.device)
ivy.array([1., 3., 5., 7., 9.]) float64 cpu
"""
return current_backend().arange(
start, stop, step, dtype=dtype, device=device, out=out
)
@temp_asarray_wrapper
@handle_backend_invalid
@handle_array_like_without_promotion
@handle_out_argument
@handle_array_function
@handle_device
def asarray(
obj: Union[
ivy.Array,
ivy.NativeArray,
ivy.Shape,
ivy.NativeShape,
bool,
int,
float,
NestedSequence,
SupportsBufferProtocol,
np.ndarray,
],
/,
*,
copy: Optional[bool] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Convert the input to an array.
Parameters
----------
obj
input data, in any form that can be converted to an array. This includes lists,
lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays.
copy
boolean, indicating whether or not to copy the input. Default: ``None``.
dtype
output array data type. If ``dtype`` is ``None``, the output array data type is
inferred from the input data. Default ``None``.
device
device on which to place the created array. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
An array interpretation of x.
Examples
--------
With list of lists as input:
>>> ivy.asarray([[1,2],[3,4]])
ivy.array([[1, 2],
[3, 4]])
With tuple of lists as input:
>>> ivy.asarray(([1.4,5.6,5.5],[3.1,9.1,7.5]))
ivy.array([[1.39999998, 5.5999999 , 5.5 ],
[3.0999999 , 9.10000038, 7.5 ]])
With ndarray as input:
>>> x = ivy.np.ndarray(shape=(2,2), order='C')
>>> ivy.asarray(x)
ivy.array([[6.90786433e-310, 6.90786433e-310],
[6.90786433e-310, 6.90786433e-310]])
With :class:`ivy.Container` as input:
>>> x = ivy.Container(a = [(1,2),(3,4),(5,6)], b = ((1,2,3),(4,5,6)))
>>> ivy.asarray(x)
{
a: ivy.array([[1, 2],[3, 4], [5, 6]]),
b: ivy.array([[1, 2, 3],
[4, 5, 6]])
}
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.asarray.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
"""
return current_backend().asarray(
obj, copy=copy, dtype=dtype, device=device, out=out
)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_native_shapes
@outputs_to_ivy_arrays
@handle_array_function
@infer_dtype
@handle_device
def zeros(
shape: Union[ivy.Shape, ivy.NativeShape],
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a new array having a specified ``shape`` and filled with zeros.
Parameters
----------
shape
output array shape.
dtype
output array data type. If ``dtype`` is ``None``, the output array data type must
be the default floating-point data type. Default ``None``.
device
device on which to place the created array. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing zeros.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.zeros.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.NativeShape` input:
>>> shape = (3, 5)
>>> x = ivy.zeros(shape)
>>> print(x)
ivy.array([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
>>> x = ivy.zeros(5)
>>> print(x)
ivy.array([0., 0., 0., 0., 0.])
"""
return current_backend().zeros(shape, dtype=dtype, device=device, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_native_shapes
@outputs_to_ivy_arrays
@handle_array_function
@infer_dtype
@handle_device
def ones(
shape: Union[ivy.Shape, ivy.NativeShape],
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a new array having a specified ``shape`` and filled with ones.
.. note::
An output array having a complex floating-point data type must contain complex
numbers having a real component equal to one and an imaginary component equal to
zero (i.e., ``1 + 0j``).
Parameters
----------
shape
output array shape.
dtype
output array data type. If ``dtype`` is ``None``, the output array data type
must be the default floating-point data type. Default ``None``.
device
device on which to place the created array. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing ones.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.ones.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Shape` input:
>>> shape = (2,2)
>>> x = ivy.ones(shape)
>>> print(x)
ivy.array([[1., 1.],
[1., 1.]])
With :class:`ivy.Dtype` input:
>>> shape = (3,2)
>>> d_type = ivy.int64
>>> y = ivy.ones(shape, dtype=d_type)
>>> print(y)
ivy.array([[1, 1],
[1, 1],
[1, 1]])
With :class:`ivy.Device` input:
>>> shape = (3,2)
>>> y = ivy.ones(shape, device="cpu")
>>> print(y)
ivy.array([[1., 1.],
[1., 1.],
[1., 1.]])
With :class:`ivy.Array` input:
>>> shape = (1, 5, 2)
>>> x = ivy.zeros(shape)
>>> ivy.ones(shape, out=x)
>>> print(x)
ivy.array([[[1., 1.],
[1., 1.],
[1., 1.],
[1., 1.],
[1., 1.]]])
"""
return current_backend().ones(shape, dtype=dtype, device=device, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@infer_dtype
@handle_device
def full_like(
x: Union[ivy.Array, ivy.NativeArray],
/,
fill_value: Number,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a new array filled with ``fill_value`` and having the same
``shape`` as an input array ``x`` .
Parameters
----------
x
input array from which to derive the output array shape.
fill_value
Scalar fill value
dtype
output array data type. If ``dtype`` is `None`, the output array data type must
be inferred from ``x``. Default: ``None``.
device
device on which to place the created array. If ``device`` is ``None``, the
output array device must be inferred from ``x``. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array having the same shape as ``x`` and where every element is equal to
``fill_value``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.full_like.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :code:`int` datatype:
>>> x = ivy.array([1, 2, 3, 4, 5, 6])
>>> fill_value = 1
>>> y = ivy.full_like(x, fill_value)
>>> print(y)
ivy.array([1, 1, 1, 1, 1, 1])
>>> fill_value = 0.000123
>>> x = ivy.ones(5)
>>> y = ivy.full_like(x, fill_value)
>>> print(y)
ivy.array([0.000123, 0.000123, 0.000123, 0.000123, 0.000123])
With float datatype:
>>> x = ivy.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
>>> fill_value = 0.000123
>>> y = ivy.full_like(x, fill_value)
>>> print(y)
ivy.array([0.000123, 0.000123, 0.000123, 0.000123, 0.000123, 0.000123])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([3.0, 8.0])
>>> fill_value = 0.000123
>>> y = ivy.full_like(x,fill_value)
>>> print(y)
ivy.array([0.000123, 0.000123])
>>> x = ivy.native_array([[3., 8., 2.], [2., 8., 3.]])
>>> y = ivy.full_like(x, fill_value)
>>> print(y)
ivy.array([[0.000123, 0.000123, 0.000123],
[0.000123, 0.000123, 0.000123]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.2, 2.2324, 3.234]),
... b=ivy.array([4.123, 5.23, 6.23]))
>>> fill_value = 15.0
>>> y = ivy.full_like(x, fill_value)
>>> print(y)
{
a: ivy.array([15., 15., 15.]),
b: ivy.array([15., 15., 15.])
}
"""
return current_backend(x).full_like(
x, fill_value, dtype=dtype, device=device, out=out
)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@infer_dtype
@handle_device
def ones_like(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a new array filled with ones and having the same shape as an
input array ``x``.
.. note::
An output array having a complex floating-point data type must contain complex
numbers having a real component equal to one and an imaginary component equal
to zero (i.e., ``1 + 0j``).
Parameters
----------
x
input array from which to derive the output array shape.
dtype
output array data type. If ``dtype`` is ``None``, the output array data type
must be inferred from ``x``. Default ``None``.
device
device on which to place the created array. If device is ``None``, the output
array device must be inferred from ``x``. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array having the same shape as ``x`` and filled with ``ones``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.ones_like.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3, 4, 5, 6])
>>> y = ivy.ones_like(x)
>>> print(y)
ivy.array([1, 1, 1, 1, 1, 1])
>>> x = ivy.array([[0, 1, 2],[3, 4, 5]], dtype = ivy.float32)
>>> y = ivy.ones_like(x)
>>> print(y)
ivy.array([[1., 1., 1.],
[1., 1., 1.]])
>>> x = ivy.array([3., 2., 1.])
>>> y = ivy.zeros(3)
>>> ivy.ones_like(x, out=y)
>>> print(y)
ivy.array([1., 1., 1.])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([[3, 8, 2],[2, 8, 3]])
>>> y = ivy.ones_like(x)
>>> print(y)
ivy.array([[1, 1, 1],
[1, 1, 1]])
>>> x = ivy.native_array([3, 8, 2, 0, 0, 2])
>>> y = ivy.ones_like(x, dtype=ivy.IntDtype('int32'), device=ivy.Device('cpu'))
>>> print(y)
ivy.array([1, 1, 1, 1, 1, 1])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([3, 2, 1]), b=ivy.array([8, 2, 3]))
>>> y = ivy.ones_like(x)
>>> print(y)
{
a: ivy.array([1, 1, 1]),
b: ivy.array([1, 1, 1])
}
With :class:`ivy.Array` input:
>>> x = ivy.array([2, 3, 8, 2, 1])
>>> y = x.ones_like()
>>> print(y)
ivy.array([1, 1, 1, 1, 1])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([3., 8.]), b=ivy.array([2., 2.]))
>>> y = x.ones_like()
>>> print(y)
{
a: ivy.array([1., 1.]),
b: ivy.array([1., 1.])
}
"""
return current_backend(x).ones_like(x, dtype=dtype, device=device, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@infer_dtype
@handle_device
def zeros_like(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a new array filled with zeros and having the same ``shape`` as an
input array ``x``.
Parameters
----------
x
input array from which to derive the output array shape.
dtype
output array data type. If ``dtype`` is ``None``, the output array data type
must be inferred from ``x``. Default: ``None``.
device
device on which to place the created array. If ``device`` is ``None``, the
output array device must be inferred from ``x``. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array having the same shape as ``x`` and filled with ``zeros``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.zeros_like.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3, 4, 5, 6])
>>> y = ivy.zeros_like(x)
>>> print(y)
ivy.array([0, 0, 0, 0, 0, 0])
>>> x = ivy.array([[0, 1, 2],[3, 4, 5]], dtype = ivy.float32)
>>> y = ivy.zeros_like(x)
>>> print(y)
ivy.array([[0., 0., 0.],
[0., 0., 0.]])
>>> x = ivy.array([3., 2., 1.])
>>> y = ivy.ones(3)
>>> ivy.zeros_like(x, out=y)
>>> print(y)
ivy.array([0., 0., 0.])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([[3, 8, 2],[2, 8, 3]])
>>> y = ivy.zeros_like(x)
>>> print(y)
ivy.array([[0, 0, 0],[0, 0, 0]])
>>> x = ivy.native_array([3, 8, 2, 0, 0, 2])
>>> y = ivy.zeros_like(x, dtype=ivy.IntDtype('int32'), device=ivy.Device('cpu'))
>>> print(y)
ivy.array([0, 0, 0, 0, 0, 0])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([3, 2, 1]), b=ivy.array([8, 2, 3]))
>>> y = ivy.zeros_like(x)
>>> print(y)
{
a: ivy.array([0, 0, 0]),
b: ivy.array([0, 0, 0])
}
With :class:`ivy.Array` input:
>>> x = ivy.array([2, 3, 8, 2, 1])
>>> y = x.zeros_like()
>>> print(y)
ivy.array([0, 0, 0, 0, 0])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([3., 8.]), b=ivy.array([2., 2.]))
>>> y = x.zeros_like()
>>> print(y)
{
a: ivy.array([0., 0.]),
b: ivy.array([0., 0.])
}
"""
return current_backend(x).zeros_like(x, dtype=dtype, device=device, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def tril(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
k: int = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the lower triangular part of a matrix (or a stack of matrices)
``x``.
.. note::
The main diagonal is defined as the set of indices ``{(i, i)}`` for ``i``
on the interval ``[0, min(M, N) - 1]``.
Parameters
----------
x
input array having shape (..., M, N) and whose innermost two dimensions form MxN
matrices.
k
diagonal above which to zero elements. If k = 0, the diagonal is the main
diagonal. If k < 0, the diagonal is below the main diagonal. If k > 0, the
diagonal is above the main diagonal. Default: ``0``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the lower triangular part(s). The returned array must have
the same shape and data type as x. All elements above the specified diagonal k
must be zeroed. The returned array should be allocated on the same device as x.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.tril.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
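Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> y = ivy.tril(x)
>>> print(y)
ivy.array([[1, 0, 0],
[4, 5, 0],
[7, 8, 9]])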
"""
return current_backend(x).tril(x, k=k, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def triu(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
k: int = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the upper triangular part of a matrix (or a stack of matrices)
``x``.
.. note::
The upper triangular part of the matrix is defined as the elements
on and above the specified diagonal ``k``.
Parameters
----------
x
input array having shape (..., M, N) and whose innermost two dimensions form MxN
matrices.
k
diagonal below which to zero elements. If k = 0, the diagonal is the main
diagonal. If k < 0, the diagonal is below the main diagonal. If k > 0, the
diagonal is above the main diagonal. Default: ``0``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the upper triangular part(s). The returned array must have
the same shape and data type as x. All elements below the specified diagonal k
must be zeroed. The returned array should be allocated on the same device as x.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.triu.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
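Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> y = ivy.triu(x)
>>> print(y)
ivy.array([[1, 2, 3],
[0, 5, 6],
[0, 0, 9]])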
"""
return current_backend(x).triu(x, k=k, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_native_shapes
@outputs_to_ivy_arrays
@handle_array_function
@infer_dtype
@handle_device
def empty(
shape: Union[ivy.Shape, ivy.NativeShape],
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape
output array shape.
dtype
output array data type. If dtype is None, the output array data type must be the
default floating-point data type. Default: ``None``.
device
device on which to place the created array. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an uninitialized array having a specified shape
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.empty.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
"""
return current_backend().empty(shape, dtype=dtype, device=device, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@infer_dtype
@handle_device
def empty_like(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return an uninitialized array with the same shape as an input array x.
Parameters
----------
x
input array from which to derive the output array shape.
dtype
output array data type. If dtype is None, the output array data type must be
inferred from x. Default: ``None``.
device
device on which to place the created array. If device is None, the output array
device must be inferred from x. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array having the same shape as x and containing uninitialized data.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.empty_like.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
"""
return current_backend(x).empty_like(x, dtype=dtype, device=device, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@outputs_to_ivy_arrays
@handle_array_function
@infer_dtype
@handle_device
def eye(
n_rows: int,
n_cols: Optional[int] = None,
/,
*,
k: int = 0,
batch_shape: Optional[Union[int, Sequence[int]]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a two-dimensional array with ones on the k diagonal and zeros
elsewhere.
Parameters
----------
n_rows
number of rows in the output array.
n_cols
number of columns in the output array. If None, the default number of columns in
the output array is equal to n_rows. Default: ``None``.
k
index of the diagonal. A positive value refers to an upper diagonal, a negative
value to a lower diagonal, and 0 to the main diagonal. Default: ``0``.
batch_shape
optional input that determines returning identity array shape.
Default: ``None``.
dtype
output array data type. If dtype is None, the output array data type must be the
default floating-point data type. Default: ``None``.
device
the device on which to place the created array.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array where all elements are equal to zero, except for the ``k``-th diagonal,
whose values are equal to one.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.eye.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :code:`n_rows` input:
>>> x = ivy.eye(3)
>>> print(x)
ivy.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
With :code:`n_cols` input:
>>> x = ivy.eye(3,4)
>>> print(x)
ivy.array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.]])
With :code:`k` input:
>>> x = ivy.eye(3, k=1)
>>> print(x)
ivy.array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
With :code:`dtype` input:
>>> x = ivy.eye(4, k=2, dtype=ivy.IntDtype('int32'))
>>> print(x)
ivy.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]])
With :code:`batch_shape` input:
>>> x = ivy.eye(2, 3, batch_shape=[3])
>>> print(x)
ivy.array([[[1., 0., 0.],
[0., 1., 0.]],
[[1., 0., 0.],
[0., 1., 0.]],
[[1., 0., 0.],
[0., 1., 0.]]])
With :code:`out` input:
>>> y = ivy.ones((3, 3))
>>> ivy.eye(3, out=y)
>>> print(y)
ivy.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
With :code:`device` input:
>>> x = ivy.eye(3, device=ivy.Device('cpu'))
>>> print(x)
ivy.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""
return current_backend().eye(
n_rows,
n_cols,
k=k,
batch_shape=batch_shape,
dtype=dtype,
device=device,
out=out,
)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@infer_dtype
@handle_device
def linspace(
start: Union[ivy.Array, ivy.NativeArray, float],
stop: Union[ivy.Array, ivy.NativeArray, float],
/,
num: int,
*,
axis: Optional[int] = None,
endpoint: bool = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Generate a certain number of evenly-spaced values in an interval along a
given axis.
See :func:`arange`, which allows specifying the step size of evenly spaced values
in an interval.
Parameters
----------
start
First entry in the range.
stop
Final entry in the range.
num
Number of values to generate.
axis
Axis along which the operation is performed.
endpoint
If True, stop is the last sample. Otherwise, it is not included.
dtype
output array data type.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Tensor of evenly-spaced values.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.linspace.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With float input:
>>> x = ivy.linspace(1, 2, 3)
>>> print(x)
ivy.array([1. , 1.5, 2. ])
>>> x = ivy.linspace(1, 2, 4, endpoint=False)
>>> print(x)
ivy.array([1., 1.25, 1.5 , 1.75])
>>> x = ivy.linspace(1, 10, 4, dtype="int32")
>>> print(x)
ivy.array([ 1, 4, 7, 10])
>>> x = ivy.linspace(1, 2, 4, device= "cpu")
>>> print(x)
ivy.array([1., 1.33333337, 1.66666663, 2.])
>>> y = ivy.array([0,0,0,0])
>>> ivy.linspace(1, 2, 4, out= y)
>>> print(y)
ivy.array([1, 1, 1, 2])
With :class:`ivy.Array` input:
>>> x = ivy.array([1,2])
>>> y = ivy.array([4,5])
>>> z = ivy.linspace(x, y, 4, axis = 0)
>>> print(z)
ivy.array([[1, 2],
[2, 3],
[3, 4],
[4, 5]])
"""
return current_backend(start).linspace(
start,
stop,
num,
axis=axis,
endpoint=endpoint,
dtype=dtype,
device=device,
out=out,
)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def meshgrid(
*arrays: Union[ivy.Array, ivy.NativeArray],
sparse: bool = False,
indexing: str = "xy",
out: Optional[ivy.Array] = None,
) -> List[ivy.Array]:
"""Return coordinate matrices from coordinate vectors.
Parameters
----------
arrays
an arbitrary number of one-dimensional arrays representing grid coordinates.
Each array should have the same numeric data type.
sparse
if True, a sparse grid is returned in order to conserve memory.
Default: ``False``.
indexing
Cartesian ``'xy'`` or matrix ``'ij'`` indexing of output. If provided zero or
one one-dimensional vector(s) (i.e., the zero- and one-dimensional cases,
respectively), the ``indexing`` keyword has no effect and should be ignored.
Default: ``'xy'``.
Returns
-------
ret
list of N arrays, where ``N`` is the number of provided one-dimensional input
arrays. Each returned array must have rank ``N``. For ``N`` one-dimensional
arrays having lengths ``Ni = len(xi)``,
- if matrix indexing ``ij``, then each returned array must have the shape
``(N1, N2, N3, ..., Nn)``.
- if Cartesian indexing ``xy``, then each returned array must have shape
``(N2, N1, N3, ..., Nn)``.
Accordingly, for the two-dimensional case with input one-dimensional arrays of
length ``M`` and ``N``, if matrix indexing ``ij``, then each returned array must
have shape ``(M, N)``, and, if Cartesian indexing ``xy``, then each returned
array must have shape ``(N, M)``.
Similarly, for the three-dimensional case with input one-dimensional arrays of
length ``M``, ``N``, and ``P``, if matrix indexing ``ij``, then each returned
array must have shape ``(M, N, P)``, and, if Cartesian indexing ``xy``, then
each returned array must have shape ``(N, M, P)``.
Each returned array should have the same data type as the input arrays.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of
the `docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.meshgrid.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2])
>>> y = ivy.array([3, 4])
>>> xv, yv = ivy.meshgrid(x, y)
>>> print(xv)
ivy.array([[1, 2],
[1, 2]])
>>> print(yv)
ivy.array([[3, 3],
[4, 4]])
>>> x = ivy.array([1, 2, 5])
>>> y = ivy.array([4, 1])
>>> xv, yv = ivy.meshgrid(x, y, indexing='ij')
>>> print(xv)
ivy.array([[1, 1],
[2, 2],
[5, 5]])
>>> print(yv)
ivy.array([[4, 1],
[4, 1],
[4, 1]])
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([4, 5, 6])
>>> xv, yv = ivy.meshgrid(x, y, sparse=True)
>>> print(xv)
ivy.array([[1, 2, 3]])
>>> print(yv)
ivy.array([[4], [5], [6]])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([1, 2])
>>> y = ivy.native_array([3, 4])
>>> xv, yv = ivy.meshgrid(x, y)
>>> print(xv)
ivy.array([[1, 2],
[1, 2]])
>>> print(yv)
ivy.array([[3, 3],
[4, 4]])
"""
return current_backend().meshgrid(
*arrays, sparse=sparse, indexing=indexing, out=out
)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_native_shapes
@inputs_to_native_arrays
@outputs_to_ivy_arrays
@handle_array_function
@handle_device
def full(
shape: Union[ivy.Shape, ivy.NativeShape],
fill_value: Union[float, bool],
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a new array having a specified ``shape`` and filled with
``fill_value``.
Parameters
----------
shape
output array shape.
fill_value
fill value.
dtype
output array data type. If ``dtype`` is `None`, the output array data type must
be inferred from ``fill_value``. If the fill value is an ``int``, the output
array data type must be the default integer data type. If the fill value is a
``float``, the output array data type must be the default floating-point data
type. If the fill value is a ``bool``, the output array must have boolean data
type. Default: ``None``.
device
device on which to place the created array. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array where every element is equal to `fill_value`.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.full.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Shape` input:
>>> shape = ivy.Shape((2,2))
>>> fill_value = 8.6
>>> x = ivy.full(shape, fill_value)
>>> print(x)
ivy.array([[8.6, 8.6],
[8.6, 8.6]])
With :class:`ivy.NativeShape` input:
>>> shape = ivy.NativeShape((2, 2, 2))
>>> fill_value = True
>>> dtype = ivy.bool
>>> device = ivy.Device('cpu')
>>> x = ivy.full(shape, fill_value, dtype=dtype, device=device)
>>> print(x)
ivy.array([[[True, True],
[True, True]],
[[True, True],
[True, True]]])
With :class:`ivy.NativeDevice` input:
>>> shape = ivy.NativeShape((1, 2))
>>> fill_value = 0.68
>>> dtype = ivy.float64
>>> device = ivy.NativeDevice('cpu')
>>> x = ivy.full(shape, fill_value, dtype=dtype, device=device)
>>> print(x)
ivy.array([[0.68, 0.68]])
With :class:`ivy.Container` input:
>>> shape = ivy.Container(a=ivy.NativeShape((2, 1)), b=ivy.Shape((2, 1, 2)))
>>> fill_value = ivy.Container(a=0.99, b=False)
>>> dtype = ivy.Container(a=ivy.float64, b=ivy.bool)
>>> device = ivy.Container(a=ivy.NativeDevice('cpu'), b=ivy.Device('cpu'))
>>> x = ivy.full(shape, fill_value, dtype=dtype, device=device)
>>> print(x)
{
a: ivy.array([[0.99],
[0.99]]),
b: ivy.array([[[False, False]],
[[False, False]]])
}
"""
return current_backend().full(
shape, fill_value, dtype=dtype, device=device, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def to_dlpack(
x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None
):
"""Return PyCapsule Object.
Parameters
----------
x
input (array) object.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
A PyCapsule object representing the input array in the DLPack format.
.. admonition:: Note
:class: note
The returned array may be either a copy or a view. See
:ref:`data-interchange` for details.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.from_dlpack.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
"""
return current_backend(x).to_dlpack(x, out=out)
@handle_backend_invalid
def from_dlpack(
x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""Return a new array containing the data from another (array) object with
a ``__dlpack__`` method or PyCapsule Object.
Parameters
----------
x
input (array) object with a ``__dlpack__`` method or PyCapsule Object.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the data in `x`.
.. admonition:: Note
:class: note
The returned array may be either a copy or a view. See
:ref:`data-interchange` for details.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.from_dlpack.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
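Examples
--------
>>> x = ivy.array([1., 2., 3.])
>>> capsule = ivy.to_dlpack(x)
>>> y = ivy.from_dlpack(capsule)
>>> print(y)
ivy.array([1., 2., 3.])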
"""
return current_backend(x).from_dlpack(x, out=out)
# Extra #
# ------#
array = asarray
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_native_arrays
@handle_array_function
@handle_device
def copy_array(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
to_ivy_array: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Copy an array.
Parameters
----------
x
array, input array containing elements to copy.
to_ivy_array
boolean, if True the returned array will be an ivy.Array object otherwise
returns an ivy.NativeArray object (i.e. a torch.tensor, np.array, etc.,
depending on the backend), defaults to True.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
a copy of the input array ``x``.
Examples
--------
With one :class:`ivy.Array` input:
>>> x = ivy.array([-1, 0, 1])
>>> y = ivy.copy_array(x)
>>> print(y)
ivy.array([-1, 0, 1])
>>> x = ivy.array([1, 0, 1, 1])
>>> y = ivy.copy_array(x)
>>> print(y)
ivy.array([1, 0, 1, 1])
>>> x = ivy.array([1, 0, 1, -1])
>>> y = ivy.zeros((1, 4))
>>> ivy.copy_array(x, out=y)
>>> print(y)
ivy.array([1, 0, 1, -1])
>>> x = ivy.array([1, 0, 1, 1])
>>> ivy.copy_array(x, out=x)
>>> print(x)
ivy.array([1, 0, 1, 1])
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1, 0, 1]))
>>> y = ivy.copy_array(x)
>>> print(y)
{
a: ivy.array([-1, 0, 1])
}
>>> x = ivy.Container(a=ivy.array([-1, 0, 1]),b=ivy.array([-1, 0, 1, 1, 1, 0]))
>>> y = ivy.copy_array(x)
>>> print(y)
{
a: ivy.array([-1, 0, 1]),
b: ivy.array([-1, 0, 1, 1, 1, 0])
}
With one :class:`ivy.Container` static method:
>>> x = ivy.Container(a=ivy.array([-1, 0, 1]),b=ivy.array([-1, 0, 1, 1, 1, 0]))
>>> y = ivy.Container.static_copy_array(x)
>>> print(y)
{
a: ivy.array([-1, 0, 1]),
b: ivy.array([-1, 0, 1, 1, 1, 0])
}
With one :class:`ivy.Array` instance method:
>>> x = ivy.array([-1, 0, 1])
>>> y = x.copy_array()
>>> print(y)
ivy.array([-1, 0, 1])
>>> x = ivy.array([1, 0, 1, 1])
>>> y = x.copy_array()
>>> print(y)
ivy.array([1, 0, 1, 1])
With :class:`ivy.Container` instance method:
>>> x = ivy.Container(a=ivy.array([1, 0, 1]),b=ivy.array([-1, 0, 1, 1]))
>>> y = x.copy_array()
>>> print(y)
{
a: ivy.array([1, 0, 1]),
b: ivy.array([-1, 0, 1, 1])
}
"""
return current_backend(x).copy_array(x, to_ivy_array=to_ivy_array, out=out)
@handle_backend_invalid
@handle_array_like_without_promotion
def native_array(
x: Union[ivy.Array, ivy.NativeArray, List[Number], Tuple[Number], np.ndarray],
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
) -> ivy.NativeArray:
"""Convert the input to a native array.
Parameters
----------
x
input data, in any form that can be converted to an array. This includes lists,
lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays.
dtype
datatype, optional. Datatype is inferred from the input data.
device
device on which to place the created array. Default: ``None``.
Returns
-------
ret
A native array interpretation of x.
Examples
--------
With :class:`List[Number]` input:
>>> x = [1, 2, 3]
>>> x_native = ivy.native_array(x)
>>> print(x_native)
[1 2 3]
With :class:`np.ndarray` input:
>>> y = np.array([4, 5, 6])
>>> y_native = ivy.native_array(y)
>>> print(y_native)
[4 5 6]
With :class:`ivy.Array` input:
>>> z = ivy.array([7, 8, 9])
>>> z_native = ivy.native_array(z)
>>> print(z_native)
[7 8 9]
"""
# ToDo: Make this more efficient,
# ideally without first converting to ivy.Array with ivy.asarray and then
# converting back to native with ivy.to_native
return ivy.to_native(ivy.asarray(x, dtype=dtype, device=device))
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def one_hot(
indices: Union[ivy.Array, ivy.NativeArray],
depth: int,
/,
*,
on_value: Optional[Number] = None,
off_value: Optional[Number] = None,
axis: Optional[int] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a one-hot array. The locations represented by indices in the
parameter indices take value on_value, while all other locations take value
off_value.
Parameters
----------
indices
Indices for where the ones should be scattered *[batch_shape, dim]*
depth
Scalar defining the depth of the one-hot dimension.
on_value
Scalar defining the value to fill in output when indices[j] == i.
Default: ``1``.
off_value
Scalar defining the value to fill in output when indices[j] != i.
Default: ``0``.
axis
Axis to scatter on. The default is ``-1``, a new inner-most axis is created.
dtype
The data type of the output tensor.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if
None.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Tensor of one-hot values, where the locations given by ``indices`` take the value
``on_value`` and all other locations take the value ``off_value``.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([3, 1])
>>> y = 5
>>> z = x.one_hot(5)
>>> print(z)
ivy.array([[0., 0., 0., 1., 0.],
... [0., 1., 0., 0., 0.]])
>>> x = ivy.array([0])
>>> y = 5
>>> ivy.one_hot(x, y)
ivy.array([[1., 0., 0., 0., 0.]])
>>> x = ivy.array([0])
>>> y = 5
>>> ivy.one_hot(x, 5, out=z)
ivy.array([[1., 0., 0., 0., 0.]])
>>> print(z)
ivy.array([[1., 0., 0., 0., 0.]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2]), \
b=ivy.array([3, 1]), c=ivy.array([2, 3]))
>>> y = 5
>>> z = x.one_hot(y)
>>> print(z)
{
a: ivy.array([[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.]]),
b: ivy.array([[0., 0., 0., 1., 0.],
[0., 1., 0., 0., 0.]]),
c: ivy.array([[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.]])
}
>>> x = ivy.Container(a=ivy.array([2]), \
b=ivy.array([], dtype=ivy.int32), c=ivy.native_array([4]))
>>> y = 7
>>> z = x.one_hot(y)
>>> print(z)
{
a: ivy.array([[0., 0., 1., 0., 0., 0., 0.]]),
b: ivy.array([], shape=(0, 7)),
c: ivy.array([[0., 0., 0., 0., 1., 0., 0.]])
}
"""
return current_backend(indices).one_hot(
indices,
depth,
on_value=on_value,
off_value=off_value,
axis=axis,
dtype=dtype,
device=device,
out=out,
)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@infer_dtype
@handle_device
def logspace(
start: Union[ivy.Array, ivy.NativeArray, float],
stop: Union[ivy.Array, ivy.NativeArray, float],
/,
num: int,
*,
base: float = 10.0,
axis: int = 0,
endpoint: bool = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Generate a certain number of evenly-spaced values in log space, in an
interval along a given axis.
Parameters
----------
start
First value in the range in log space. base ** start is the starting value in
the sequence. Can be an array or a float.
stop
Last value in the range in log space. base ** stop is the final value in the
sequence. Can be an array or a float.
num
Number of values to generate.
base
The base of the log space. Default is 10.0
axis
Axis along which the operation is performed. Relevant only if start or stop are
array-like. Default is 0.
endpoint
If True, stop is the last sample. Otherwise, it is not included. Default is
True.
dtype
The data type of the output tensor. If None, the data type is inferred from
``start`` and ``stop``. Default is None.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Default is
None.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to. Default is None.
Returns
-------
ret
Tensor of evenly-spaced values in log space.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With float input:
>>> print(ivy.logspace(1, 2, 4))
ivy.array([ 10., 21.5443469, 46.41588834, 100.])
>>> print(ivy.logspace(1, 2, 4, endpoint=False))
ivy.array([10., 17.7827941, 31.6227766, 56.23413252])
>>> print(ivy.logspace(1, 2, 4, dtype= int))
ivy.array([ 10., 10., 10., 100.])
>>> out = ivy.array([0,0,0,0])
>>> ivy.logspace(1, 2, 4, out = out)
>>> print(out)
ivy.array([ 10, 21, 46, 100])
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2])
>>> y = ivy.array([4, 5])
>>> print(ivy.logspace(x, y, 4))
ivy.array([[1.e+01, 1.e+02],
[1.e+02, 1.e+03],
[1.e+03, 1.e+04],
[1.e+04, 1.e+05]])
>>> x = ivy.array([1, 2])
>>> y = ivy.array([4, 5])
>>> print(ivy.logspace(x, y, 4, axis = 1))
ivy.array([[[1.e+01, 1.e+02, 1.e+03, 1.e+04],
[1.e+02, 1.e+03, 1.e+04, 1.e+05]]])
>>> x = ivy.array([1, 2])
>>> y = ivy.array([4])
>>> print(ivy.logspace(x, y, 4))
ivy.array([[ 10., 100.],
[ 100., 100.],
[ 1000., 1000.],
[10000., 10000.]])
"""
result = base ** linspace(
start,
stop,
num,
endpoint=endpoint,
axis=axis,
dtype=dtype,
device=device,
)
if ivy.exists(out):
return ivy.inplace_update(out, result)
return result
@handle_nestable
@outputs_to_ivy_arrays
def frombuffer(
buffer: bytes,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
count: Optional[int] = -1,
offset: Optional[int] = 0,
) -> ivy.Array:
r"""Interpret a buffer as a 1-dimensional array.
.. note::
Note that either of the following must be true:
1. count is a positive non-zero number, and the total number of bytes
in the buffer is equal or greater than offset plus count times the size
(in bytes) of dtype.
2. count is negative, and the length (number of bytes) of the buffer
subtracted by the offset is a multiple of the size (in bytes) of dtype.
Parameters
----------
buffer
An object that exposes the buffer interface.
dtype
Data-type of the returned array; default: float.
count
Number of items to read. -1 means all data in the buffer.
offset
Start reading the buffer from this offset (in bytes); default: 0.
Returns
-------
out
1-dimensional array.
Examples
--------
With :class:`bytes` inputs:
>>> x = b'\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@'
>>> y = ivy.frombuffer(x, dtype=ivy.float64)
>>> print(y)
ivy.array([1., 2.])
>>> x = b'\x01\x02\x03\x04'
>>> y = ivy.frombuffer(x, dtype='int8', count=-2, offset=1)
>>> print(y)
ivy.array([2, 3, 4])
>>> x = b'\x00<\x00@\x00B\x00D\x00E'
>>> y = ivy.frombuffer(x, dtype='float16', count=4, offset=2)
>>> print(y)
ivy.array([2., 3., 4., 5.])
"""
return current_backend().frombuffer(
buffer,
dtype=dtype,
count=count,
offset=offset,
)
@handle_exceptions
@handle_nestable
@outputs_to_ivy_arrays
@handle_device
def triu_indices(
n_rows: int,
n_cols: Optional[int] = None,
k: int = 0,
/,
*,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
) -> Tuple[ivy.Array]:
"""Return the indices of the upper triangular part of a row by col matrix
in a 2-by-N shape (tuple of two N dimensional arrays), where the first row
contains row coordinates of all indices and the second row contains column
coordinates. Indices are ordered based on rows and then columns. The upper
triangular part of the matrix is defined as the elements on and above the
diagonal. The argument k controls which diagonal to consider. If k = 0,
all elements on and above the main diagonal are retained. A positive value
excludes just as many diagonals above the main diagonal, and similarly a
negative value includes just as many diagonals below the main diagonal. The
main diagonal is the set of indices {(i,i)} for i∈[0,min{n_rows,
n_cols}−1].
Notes
-----
The primary purpose of this function is to provide indices for slicing an array of
shape (n, m). See
https://numpy.org/doc/stable/reference/generated/numpy.triu_indices.html
for examples. Note that TensorFlow does not support slicing a 2-D tensor with a
tuple of tensors of indices.
Parameters
----------
n_rows
number of rows in the 2-d matrix.
n_cols
number of columns in the 2-d matrix. If None n_cols will be the same as n_rows
k
number of shifts from the main diagonal. k = 0 includes main diagonal,
k > 0 moves upwards and k < 0 moves downwards
device
device on which to place the created array. Default: ``None``.
Returns
-------
ret
a 2-by-N shape, i.e. a tuple of two N-dimensional arrays, where the first subarray
(i.e. ret[0]) contains row coordinates of all indices and the second subarray
(i.e. ret[1]) contains column indices.
Function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
>>> x = ivy.triu_indices(4,4,0)
>>> print(x)
(ivy.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]),
ivy.array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
>>> x = ivy.triu_indices(4,4,1)
>>> print(x)
(ivy.array([0, 0, 0, 1, 1, 2]),
ivy.array([1, 2, 3, 2, 3, 3]))
>>> x = ivy.triu_indices(4,4,-2)
>>> print(x)
(ivy.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3]),
ivy.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3]))
>>> x = ivy.triu_indices(4,2,0)
>>> print(x)
(ivy.array([0, 0, 1]),
ivy.array([0, 1, 1]))
>>> x = ivy.triu_indices(2,4,0)
>>> print(x)
(ivy.array([0, 0, 0, 0, 1, 1, 1]),
ivy.array([0, 1, 2, 3, 1, 2, 3]))
>>> x = ivy.triu_indices(4,-4,0)
>>> print(x)
(ivy.array([]), ivy.array([]))
>>> x = ivy.triu_indices(4,4,100)
>>> print(x)
(ivy.array([]), ivy.array([]))
>>> x = ivy.triu_indices(2,4,-100)
>>> print(x)
(ivy.array([0, 0, 0, 0, 1, 1, 1, 1]), ivy.array([0, 1, 2, 3, 0, 1, 2, 3]))
"""
return current_backend().triu_indices(n_rows, n_cols, k, device=device)
| ivy/ivy/functional/ivy/creation.py/0 | {
"file_path": "ivy/ivy/functional/ivy/creation.py",
"repo_id": "ivy",
"token_count": 29426
} | 50 |
# global
from typing import (
Optional,
Union,
Tuple,
Iterable,
Sequence,
Callable,
Any,
Literal,
List,
)
from numbers import Number
from functools import partial
import math
# local
import ivy
from ivy.func_wrapper import (
handle_out_argument,
handle_partial_mixed_function,
to_native_arrays_and_back,
inputs_to_native_shapes,
handle_nestable,
handle_array_like_without_promotion,
handle_view,
inputs_to_ivy_arrays,
handle_array_function,
handle_device,
handle_backend_invalid,
)
from ivy.functional.ivy.general import _numel
from ivy.utils.backend import current_backend
from ivy.utils.exceptions import handle_exceptions
# Helpers #
# ------- #
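# `_to_tf_padding` normalizes `pad_width` into a per-dimension list of
# [before, after] pairs (the layout used by TensorFlow-style padding).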
def _to_tf_padding(pad_width, ndim):
if isinstance(pad_width, Number):
pad_width = [[pad_width] * 2] * ndim
elif len(pad_width) == 2 and isinstance(pad_width[0], Number):
pad_width = [pad_width] * ndim
elif (
isinstance(pad_width, (list, tuple))
and isinstance(pad_width[0], (list, tuple))
and len(pad_width) < ndim
):
pad_width = pad_width * ndim
return pad_width
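# `_check_paddle_pad` returns True when the requested padding can be handled by
# Paddle's native pad: scalar constant values, a supported mode (and, for even
# reflection, pad widths smaller than the padded dimensions), and rank within
# `ndim_limit`.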
def _check_paddle_pad(
mode, reflect_type, pad_width, input_shape, constant_values, ndim_limit, extend=True
):
if extend:
pad_width = _to_tf_padding(pad_width, len(input_shape))
return isinstance(constant_values, Number) and (
mode == "constant"
or (
(
(
mode == "reflect"
and reflect_type == "even"
and all(
pad_width[i][0] < s and pad_width[i][1] < s
for i, s in enumerate(input_shape)
)
)
or mode in ["edge", "wrap"]
)
and len(input_shape) <= ndim_limit
)
)
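# `_to_paddle_padding` flattens `pad_width` into the flat, last-dimension-first
# list format expected by Paddle's pad operation.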
def _to_paddle_padding(pad_width, ndim):
if isinstance(pad_width, Number):
pad_width = [pad_width] * (2 * ndim)
else:
if len(pad_width) == 2 and isinstance(pad_width[0], Number) and ndim != 1:
pad_width = [pad_width] * ndim
pad_width = [item for sublist in pad_width for item in sublist[::-1]][::-1]
return pad_width
@handle_exceptions
@handle_nestable
@handle_partial_mixed_function
@handle_array_like_without_promotion
@handle_view
@inputs_to_ivy_arrays
@handle_array_function
def flatten(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[bool] = None,
start_dim: int = 0,
end_dim: int = -1,
order: str = "C",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Flattens input by reshaping it into a one-dimensional tensor. If
start_dim or end_dim are passed, only dimensions starting with start_dim
and ending with end_dim are flattened. The order of elements in input is
unchanged.
Parameters
----------
x
input array to flatten.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
start_dim
first dim to flatten. If not set, defaults to 0.
end_dim
last dim to flatten. If not set, defaults to -1.
order
Read the elements of the input container using this index order,
and place the elements into the reshaped array using this index order.
‘C’ means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first axis index
changing slowest.
‘F’ means to read / write the elements using Fortran-like index order, with
the first index changing fastest, and the last index changing slowest.
Note that the ‘C’ and ‘F’ options take no account of the memory layout
of the underlying array, and only refer to the order of indexing.
Default order is 'C'
out
optional output array, for writing the result to.
Returns
-------
ret
the flattened array over the specified dimensions.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[1,2], [3,4]])
>>> ivy.flatten(x)
ivy.array([1, 2, 3, 4])
>>> x = ivy.array([[1,2], [3,4]])
>>> ivy.flatten(x, order='F')
ivy.array([1, 3, 2, 4])
>>> x = ivy.array(
[[[[ 5, 5, 0, 6],
[17, 15, 11, 16],
[ 6, 3, 13, 12]],
[[ 6, 18, 10, 4],
[ 5, 1, 17, 3],
[14, 14, 18, 6]]],
[[[12, 0, 1, 13],
[ 8, 7, 0, 3],
[19, 12, 6, 17]],
[[ 4, 15, 6, 15],
[ 0, 5, 17, 9],
[ 9, 3, 6, 19]]],
[[[17, 13, 11, 16],
[ 4, 18, 17, 4],
[10, 10, 9, 1]],
[[19, 17, 13, 10],
[ 4, 19, 16, 17],
[ 2, 12, 8, 14]]]]
)
>>> ivy.flatten(x, start_dim = 1, end_dim = 2)
ivy.array(
[[[ 5, 5, 0, 6],
[17, 15, 11, 16],
[ 6, 3, 13, 12],
[ 6, 18, 10, 4],
[ 5, 1, 17, 3],
[14, 14, 18, 6]],
[[12, 0, 1, 13],
[ 8, 7, 0, 3],
[19, 12, 6, 17],
[ 4, 15, 6, 15],
[ 0, 5, 17, 9],
[ 9, 3, 6, 19]],
[[17, 13, 11, 16],
[ 4, 18, 17, 4],
[10, 10, 9, 1],
[19, 17, 13, 10],
[ 4, 19, 16, 17],
[ 2, 12, 8, 14]]])
"""
if x.shape == ():
x = ivy.reshape(x, (1, -1))[0, :]
if start_dim == end_dim:
return ivy.inplace_update(out, x) if ivy.exists(out) else x
if start_dim not in range(-len(x.shape), len(x.shape)):
raise IndexError(
"Dimension out of range (expected to be in range of"
f" {[-len(x.shape), len(x.shape) - 1]}, but got {start_dim}"
)
if end_dim not in range(-len(x.shape), len(x.shape)):
raise IndexError(
"Dimension out of range (expected to be in range of"
f" {[-len(x.shape), len(x.shape) - 1]}, but got {end_dim}"
)
if start_dim < 0:
start_dim = len(x.shape) + start_dim
if end_dim < 0:
end_dim = len(x.shape) + end_dim
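# the collapsed dimension has size equal to the product of the flattened dims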
c = 1
for i in range(start_dim, end_dim + 1):
c *= x.shape[i]
lst = [c]
if start_dim != 0:
for i in range(0, start_dim):
lst.insert(i, x.shape[i])
for i in range(end_dim + 1, len(x.shape)):
lst.insert(i, x.shape[i])
return ivy.reshape(x, tuple(lst), order=order, out=out)
flatten.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"handle_out_argument",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays", "handle_partial_mixed_function"),
}
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def moveaxis(
a: Union[ivy.Array, ivy.NativeArray],
source: Union[int, Sequence[int]],
destination: Union[int, Sequence[int]],
/,
*,
copy: Optional[bool] = None,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Move axes of an array to new positions..
Parameters
----------
a
The array whose axes should be reordered.
source
Original positions of the axes to move. These must be unique.
destination
Destination positions for each of the original axes.
These must also be unique.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
out
optional output array, for writing the result to.
Returns
-------
ret
Array with moved axes. This array is a view of the input array.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.zeros((3, 4, 5))
>>> ivy.moveaxis(x, 0, -1).shape
(4, 5, 3)
>>> ivy.moveaxis(x, -1, 0).shape
(5, 3, 4)
"""
return ivy.current_backend().moveaxis(a, source, destination, copy=copy, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def heaviside(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the Heaviside step function for each element in x1.
Parameters
----------
x1
input array.
x2
values to use where x1 is zero.
out
optional output array, for writing the result to.
Returns
-------
ret
output array with element-wise Heaviside step function of x1.
This is a scalar if both x1 and x2 are scalars.
Examples
--------
With :class:`ivy.Array` input:
>>> x1 = ivy.array([-1.5, 0, 2.0])
>>> x2 = ivy.array([0.5])
>>> ivy.heaviside(x1, x2)
ivy.array([0.0000, 0.5000, 1.0000])
>>> x1 = ivy.array([-1.5, 0, 2.0])
>>> x2 = ivy.array([1.2, -2.0, 3.5])
>>> ivy.heaviside(x1, x2)
ivy.array([0., -2., 1.])
"""
return ivy.current_backend().heaviside(x1, x2, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def flipud(
m: Union[ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[bool] = None,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Flip array in the up/down direction. Flip the entries in each column in
the up/down direction. Rows are preserved, but appear in a different order
than before.
Parameters
----------
m
The array to be flipped.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
out
optional output array, for writing the result to.
Returns
-------
ret
Array corresponding to input array with elements
order reversed along axis 0.
Examples
--------
>>> m = ivy.diag([1, 2, 3])
>>> ivy.flipud(m)
ivy.array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
"""
return ivy.current_backend().flipud(m, copy=copy, out=out)
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def vstack(
arrays: Sequence[ivy.Array],
/,
*,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> ivy.Array:
"""Stack arrays in sequence vertically (row wise).
Parameters
----------
arrays
Sequence of arrays to be stacked.
Returns
-------
ret
The array formed by stacking the given arrays.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([2, 3, 4])
>>> ivy.vstack((x, y))
ivy.array([[1, 2, 3],
[2, 3, 4]])
>>> ivy.vstack((x, y, x, y))
ivy.array([[1, 2, 3],
[2, 3, 4],
[1, 2, 3],
[2, 3, 4]])
>>> y = [ivy.array([[5, 6]]), ivy.array([[7, 8]])]
>>> print(ivy.vstack(y))
ivy.array([[5, 6],
[7, 8]])
"""
return ivy.current_backend().vstack(arrays, out=out)
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def hstack(
arrays: Sequence[ivy.Array],
/,
*,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> ivy.Array:
"""Stack arrays in sequence horizotally (column wise).
Parameters
----------
arrays
Sequence of arrays to be stacked.
Returns
-------
ret
The array formed by stacking the given arrays.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([2, 3, 4])
>>> ivy.hstack((x, y))
ivy.array([1, 2, 3, 2, 3, 4])
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([0, 0, 0])
>>> ivy.hstack((x, y, x))
ivy.array([1, 2, 3, 0, 0, 0, 1, 2, 3])
>>> y = [ivy.array([[5, 6]]), ivy.array([[7, 8]])]
>>> print(ivy.hstack(y))
ivy.array([[5, 6, 7, 8]])
"""
return ivy.current_backend().hstack(arrays, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def rot90(
m: Union[ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[bool] = None,
k: int = 1,
axes: Tuple[int, int] = (0, 1),
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Rotate an array by 90 degrees in the plane specified by axes. Rotation
direction is from the first towards the second axis.
Parameters
----------
m
Input array of two or more dimensions.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
k
Number of times the array is rotated by 90 degrees.
axes
The array is rotated in the plane defined by the axes. Axes must be
different.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
A rotated view of m.
Examples
--------
With :code:`ivy.Array` input:
>>> m = ivy.array([[1,2], [3,4]])
>>> ivy.rot90(m)
ivy.array([[2, 4],
[1, 3]])
>>> m = ivy.array([[1,2], [3,4]])
>>> ivy.rot90(m, k=2)
ivy.array([[4, 3],
[2, 1]])
>>> m = ivy.array([[[0, 1],\
[2, 3]],\
[[4, 5],\
[6, 7]]])
>>> ivy.rot90(m, k=2, axes=(1,2))
ivy.array([[[3, 2],
[1, 0]],
[[7, 6],
[5, 4]]])
With :code:`ivy.NativeArray` input:
>>> m = ivy.native_array([[1,2], [3,4]])
>>> ivy.rot90(m)
ivy.array([[2, 4],
[1, 3]])
>>> m = ivy.native_array([[1,2], [3,4]])
>>> ivy.rot90(m, k=2)
ivy.array([[4, 3],
[2, 1]])
>>> m = ivy.native_array([[[0, 1],\
[2, 3]],\
[[4, 5],\
[6, 7]]])
>>> ivy.rot90(m, k=2, axes=(1,2))
ivy.array([[[3, 2],
[1, 0]],
[[7, 6],
[5, 4]]])
"""
return ivy.current_backend(m).rot90(m, copy=copy, k=k, axes=axes, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def top_k(
x: Union[ivy.Array, ivy.NativeArray],
k: int,
/,
*,
axis: int = -1,
largest: bool = True,
sorted: bool = True,
out: Optional[tuple] = None,
) -> Tuple[ivy.Array, ivy.NativeArray]:
"""Return the `k` largest elements of the given input array along a given
axis.
Parameters
----------
x
The array to compute top_k for.
k
        Number of top elements to return; must not exceed the array size.
axis
        The axis along which we must return the top elements. Defaults to -1.
largest
If largest is set to False we return k smallest elements of the array.
sorted
If sorted is set to True we return the elements in sorted order.
    out
Optional output tuple, for writing the result to. Must have two arrays inside,
with a shape that the returned tuple broadcast to.
Returns
-------
ret
A named tuple with values and indices of top k elements.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([2., 1., -3., 5., 9., 0., -4])
>>> y = ivy.top_k(x, 2)
>>> print(y)
top_k(values=ivy.array([9., 5.]), indices=ivy.array([4, 3]))
>>> x = ivy.array([[-2., 3., 4., 0.], [-8., 0., -1., 2.]])
>>> y = ivy.top_k(x, 2, axis=1, largest=False)
>>> print(y)
top_k(values=ivy.array([[-2., 0.],
[-8., -1.]]), indices=ivy.array([[0, 3],
[0, 2]]))
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([2., 1., -3., 5., 9., 0., -4])
>>> y = ivy.top_k(x, 3)
>>> print(y)
top_k(values=ivy.array([9., 5., 2.]), indices=ivy.array([4, 3, 0]))
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1, 2, -4]), b=ivy.array([4., 5., 0.]))
>>> y = x.top_k(2)
>>> print(y)
[{
a: ivy.array([2, -1]),
b: ivy.array([5., 4.])
}, {
a: ivy.array([1, 0]),
b: ivy.array([1, 0])
}]
"""
return current_backend(x).top_k(
x, k, axis=axis, largest=largest, sorted=sorted, out=out
)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def fliplr(
m: Union[ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[bool] = None,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Flip array in the left/right direction. Flip the entries in each column
in the left/right direction. Columns are preserved, but appear in a
different order than before.
Parameters
----------
m
The array to be flipped. Must be at least 2-D.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
out
optional output array, for writing the result to.
Returns
-------
ret
Array corresponding to input array with elements
order reversed along axis 1.
Examples
--------
>>> m = ivy.diag([1, 2, 3])
>>> ivy.fliplr(m)
ivy.array([[0, 0, 1],
[0, 2, 0],
[3, 0, 0]])
"""
return ivy.current_backend().fliplr(m, copy=copy, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def i0(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the Bessel i0 function of x element-wise.
Parameters
----------
x
Array input.
out
optional output array, for writing the result to.
Returns
-------
ret
Array with the modified Bessel function
evaluated at each of the elements of x.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> ivy.i0(x)
ivy.array([1.26606588, 2.2795853 , 4.88079259])
"""
return ivy.current_backend(x).i0(x, out=out)
def _slice_at_axis(sl, axis):
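    # Build an index tuple that applies `sl` along `axis` and leaves every other
    # axis untouched, e.g. _slice_at_axis(slice(1, 3), 1) gives
    # (slice(None), slice(1, 3), Ellipsis).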
return (slice(None),) * axis + (sl,) + (...,)
def _set_pad_area(padded, axis, width_pair, value_pair):
if width_pair[0] > 0:
left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
padded[left_slice] = value_pair[0]
if width_pair[1] > 0:
right_slice = _slice_at_axis(
slice(padded.shape[axis] - width_pair[1], None), axis
)
padded[right_slice] = value_pair[1]
return padded
def _get_edges(padded, axis, width_pair):
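    # Return the first and last values of the original (un-padded) data along
    # `axis`, each kept as a length-1 slice so it broadcasts against the pad area.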
left_index = width_pair[0]
left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)
left_edge = padded[left_slice]
right_index = padded.shape[axis] - width_pair[1]
right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)
right_edge = padded[right_slice]
return left_edge, right_edge
def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
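    # Build per-side linear ramps running from the user-supplied end values
    # towards the edge values of the original data, one step per padded cell.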
edge_pair = _get_edges(padded, axis, width_pair)
if width_pair[0] > 0:
left_ramp = ivy.linspace(
end_value_pair[0],
ivy.array(edge_pair[0].squeeze(axis=axis)),
num=width_pair[0],
endpoint=False,
dtype=ivy.Dtype(str(padded.dtype)),
axis=axis,
)
else:
left_ramp = ivy.empty((0,))
if width_pair[1] > 0:
right_ramp = ivy.flip(
ivy.linspace(
end_value_pair[1],
ivy.array(edge_pair[1].squeeze(axis=axis)),
num=width_pair[1],
endpoint=False,
dtype=ivy.Dtype(str(padded.dtype)),
axis=axis,
),
axis=axis,
)
else:
right_ramp = ivy.empty((0,))
return left_ramp, right_ramp
def _get_stats(padded, axis, width_pair, length_pair, stat_func):
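    # Compute the per-side statistic (max / mean / median / min) over at most
    # `length_pair` values adjacent to each side of the original data; integer
    # inputs are promoted to float32 for the reduction and rounded back afterwards.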
left_index = width_pair[0]
right_index = padded.shape[axis] - width_pair[1]
max_length = right_index - left_index
left_length, right_length = length_pair
if left_length is None or max_length < left_length:
left_length = max_length
if right_length is None or max_length < right_length:
right_length = max_length
left_slice = _slice_at_axis(slice(left_index, left_index + left_length), axis)
left_chunk = padded[left_slice]
left_chunk = (
left_chunk.astype("float32") if ivy.is_int_dtype(left_chunk) else left_chunk
)
left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
left_stat = (
ivy.round(left_stat).astype(padded.dtype)
if ivy.is_int_dtype(padded)
else left_stat
)
if left_length == right_length == max_length:
return left_stat, left_stat
right_slice = _slice_at_axis(slice(right_index - right_length, right_index), axis)
right_chunk = padded[right_slice]
right_chunk = (
right_chunk.astype("float32") if ivy.is_int_dtype(right_chunk) else right_chunk
)
right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
right_stat = (
ivy.round(right_stat).astype(padded.dtype)
if ivy.is_int_dtype(padded)
else right_stat
)
return left_stat, right_stat
def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
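    # Fill (part of) the reflect / symmetric pad area on both sides of `axis`.
    # `include_edge=True` gives the "symmetric" behaviour (the edge value is
    # repeated), while method="odd" reflects around the edge value instead of
    # across it; the remaining pad widths are returned so the caller can loop
    # when the requested pad is wider than the data.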
left_pad, right_pad = width_pair
old_length = padded.shape[axis] - right_pad - left_pad
if include_edge:
edge_offset = 1
else:
edge_offset = 0
old_length -= 1
if left_pad > 0:
chunk_length = min(old_length, left_pad)
stop = left_pad - edge_offset
start = stop + chunk_length
left_slice = _slice_at_axis(slice(start, stop, -1), axis)
left_chunk = padded[left_slice]
if method == "odd":
edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
left_chunk = 2 * padded[edge_slice] - left_chunk
start = left_pad - chunk_length
stop = left_pad
pad_area = _slice_at_axis(slice(start, stop), axis)
padded[pad_area] = left_chunk
left_pad -= chunk_length
if right_pad > 0:
chunk_length = min(old_length, right_pad)
start = -right_pad + edge_offset - 2
stop = start - chunk_length
right_slice = _slice_at_axis(slice(start, stop, -1), axis)
right_chunk = padded[right_slice]
if method == "odd":
edge_slice = _slice_at_axis(slice(-right_pad - 1, -right_pad), axis)
right_chunk = 2 * padded[edge_slice] - right_chunk
start = padded.shape[axis] - right_pad
stop = start + chunk_length
pad_area = _slice_at_axis(slice(start, stop), axis)
padded[pad_area] = right_chunk
right_pad -= chunk_length
return left_pad, right_pad, padded
def _set_wrap_both(padded, axis, width_pair):
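    # Fill the "wrap" pad area by copying periodic chunks of the original data
    # from the opposite side of `axis`, looping while the requested pad is wider
    # than one period.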
left_pad, right_pad = width_pair
period = padded.shape[axis] - right_pad - left_pad
while left_pad > 0:
right_slice = _slice_at_axis(
slice(
-width_pair[1] - min(period, left_pad),
-width_pair[1] if width_pair[1] != 0 else None,
),
axis,
)
right_chunk = padded[right_slice]
if left_pad > period:
pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
left_pad = left_pad - period
else:
pad_area = _slice_at_axis(slice(None, left_pad), axis)
left_pad = 0
padded[pad_area] = right_chunk
while right_pad > 0:
left_slice = _slice_at_axis(
slice(
width_pair[0],
width_pair[0] + min(period, right_pad),
),
axis,
)
left_chunk = padded[left_slice]
if right_pad > period:
pad_area = _slice_at_axis(slice(-right_pad, -right_pad + period), axis)
right_pad = right_pad - period
else:
pad_area = _slice_at_axis(slice(-right_pad, None), axis)
right_pad = 0
padded[pad_area] = left_chunk
return padded
def _init_pad(array, pad_width, fill_value=None):
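    # Allocate the padded output (filled with zeros or `fill_value`) and copy the
    # original array into its interior; the individual modes then fill the pad
    # regions.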
new_shape = tuple(
left + size + right for size, (left, right) in zip(array.shape, pad_width)
)
if fill_value is not None:
padded = ivy.ones(new_shape, dtype=array.dtype) * fill_value
else:
padded = ivy.zeros(new_shape, dtype=array.dtype)
original_area_slice = tuple(
slice(left, left + size) for size, (left, right) in zip(array.shape, pad_width)
)
padded[original_area_slice] = array
return padded
def _to_pairs(x, n, m=2):
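    # Broadcast a scalar or a single (before, after)-style tuple to one tuple per
    # axis, e.g. _to_pairs(2, 3) gives ((2, 2), (2, 2), (2, 2)); otherwise check
    # that exactly one tuple per dimension was supplied.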
if ivy.isscalar(x):
return ((x,) * m,) * n
elif len(x) == m and ivy.isscalar(x[0]):
return ((*x[:m],),) * n
elif len(x) != n:
ivy.utils.assertions.check_equal(
ivy.asarray(list(x)).shape,
(n, m),
message=(
"tuple argument should contain "
"ndim pairs where ndim is the number of "
"the input's dimensions"
),
as_array=False,
)
return x
def check_scalar(x, force_integer, force_positive):
return (
ivy.isscalar(x)
and (ivy.is_int_dtype(x) if force_integer else True)
and (x >= 0 if force_positive else True)
)
def _check_tuple_arg(arg, arg_name, force_integer=False, force_positive=False):
if not (
check_scalar(arg, force_integer, force_positive)
or (
isinstance(arg, (tuple, list))
            and all(
                check_scalar(elem, force_integer, force_positive)
                or (
                    isinstance(elem, (tuple, list))
                    and all(
                        check_scalar(sub_elem, force_integer, force_positive)
                        for sub_elem in elem
                    )
                )
                for elem in arg
            )
)
):
if force_integer:
raise ivy.utils.exceptions.IvyException(
f"{arg_name} should be int, tuple of ints or tuple of int tuples"
)
else:
raise ivy.utils.exceptions.IvyException(
f"{arg_name} should be scalar, tuple of scalars or tuple of scalar"
" tuples"
)
def _check_arguments(
mode,
pad_width,
stat_length,
constant_values,
end_values,
reflect_type,
):
supported_modes = [
"constant",
"dilated",
"edge",
"linear_ramp",
"maximum",
"mean",
"median",
"minimum",
"reflect",
"symmetric",
"wrap",
"empty",
]
ivy.utils.assertions.check_true(
callable(mode) or mode in supported_modes,
message=f"Only modes {supported_modes} are supported. Got {mode}.",
)
_check_tuple_arg(
pad_width, "pad_width", force_positive=mode != "dilated", force_integer=True
)
if mode in ["maximum", "mean", "median", "minimum"]:
_check_tuple_arg(
stat_length, "stat_length", force_positive=True, force_integer=True
)
elif mode in ["constant", "dilated"]:
_check_tuple_arg(constant_values, "constant_values")
elif mode == "linear_ramp":
_check_tuple_arg(end_values, "end_values")
elif mode in ["reflect", "symmetric"]:
ivy.utils.assertions.check_true(
reflect_type in ["even", "odd"],
message=(
f"Only reflect types ['even', 'odd'] are supported. Got {reflect_type}."
),
)
@handle_exceptions
@handle_nestable
@handle_partial_mixed_function
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def pad(
input: Union[ivy.Array, ivy.NativeArray],
pad_width: Union[Iterable[Tuple[int]], int],
/,
*,
mode: Union[
Literal[
"constant",
"dilated",
"edge",
"linear_ramp",
"maximum",
"mean",
"median",
"minimum",
"reflect",
"symmetric",
"wrap",
"empty",
],
Callable,
] = "constant",
stat_length: Union[Iterable[Tuple[int]], int] = 1,
constant_values: Union[Iterable[Tuple[Number]], Number] = 0,
end_values: Union[Iterable[Tuple[Number]], Number] = 0,
reflect_type: Literal["even", "odd"] = "even",
**kwargs: Optional[Any],
) -> ivy.Array:
"""Pad an array.
Parameters
----------
input
Input array to pad.
pad_width
Number of values padded to the edges of each axis.
- ((before_1, after_1), … (before_N, after_N)) yields unique pad widths
for each axis.
- ((before, after),) yields same before and after pad for each axis.
- pad (integer) is shortcut for before = after = pad width for all axes.
mode
One of the following string values or a user-supplied function.
- "constant": Pads with a constant value.
- "edge": Pads with the input's edge values.
- "linear_ramp": Pads with the linear ramp between end_value
and the input's edge value.
- "maximum": Pads with the maximum value of all or part of the vector
along each axis.
- "mean": Pads with the mean value of all or part of the vector along
each axis.
- "median": Pads with the median value of all or part of the vector
along each axis.
- "minimum": Pads with the minimum value of all or part of the vector
along each axis.
- "reflect": Pads with the reflection mirrored on the first and last
values of the vector along each axis.
- "symmetric": Pads with the reflection of the vector mirrored along
the edge of the input.
- "wrap": Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the end values are used
to pad the beginning.
- "empty": Pads with undefined values.
- <function>: Pads with a user-defined padding function. The padding
function should modify a rank 1 array following the signature
`padding_func(vector, iaxis_pad_width, iaxis, kwargs)`, where:
- `vector` is a rank 1 array already padded with zeros. Padded
values are `vector[:iaxis_pad_width[0]]` and
`vector[-iaxis_pad_width[1]:]`.
- `iaxis_pad_width` is a 2-tuple of ints, where
`iaxis_pad_width[0]` represents the number of values padded at
the beginning of `vector` and `iaxis_pad_width[1]` represents
the number of values padded at the end of `vector`.
- `iaxis` is the axis currently being calculated.
- `kwargs` is a dict of keyword arguments the function requires.
stat_length
Used in "maximum", "mean", "median", and "minimum". Number of values at edge
of each axis used to calculate the statistic value.
- ((before_1, after_1), … (before_N, after_N)) yields unique statistic
lengths for each axis.
- ((before, after),) yields same before and after statistic lengths for
each axis.
- stat_length (integer) is a shortcut for before = after = stat_length
length for all axes.
- None uses the entire axis.
constant_values
Used in "constant". The values to set the padded values for each axis.
- ((before_1, after_1), ... (before_N, after_N)) yields unique pad
constants for each axis.
- ((before, after),) yields same before and after constants for each axis.
- constant (integer) is a shortcut for before = after = constant for
all axes.
end_values
Used in "linear_ramp". The values used for the ending value of the linear_ramp
and that will form the edge of the padded array.
- ((before_1, after_1), ... (before_N, after_N)) yields unique end values
for each axis.
- ((before, after),) yields same before and after end values for each axis
- end (integer) is a shortcut for before = after = end for all axes.
reflect_type
Used in "reflect", and "symmetric". The "even" style is the default with an
unaltered reflection around the edge value. For the "odd" style, the extended
part of the array is created by subtracting the reflected values from two
times the edge value.
Returns
-------
ret
Padded array of the same rank as the input but with shape increased according
to pad_width.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[1, 2, 3], [4, 5, 6]])
>>> padding = ((1, 1), (2, 2))
>>> y = ivy.pad(x, padding, mode="constant", constant_values=0)
>>> print(y)
ivy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 3, 0, 0],
[0, 0, 4, 5, 6, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> x = ivy.array([[1, 2, 3], [4, 5, 6]])
>>> padding = ((1, 1), (2, 2))
>>> y = ivy.pad(x, padding, mode="reflect")
>>> print(y)
ivy.array([[6, 5, 4, 5, 6, 5, 4],
[3, 2, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 5, 4],
[3, 2, 1, 2, 3, 2, 1]])
>>> x = ivy.array([[1, 2, 3], [4, 5, 6]])
>>> padding = ((1, 1), (2, 2))
>>> y = ivy.pad(x, padding, mode="symmetric")
>>> print(y)
ivy.array([[2, 1, 1, 2, 3, 3, 2],
[2, 1, 1, 2, 3, 3, 2],
[5, 4, 4, 5, 6, 6, 5],
[5, 4, 4, 5, 6, 6, 5]])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([[1, 2, 3], [4, 5, 6]])
>>> padding = ((1, 1), (2, 2))
>>> y = ivy.pad(x, padding, mode="constant", constant_values=7)
>>> print(y)
ivy.array([[7, 7, 7, 7, 7, 7, 7],
[7, 7, 1, 2, 3, 7, 7],
[7, 7, 4, 5, 6, 7, 7],
[7, 7, 7, 7, 7, 7, 7]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0, 1, 2]), b=ivy.array([4, 5, 6]))
>>> padding = (1, 1)
>>> y = ivy.pad(x, padding, mode="constant")
>>> print(y)
{
a: ivy.array([0, 0, 1, 2, 0]),
b: ivy.array([0, 4, 5, 6, 0])
}
"""
_check_arguments(
mode,
pad_width,
stat_length,
constant_values,
end_values,
reflect_type,
)
ndim = input.ndim
if mode == "dilated":
pad_width = _to_pairs(pad_width, ndim, m=3)
if not ivy.is_array(constant_values) or constant_values.dtype != input.dtype:
constant_values = ivy.asarray(constant_values, dtype=input.dtype)
return _interior_pad(input, constant_values, pad_width)
pad_width = _to_pairs(pad_width, len(input.shape))
if callable(mode):
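        # User-supplied padding function: apply it to every 1-D lane of the
        # zero-initialised padded array, one axis at a time, by moving the
        # current axis to the end and iterating over the remaining indices.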
func = mode
padded = _init_pad(input, pad_width, fill_value=0)
for axis in range(ndim):
padded = ivy.moveaxis(padded, axis, -1)
inds = ivy.ndindex(padded.shape[:-1])
for ind in inds:
padded[ind] = func(padded[ind], pad_width[axis], axis, kwargs)
return padded
padded = _init_pad(input, pad_width)
stat_functions = {
"maximum": ivy.max,
"minimum": ivy.min,
"mean": ivy.mean,
"median": ivy.median,
}
if mode == "constant":
constant_values = _to_pairs(constant_values, ndim)
for axis, (width_pair, value_pair) in enumerate(
zip(pad_width, constant_values)
):
padded = _set_pad_area(padded, axis, width_pair, value_pair)
elif mode == "empty":
pass
elif mode == "edge":
for axis, width_pair in enumerate(pad_width):
edge_pair = _get_edges(padded, axis, width_pair)
padded = _set_pad_area(padded, axis, width_pair, edge_pair)
elif mode == "linear_ramp":
end_values = _to_pairs(end_values, ndim)
for axis, (width_pair, value_pair) in enumerate(zip(pad_width, end_values)):
ramp_pair = _get_linear_ramps(padded, axis, width_pair, value_pair)
padded = _set_pad_area(padded, axis, width_pair, ramp_pair)
elif mode in stat_functions:
func = stat_functions[mode]
stat_length = _to_pairs(stat_length, ndim)
for axis, (width_pair, length_pair) in enumerate(zip(pad_width, stat_length)):
stat_pair = _get_stats(padded, axis, width_pair, length_pair, func)
padded = _set_pad_area(padded, axis, width_pair, stat_pair)
elif mode in {"reflect", "symmetric"}:
include_edge = True if mode == "symmetric" else False
for axis, (left_index, right_index) in enumerate(pad_width):
if input.shape[axis] == 1 and (left_index > 0 or right_index > 0):
edge_pair = _get_edges(padded, axis, (left_index, right_index))
padded = _set_pad_area(
padded, axis, (left_index, right_index), edge_pair
)
continue
while left_index > 0 or right_index > 0:
left_index, right_index, padded = _set_reflect_both(
padded, axis, (left_index, right_index), reflect_type, include_edge
)
elif mode == "wrap":
for axis, (left_index, right_index) in enumerate(pad_width):
padded = _set_wrap_both(padded, axis, (left_index, right_index))
return padded
pad.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@to_native_arrays_and_back
@handle_array_function
@handle_device
def vsplit(
ary: Union[ivy.Array, ivy.NativeArray],
indices_or_sections: Union[int, Sequence[int], ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[bool] = None,
) -> List[ivy.Array]:
"""Split an array vertically into multiple sub-arrays.
Parameters
----------
ary
Array input.
indices_or_sections
If indices_or_sections is an integer n, the array is split into n
        equal sections, provided that n is a divisor of the size of the split axis.
If indices_or_sections is a sequence of ints or 1-D array,
then input is split at each of the indices.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
Returns
-------
ret
input array split vertically.
Examples
--------
>>> ary = ivy.array(
[[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]]
)
>>> ivy.vsplit(ary, 2)
    [ivy.array([[[0., 1.], [2., 3.]]]), ivy.array([[[4., 5.], [6., 7.]]])]
"""
return ivy.current_backend(ary).vsplit(ary, indices_or_sections, copy=copy)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@to_native_arrays_and_back
@handle_device
def dsplit(
ary: Union[ivy.Array, ivy.NativeArray],
indices_or_sections: Union[int, Sequence[int], ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[bool] = None,
) -> List[ivy.Array]:
"""Split an array into multiple sub-arrays along the 3rd axis.
Parameters
----------
ary
Array input.
indices_or_sections
If indices_or_sections is an integer n, the array is split into n sections.
If the array is divisible by n along the 3rd axis, each section will be of
        equal size. If input is not divisible by n, the first
        int(ary.size(0) % n) sections will have size int(ary.size(0) / n) + 1, and
the rest will have size int(ary.size(0) / n).
If indices_or_sections is a sequence of ints or 1-D array,
then input is split at each of the indices.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
Returns
-------
ret
input array split along the 3rd axis.
Examples
--------
>>> ary = ivy.array(
[[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]]
)
>>> ivy.dsplit(ary, 2)
[ivy.array([[[ 0., 1.], [ 4., 5.]], [[ 8., 9.], [12., 13.]]]),
ivy.array([[[ 2., 3.], [ 6., 7.]], [[10., 11.], [14., 15.]]])]
"""
return ivy.current_backend(ary).dsplit(ary, indices_or_sections, copy=copy)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@to_native_arrays_and_back
@handle_device
def atleast_1d(
*arys: Union[ivy.Array, ivy.NativeArray, bool, Number],
copy: Optional[bool] = None,
) -> List[ivy.Array]:
"""Convert inputs to arrays with at least one dimension. Scalar inputs are
converted to 1-dimensional arrays, whilst higher-dimensional inputs are
preserved.
Parameters
----------
arys
One or more input arrays.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
Returns
-------
ret
An array, or list of arrays, each with at least 1D.
Copies are made only if necessary.
Examples
--------
>>> ary1 = ivy.array(5)
>>> ivy.atleast_1d(ary1)
ivy.array([5])
>>> ary2 = ivy.array([[3,4]])
>>> ivy.atleast_1d(ary2)
ivy.array([[3, 4]])
>>> ivy.atleast_1d(6,7,8)
[ivy.array([6]), ivy.array([7]), ivy.array([8])]
"""
return ivy.current_backend().atleast_1d(*arys, copy=copy)
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def dstack(
arrays: Sequence[ivy.Array],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Stack arrays in sequence depth wise (along third axis).
Parameters
----------
arrays
Sequence of arrays to be stacked.
Returns
-------
ret
The array formed by stacking the given arrays.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([2, 3, 4])
>>> ivy.dstack((x, y))
ivy.array([[[1, 2],
[2, 3],
[3, 4]]])
>>> x = ivy.array([[1], [2], [3]])
>>> y = ivy.array([[2], [3], [4]])
>>> ivy.dstack((x, y))
ivy.array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return ivy.current_backend().dstack(arrays, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@to_native_arrays_and_back
@handle_device
def atleast_2d(
*arys: Union[ivy.Array, ivy.NativeArray],
copy: Optional[bool] = None,
) -> List[ivy.Array]:
"""Convert inputs to arrays with at least two dimension. Scalar inputs are
converted to 2-dimensional arrays, whilst higher-dimensional inputs are
preserved.
Parameters
----------
arys
One or more array-like sequences. Non-array inputs are
converted to arrays. Arrays that already have two or more
dimensions are preserved.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
Returns
-------
ret
An array, or list of arrays, each with at least 2D.
Copies are made only if necessary.
Examples
--------
>>> ary1 = ivy.array(5)
>>> ivy.atleast_2d(ary1)
ivy.array([[5]])
>>> ary2 = ivy.array([[[3,4]]])
>>> ivy.atleast_2d(ary2)
ivy.array([[[3, 4]]])
>>> ivy.atleast_2d(6,7,8)
[ivy.array([[6]]), ivy.array([[7]]), ivy.array([[8]])]
"""
return ivy.current_backend().atleast_2d(*arys, copy=copy)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@to_native_arrays_and_back
@handle_device
def atleast_3d(
*arys: Union[ivy.Array, ivy.NativeArray, bool, Number],
copy: Optional[bool] = None,
) -> List[ivy.Array]:
"""Convert inputs to arrays with at least three dimension. Scalar inputs
are converted to 3-dimensional arrays, whilst higher-dimensional inputs are
preserved.
Parameters
----------
arys
One or more array-like sequences. Non-array inputs are
converted to arrays. Arrays that already have three or more
dimensions are preserved.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
Returns
-------
ret
An array, or list of arrays, each with a.ndim >= 3. Copies
are avoided where possible, and views with three or more
dimensions are returned. For example, a 1-D array of shape
(N,) becomes a view of shape (1, N, 1), and a 2-D array of
shape (M, N) becomes a view of shape (M, N, 1).
Examples
--------
>>> ary1 = ivy.array([5,6])
>>> ivy.atleast_3d(ary1)
ivy.array([[[5],
[6]]])
>>> ary2 = ivy.array([[[3,4]]])
>>> ivy.atleast_3d(ary2)
ivy.array([[[3, 4]]])
>>> ary3 = ivy.array([[3,4],[9,10]])
>>> ivy.atleast_3d(6,7,ary3)
[ivy.array([[[6]]]), ivy.array([[[7]]]), ivy.array([[[ 3],
[ 4]],
[[ 9],
[10]]])]
"""
return ivy.current_backend().atleast_3d(*arys, copy=copy)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def take_along_axis(
arr: Union[ivy.Array, ivy.NativeArray],
indices: Union[ivy.Array, ivy.NativeArray],
axis: int,
/,
*,
mode: str = "fill",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Take values from the input array by matching 1d index and data slices.
Parameters
----------
arr
The source array.
indices
The indices of the values to extract.
axis
The axis over which to select values.
If axis is None, arr is treated as a flattened 1D array.
mode
One of: 'clip', 'fill', 'drop'. Parameter controlling how out-of-bounds indices
will be handled.
out
The output array.
Returns
-------
ret
The returned array has the same shape as `indices`.
Examples
--------
>>> arr = ivy.array([[4, 3, 5], [1, 2, 1]])
>>> indices = ivy.array([[0, 1, 1], [2, 0, 0]])
>>> y = ivy.take_along_axis(arr, indices, 1)
>>> print(y)
ivy.array([[4, 3, 3], [1, 1, 1]])
"""
return ivy.current_backend(arr).take_along_axis(
arr, indices, axis, mode=mode, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@to_native_arrays_and_back
@handle_array_function
@handle_device
def hsplit(
ary: Union[ivy.Array, ivy.NativeArray],
indices_or_sections: Union[int, Sequence[int], ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[bool] = None,
) -> List[ivy.Array]:
"""Split an array into multiple sub-arrays horizontally.
Parameters
----------
ary
Array input.
indices_or_sections
If indices_or_sections is an integer n, the array is split into n
        equal sections, provided that n is a divisor of the size of the split axis.
If indices_or_sections is a tuple of ints, then input is split at each of
the indices in the tuple.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
Returns
-------
ret
input array split horizontally.
Examples
--------
>>> ary = ivy.array(
[[0., 1., 2., 3.],
[4., 5., 6, 7.],
[8., 9., 10., 11.],
[12., 13., 14., 15.]]
)
>>> ivy.hsplit(ary, 2)
[ivy.array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
ivy.array([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]])]
"""
return ivy.current_backend(ary).hsplit(ary, indices_or_sections, copy=copy)
@handle_exceptions
@inputs_to_native_shapes
def broadcast_shapes(*shapes: Union[List[int], List[Tuple]]) -> Tuple[int]:
"""Broadcasts shapes.
Parameters
----------
shapes
The shapes to broadcast.
Returns
-------
ret
The broadcasted shape.
Examples
--------
>>> x = [(3, 3), (3, 1)]
>>> print(ivy.broadcast_shapes(*x))
(3, 3)
>>> print(ivy.broadcast_shapes(*[(3, 3),(3, 1),(1, 3)]))
(3, 3)
"""
return ivy.current_backend().broadcast_shapes(*shapes)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@handle_out_argument
@inputs_to_native_shapes
@to_native_arrays_and_back
@handle_device
def expand(
x: Union[ivy.Array, ivy.NativeArray],
shape: Union[ivy.Shape, ivy.NativeShape],
/,
*,
copy: Optional[bool] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Broadcast the input Array following the given shape and the broadcast
rule.
Parameters
----------
x
Array input.
shape
A 1-D Array indicates the shape you want to expand to,
following the broadcast rule.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
out
optional output array, for writing the result to.
Returns
-------
ret
Output Array
"""
return ivy.current_backend(x).expand(x, shape, out=out, copy=copy)
# ToDo: add 'mean' modes to scatter_nd and then to put_along_axis
@inputs_to_ivy_arrays
@handle_array_like_without_promotion
@handle_partial_mixed_function
@handle_nestable
@handle_exceptions
def put_along_axis(
arr: Union[ivy.Array, ivy.NativeArray],
indices: Union[ivy.Array, ivy.NativeArray],
values: Union[ivy.Array, ivy.NativeArray],
axis: int,
/,
*,
mode: Literal["sum", "min", "max", "mul", "mean", "replace"] = "replace",
out: Optional[ivy.Array] = None,
) -> None:
"""Put values into the input array by matching 1d index and data slices
along a specified axis.
Parameters
----------
arr : array_like
The input array to modify.
indices : array_like
The indices of the values to put into `arr`.
values : array_like
The values to put into `arr`.
axis : int
The axis over which to put the `values`.
mode : {'sum', 'min', 'max', 'mul', 'replace'}
The reduction operation to apply.
out : ndarray, optional
Output array in which to place the result.
If not specified, a new array is created.
Note
----
In case `indices` contains duplicates, the updates get accumulated in each place.
Returns
-------
None
Examples
--------
>>> arr = ivy.array([[4, 3, 5], [1, 2, 1]])
>>> indices = ivy.array([[0, 1, 1], [2, 0, 0]])
>>> values = ivy.array([[9, 8, 7], [6, 5, 4]])
>>> ivy.put_along_axis(arr, indices, values, 1, mode='replace')
>>> print(arr)
ivy.array([[9, 7, 5],
[4, 2, 6]])
>>> arr = ivy.array([[10, 30, 20], [60, 40, 50]])
>>> axis = 1
>>> indices = ivy.argmax(arr, axis=axis, keepdims=True)
>>> value = 100
>>> ivy.put_along_axis(arr, indices, value, axis, mode='sum')
>>> print(arr)
ivy.array([[10, 30, 20],
[60, 40, 50]])
"""
arr_shape = arr.shape
# array containing all flat indices
arr_ = ivy.arange(0, _numel(arr_shape)).reshape(arr_shape)
# use take_along_axis to get the queried indices
arr_idxs = ivy.take_along_axis(arr_, indices, axis)
# convert the flat indices to multi-D indices
arr_idxs = ivy.unravel_index(arr_idxs, arr_shape)
# stack the multi-D indices to bring them to scatter_nd format
arr_idxs = ivy.stack(arr_idxs, axis=-1).astype(ivy.int64)
ret = ivy.scatter_nd(arr_idxs, values, reduction=mode, out=ivy.copy_array(arr))
return ivy.inplace_update(out, ret) if ivy.exists(out) else ret
put_along_axis.mixed_backend_wrappers = {
"to_add": (
"handle_out_argument",
"outputs_to_ivy_arrays",
"inputs_to_native_arrays",
),
"to_skip": "handle_partial_mixed_function",
}
def _check_bounds(shape0, shape1, strides1, itemsize):
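    # Check that the farthest byte addressed by the requested (shape, strides)
    # view still lies inside the buffer of the original array; strides are given
    # in bytes.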
numel0 = math.prod(shape0)
ndim1 = len(shape1)
return (
sum((shape1[i] - 1) * strides1[i] for i in range(ndim1)) + itemsize
<= numel0 * itemsize
)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@inputs_to_native_shapes
def as_strided(
x: Union[ivy.Array, ivy.NativeArray],
shape: Union[ivy.Shape, ivy.NativeShape, Sequence[int]],
strides: Sequence[int],
/,
) -> ivy.Array:
"""Create a copy of the input array with the given shape and strides.
Parameters
----------
x
Input Array.
shape
The shape of the new array.
strides
The strides of the new array (specified in bytes).
Returns
-------
ret
Output Array
Examples
--------
>>> x = ivy.array([1, 2, 3, 4, 5, 6])
>>> ivy.as_strided(x, (4, 3), (8, 8))
ivy.array([[1, 2, 3],
[2, 3, 4],
[3, 4, 5],
[4, 5, 6]])
"""
itemsize = x.itemsize
if not _check_bounds(x.shape, shape, strides, itemsize):
raise ivy.exceptions.IvyException("attempted unsafe memory access")
if any(strides[i] % itemsize != 0 for i in range(len(strides))):
raise ivy.exceptions.IvyException("strides must be multiple of itemsize")
src = memoryview(ivy.to_numpy(x)).cast("b")
src_ind = ivy.inner(
ivy.indices(shape).reshape((len(shape), -1)).T,
ivy.array(strides),
)
src_ind = ivy.expand_dims(src_ind, axis=-1)
src_ind = src_ind + ivy.arange(itemsize)
src_ind = ivy.reshape(src_ind, (-1,)).to_numpy()
temp_list = [src[i] for i in src_ind]
temp_array = ivy.asarray(temp_list, dtype=ivy.int8)
result = bytearray(temp_array.to_numpy())
return ivy.reshape(
ivy.frombuffer(result, dtype=x.dtype, count=math.prod(shape)),
shape,
)
as_strided.unsupported_dtypes = ("bfloat16",)
as_strided.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def concat_from_sequence(
input_sequence: Union[
Tuple[Union[ivy.Array, ivy.NativeArray]],
List[Union[ivy.Array, ivy.NativeArray]],
],
/,
*,
new_axis: int = 0,
axis: int = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Concatenate a sequence of arrays along a new or an existing axis.
Parameters
----------
input_sequence
A sequence of arrays.
new_axis
Insert and concatenate on a new axis or not,
default 0 means do not insert new axis.
new_axis = 0: concatenate
new_axis = 1: stack
axis
axis along which the arrays will be concatenated.
out
optional output array, for writing the result to.
Returns
-------
ret
Output Array
"""
return current_backend(input_sequence).concat_from_sequence(
input_sequence, new_axis=new_axis, axis=axis, out=out
)
def _slice(operand, start_indices, limit_indices, strides=None):
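    # Slice `operand` with per-axis start, (exclusive) limit and stride values,
    # defaulting to a stride of 1 along every axis.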
strides = [1] * len(operand.shape) if strides is None else strides
full_slice = ()
for i, _ in enumerate(operand.shape):
strides_i = int(strides[i])
start_i = int(start_indices[i])
limit_i = int(limit_indices[i])
full_slice += (slice(start_i, limit_i, strides_i),)
return operand[full_slice]
def _slice_along_axis(x, start=0, stop=None, stride=1, axis=0):
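    # Slice `x` along a single axis only, leaving all other axes untouched;
    # negative axes are handled via an Ellipsis prefix followed by full slices.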
if axis >= 0:
slices = [slice(None)] * axis + [slice(start, stop, stride)]
else:
slices = [Ellipsis, slice(start, stop, stride)] + [slice(None)] * (-1 - axis)
return x[tuple(slices)]
def _interior_pad(operand, padding_value, padding_config):
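    # Generalised padding driven by `padding_config`, one (low, high, interior)
    # triple per axis: `interior` fill values are inserted between neighbouring
    # elements, positive low/high pad the edges with `padding_value`, and
    # negative low/high crop instead.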
for axis, (_, _, interior) in enumerate(padding_config):
if interior > 0:
new_shape = list(operand.shape)
new_shape[axis] = new_shape[axis] + (new_shape[axis] - 1) * interior
new_array = ivy.full(new_shape, padding_value, dtype=operand.dtype)
src_indices = ivy.arange(operand.shape[axis])
dst_indices = src_indices * (interior + 1)
index_tuple = [slice(None)] * operand.ndim
index_tuple[axis] = dst_indices
new_array[tuple(index_tuple)] = operand
operand = new_array
start_indices = [0] * operand.ndim
limit_indices = [0] * operand.ndim
for axis, (low, high, _) in enumerate(padding_config):
if low < 0:
start_indices[axis] = abs(low)
if high < 0:
limit_indices[axis] = high
else:
limit_indices[axis] = operand.shape[axis] + 1
padded = _slice(operand, start_indices, limit_indices)
pad_width = [(0, 0)] * operand.ndim
for axis, (low, high, _) in enumerate(padding_config):
if low > 0 and high > 0:
pad_width[axis] = (low, high)
elif low > 0:
pad_width[axis] = (low, 0)
elif high > 0:
pad_width[axis] = (0, high)
padded = ivy.constant_pad(padded, pad_width, value=padding_value)
return padded
def _interleave(a, b, axis):
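    # Interleave `a` and `b` along `axis` (elements of `a` first) by
    # interior-padding each with zeros and summing; `a` may hold one more element
    # than `b` along that axis.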
assert a.shape[axis] in [b.shape[axis], b.shape[axis] + 1]
a_pad = [(0, 0, 0)] * a.ndim
b_pad = [(0, 0, 0)] * b.ndim
a_pad[axis] = (0, 1 if a.shape[axis] == b.shape[axis] else 0, 1)
b_pad[axis] = (1, 0 if a.shape[axis] == b.shape[axis] else 1, 1)
a = _interior_pad(a, 0.0, a_pad)
b = _interior_pad(b, 0.0, b_pad)
return ivy.add(a, b)
@handle_exceptions
@handle_nestable
@inputs_to_ivy_arrays
@handle_array_function
def associative_scan(
x: Union[ivy.Array, ivy.NativeArray],
fn: Callable,
/,
*,
reverse: bool = False,
axis: int = 0,
) -> ivy.Array:
"""Perform an associative scan over the given array.
Parameters
----------
x
The array to scan over.
fn
The associative function to apply.
reverse
Whether to scan in reverse with respect to the given axis.
axis
The axis to scan over.
Returns
-------
ret
The result of the scan.
"""
elems = [x]
if reverse:
elems = [ivy.flip(elem, axis=[axis]) for elem in elems]
def _combine(a, b):
a = a[0]
b = b[0]
if a.shape[axis] == 0:
return [a]
c = fn(a, b)
return [c]
def _scan(elems):
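        # Work-efficient recursive scan: combine adjacent pairs, recursively scan
        # the half-length sequence to obtain the odd-indexed results, derive the
        # even-indexed results from them, then interleave the two back together.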
num_elems = elems[0].shape[axis]
if num_elems < 2:
return elems
reduced_elems = _combine(
[_slice_along_axis(elem, 0, -1, stride=2, axis=axis) for elem in elems],
[_slice_along_axis(elem, 1, None, stride=2, axis=axis) for elem in elems],
)
odd_elems = _scan(reduced_elems)
if num_elems % 2 == 0:
even_elems = _combine(
[_slice_along_axis(e, 0, -1, axis=axis) for e in odd_elems],
[_slice_along_axis(e, 2, None, stride=2, axis=axis) for e in elems],
)
else:
even_elems = _combine(
odd_elems,
[_slice_along_axis(e, 2, None, stride=2, axis=axis) for e in elems],
)
even_elems = [
ivy.concat([_slice_along_axis(elem, 0, 1, axis=axis), result], axis=axis)
for (elem, result) in zip(elems, even_elems)
]
return list(map(partial(_interleave, axis=axis), even_elems, odd_elems))
scans = _scan(elems)
if reverse:
scans = [ivy.flip(scanned, axis=[axis]) for scanned in scans]
return ivy.reshape(ivy.asarray(scans), elems[0].shape)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_array_function
@handle_device
def unique_consecutive(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[int] = None,
) -> Tuple[
Union[ivy.Array, ivy.NativeArray],
Union[ivy.Array, ivy.NativeArray],
Union[ivy.Array, ivy.NativeArray],
]:
"""Eliminates all but the first element from every consecutive group of
equivalent elements in ``x``.
Parameters
----------
x
input array.
axis
the axis to apply unique on. If None, unique is applied on flattened ``x``.
Returns
-------
ret
a namedtuple ``(output, inverse_indices, counts)`` whose
- first element has the field name ``output`` and is an array
containing ``x`` with its equivalent consecutive elements eliminated.
- second element has the field name ``inverse_indices`` and is an
array containing the indices of ``output`` that reconstruct ``x``.
- third element has the field name ``counts`` and is an array
containing the number of occurrences for each unique value or array in ``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 1, 2, 2, 3, 1, 1, 2])
    >>> ivy.unique_consecutive(x)
Results(values=ivy.array([1, 2, 3, 1, 2]),
inverse_indices=ivy.array([0, 0, 1, 1, 2, 3, 3, 4]),
counts=ivy.array([2, 2, 1, 2, 1]))
"""
return ivy.current_backend(x).unique_consecutive(x, axis=axis)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def fill_diagonal(
a: Union[ivy.Array, ivy.NativeArray],
v: Union[int, float, ivy.Array, ivy.NativeArray],
/,
*,
wrap: bool = False,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Fill the main diagonal of the given array of any dimensionality..
Parameters
----------
a
Array at least 2D.
v
Value(s) to write on the diagonal. If val is scalar, the
value is written along the diagonal. If array-like, the
flattened val is written along the diagonal, repeating if
necessary to fill all diagonal entries.
wrap
The diagonal 'wrapped' after N columns for tall matrices.
Returns
-------
ret
Array with the diagonal filled.
"""
shape = a.shape
max_end = ivy.prod(ivy.array(shape))
end = max_end
if len(shape) == 2:
step = shape[1] + 1
if not wrap:
end = shape[1] * shape[1]
else:
step = int(1 + (ivy.cumprod(ivy.array(shape[:-1]), axis=0)).sum())
end = int(min(end, max_end))
a = ivy.reshape(a, (-1,))
steps = ivy.arange(0, end, step)
if isinstance(v, (ivy.Array, ivy.NativeArray)):
v = ivy.reshape(v, (-1,)).astype(a.dtype)
v = ivy.tile(v, int(ivy.ceil(len(steps) / v.shape[0])))[: len(steps)]
else:
v = ivy.repeat(v, len(steps))
ivy.scatter_flat(steps, v, size=a.shape[0], reduction="replace", out=a)
a = ivy.reshape(a, shape)
return a
fill_diagonal.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def unfold(
x: Union[ivy.Array, ivy.NativeArray],
/,
mode: int = 0,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the mode-`mode` unfolding of `tensor` with modes starting at `0`.
Parameters
----------
x
input tensor to be unfolded
mode
indexing starts at 0, therefore mode is in ``range(0, tensor.ndim)``
out
optional output array, for writing the result to.
Returns
-------
ret
unfolded_tensor of shape ``(tensor.shape[mode], -1)``
"""
return ivy.reshape(ivy.moveaxis(x, mode, 0), (x.shape[mode], -1), out=out)
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def fold(
x: Union[ivy.Array, ivy.NativeArray],
/,
mode: int,
shape: Union[ivy.Shape, ivy.NativeShape, Sequence[int]],
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Refolds the mode-`mode` unfolding into a tensor of shape `shape` In
other words, refolds the n-mode unfolded tensor into the original tensor of
the specified shape.
Parameters
----------
    x
unfolded tensor of shape ``(shape[mode], -1)``
mode
the mode of the unfolding
shape
shape of the original tensor before unfolding
out
optional output array, for writing the result to.
Returns
-------
ret
folded_tensor of shape `shape`
"""
full_shape = list(shape)
mode_dim = full_shape.pop(mode)
full_shape.insert(0, mode_dim)
return ivy.moveaxis(ivy.reshape(x, full_shape), 0, mode, out=out)
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def partial_unfold(
x: Union[ivy.Array, ivy.NativeArray],
/,
mode: int = 0,
skip_begin: int = 1,
skip_end: int = 0,
ravel_tensors: bool = False,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Partial unfolding of a tensor while ignoring the specified number of
dimensions at the beginning and the end. For instance, if the first
dimension of the tensor is the number of samples, to unfold each sample,
set skip_begin=1. This would, for each i in ``range(tensor.shape[0])``,
unfold ``tensor[i, ...]``.
Parameters
----------
x
tensor of shape n_samples x n_1 x n_2 x ... x n_i
mode
indexing starts at 0, therefore mode is in range(0, tensor.ndim)
skip_begin
number of dimensions to leave untouched at the beginning
skip_end
number of dimensions to leave untouched at the end
ravel_tensors
if True, the unfolded tensors are also flattened
out
optional output array, for writing the result to.
Returns
-------
ret
partially unfolded tensor
"""
if ravel_tensors:
new_shape = [-1]
else:
new_shape = [x.shape[mode + skip_begin], -1]
if skip_begin:
new_shape = [x.shape[i] for i in range(skip_begin)] + new_shape
if skip_end:
new_shape += [x.shape[-i] for i in range(1, 1 + skip_end)]
return ivy.reshape(
ivy.moveaxis(x, mode + skip_begin, skip_begin), new_shape, out=out
)
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def partial_fold(
x: Union[ivy.Array, ivy.NativeArray],
/,
mode: int,
shape: Union[ivy.Shape, ivy.NativeShape, Sequence[int]],
skip_begin: int = 1,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Re-folds a partially unfolded tensor.
Parameters
----------
x
a partially unfolded tensor
mode
indexing starts at 0, therefore mode is in range(0, tensor.ndim)
shape
the shape of the original full tensor (including skipped dimensions)
skip_begin
number of dimensions left untouched at the beginning
out
optional output array, for writing the result to.
Returns
-------
ret
partially re-folded tensor
"""
transposed_shape = list(shape)
mode_dim = transposed_shape.pop(skip_begin + mode)
transposed_shape.insert(skip_begin, mode_dim)
return ivy.moveaxis(
ivy.reshape(x, transposed_shape), skip_begin, skip_begin + mode, out=out
)
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def partial_tensor_to_vec(
x: Union[ivy.Array, ivy.NativeArray],
/,
skip_begin: int = 1,
skip_end: int = 0,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Partial vectorization of a tensor while ignoring the specified dimension
at the beginning and the end.
Parameters
----------
x
tensor to partially vectorise
skip_begin
number of dimensions to leave untouched at the beginning
skip_end
number of dimensions to leave untouched at the end
out
optional output array, for writing the result to.
Returns
-------
ret
partially vectorised tensor with the
`skip_begin` first and `skip_end` last dimensions untouched
"""
return partial_unfold(
x,
mode=0,
skip_begin=skip_begin,
skip_end=skip_end,
ravel_tensors=True,
out=out,
)
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def partial_vec_to_tensor(
x: Union[ivy.Array, ivy.NativeArray],
/,
shape: Union[ivy.Shape, ivy.NativeShape, Sequence[int]],
skip_begin: int = 1,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Refolds a partially vectorised tensor into a full one.
Parameters
----------
x
a partially vectorised tensor
shape
the shape of the original full tensor (including skipped dimensions)
skip_begin
number of dimensions to leave untouched at the beginning
out
optional output array, for writing the result to.
Returns
-------
ret
full tensor
"""
return partial_fold(x, mode=0, shape=shape, skip_begin=skip_begin, out=out)
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def matricize(
x: Union[ivy.Array, ivy.NativeArray],
/,
row_modes: Sequence[int],
column_modes: Optional[Sequence[int]] = None,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Matricizes the given tensor.
Parameters
----------
x
the input tensor
row_modes
modes to use as row of the matrix (in the desired order)
column_modes
modes to use as column of the matrix, in the desired order
if None, the modes not in `row_modes` will be used in ascending order
out
optional output array, for writing the result to.
    Returns
    -------
    ret
        ivy.Array : tensor of size (ivy.prod(x.shape[i] for i in row_modes), -1)
"""
ndims = len(x.shape)
row_indices = list(row_modes)
if column_modes:
column_indices = list(column_modes)
else:
column_indices = [i for i in range(ndims) if i not in row_indices]
if sorted(column_indices + row_indices) != list(range(ndims)):
msg = (
"If you provide both column and row modes for the matricization then"
" column_modes + row_modes must contain all the modes of the tensor."
f" Yet, got row_modes={row_modes} and column_modes={column_modes}."
)
raise ValueError(msg)
row_size, column_size = 1, 1
row_size = int(ivy.prod([x.shape[i] for i in row_indices]))
column_size = int(ivy.prod([x.shape[i] for i in column_indices]))
return ivy.reshape(
ivy.permute_dims(x, row_indices + column_indices),
(row_size, column_size),
out=out,
)
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def soft_thresholding(
x: Union[ivy.Array, ivy.NativeArray],
/,
threshold: Union[float, ivy.Array, ivy.NativeArray],
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Soft-thresholding operator.
sign(tensor) * max[abs(tensor) - threshold, 0]
Parameters
----------
x
input array
threshold
float or array with shape tensor.shape
* If float the threshold is applied to the whole tensor
* If array, one threshold is applied per elements, 0 values are ignored
out
optional output array, for writing the result to.
Returns
-------
ivy.Array
thresholded tensor on which the operator has been applied
Examples
--------
Basic shrinkage
>>> x = ivy.array([[1, -2, 1.5], [-4, 3, -0.5]])
>>> soft_thresholding(x, 1.1)
array([[ 0. , -0.9, 0.4],
[-2.9, 1.9, 0. ]])
Example with missing values
>>> mask = ivy.array([[0, 0, 1], [1, 0, 1]])
>>> soft_thresholding(x, mask*1.1)
array([[ 1. , -2. , 0.4],
[-2.9, 3. , 0. ]])
"""
res = ivy.abs(x) - threshold
res = ivy.where(res < 0.0, 0.0, res) * ivy.sign(x)
if ivy.exists(out):
return ivy.inplace_update(out, res)
return res
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def choose(
arr: Union[ivy.Array, ivy.NativeArray],
choices: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: None = None,
mode: Union[str, None] = None,
) -> ivy.Array:
"""Take values from the input array by matching 1d index and data slices.
Parameters
----------
arr
The source array.
choices
The indices of the values to extract.
out
The output array.
mode
One of: 'wrap', 'clip'. Parameter controlling how out-of-bounds indices
will be handled.
Returns
-------
ret
The returned array has the same shape as `indices`.
Examples
--------
>>> choices = ivy.array([[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23],[30, 31, 32, 33]])
    >>> print(ivy.choose(choices, ivy.array([2, 3, 1, 0])))
ivy.array([20, 31, 12, 3])
>>> arr = ivy.array([2, 4, 1, 0])
>>> print(ivy.choose(choices, arr, mode='clip')) # 4 goes to 3 (4-1)
ivy.array([20, 31, 12, 3])
>>> arr = ivy.array([2, 4, 1, 0])
>>> print(ivy.choose(choices, arr, mode='wrap')) # 4 goes to (4 mod 4)
ivy.array([20, 1, 12, 3])
"""
return ivy.current_backend().choose(arr, choices, out=out, mode=mode)
@handle_array_function
@inputs_to_ivy_arrays
@handle_nestable
@handle_exceptions
@handle_device
def column_stack(
arrays: Sequence[Union[ivy.Array, ivy.NativeArray]],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Create a new array by horizontally stacking the arrays in arrays.
Equivalent to `ivy.hstack(arrays)`, except each zero or one dimensional
array `x` in arrays is first reshaped into a `(x.size(), 1)` column
before being stacked horizontally.
Parameters
----------
arrays
Arrays to be stacked.
out
Output array.
Returns
-------
ret
Stacked input.
Examples
--------
Arrays of different dtypes up to dimension 2.
>>> a0 = ivy.array(True)
>>> a1 = ivy.array([7])
>>> a2 = ivy.array([[11.3, 13.7]])
>>> ivy.column_stack((a0, a1, a2))
ivy.array([[ 1. , 7. , 11.30000019, 13.69999981]])
Arrays of dimension 3.
>>> a = ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
>>> b = ivy.array([[[11, 12]], [[13, 14]]])
>>> ivy.column_stack((a, b))
ivy.array([[[ 1, 2],
[ 3, 4],
[11, 12]],
[[ 5, 6],
[ 7, 8],
[13, 14]]])
"""
arrays = [ivy.reshape(x, shape=(-1, 1)) if x.ndim < 2 else x for x in arrays]
return ivy.hstack(arrays, out=out)
column_stack.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_out_argument",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def take(
x: Union[int, ivy.Array, ivy.NativeArray],
indices: Union[int, ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[int] = None,
mode: str = "fill",
fill_value: Optional[Number] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return elements of an array along an axis.
.. note::
Conceptually, take(x, indices, axis=3) is equivalent to x[:,:,:,indices,...];
however, explicit indexing via arrays of indices is not currently supported
in this specification due to concerns regarding __setitem__
and array mutation semantics.
Parameters
----------
x
input array
indices
array indices. Must have an integer data type.
axis
axis over which to select values. If `axis` is negative,
the function must determine the axis along which to select values
by counting from the last dimension.
By default, the flattened input array is used.
mode
specifies how out-of-bounds `indices` will behave.
- ‘raise’ – raise an error
- ‘wrap’ – wrap around
- ‘clip’ – clip to the range (all indices that are too large are
replaced by the index that addresses the last element along that axis.
Note that this disables indexing with negative numbers.)
        - ‘fill’ (default) – returns invalid values (e.g. NaN)
for out-of bounds indices (see also fill_value below)
fill_value
fill value to return for out-of-bounds slices
(Defaults to NaN for inexact types,
the largest negative value for signed types,
the largest positive value for unsigned types, and True for booleans.)
out
optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
an array having the same data type as `x`.
The output array must have the same rank (i.e., number of dimensions) as `x`
and must have the same shape as `x`, except for the axis specified by `axis`
whose size must equal the number of elements in `indices`.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
    API_specification/generated/array_api.take.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With `ivy.Array` input:
>>> x = ivy.array([4,5,6])
>>> indices = ivy.array([2,1,0])
>>> y = ivy.take(x, indices)
>>> print(y)
ivy.array([6, 5, 4])
>>> x = ivy.array([4.7,5.2,6.5])
>>> indices = ivy.array([[0,1]])
>>> y = ivy.zeros_like(indices, dtype=x.dtype)
>>> ivy.take(x, indices, out=y)
>>> print(y)
ivy.array([[4.7, 5.2]])
>>> x = ivy.array([False, False, True])
>>> indices = ivy.array([[4,3,2]])
>>> y = ivy.zeros_like(indices, dtype=x.dtype)
>>> ivy.take(x, indices, out=y, mode="wrap")
>>> print(y)
ivy.array([[False, False, True]])
With `ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([True,False,False]),
... b=ivy.array([2.3,4.5,6.7]),
... c=ivy.array([1,2,3]))
>>> indices = ivy.array([[1,9,2]])
>>> y = ivy.take(x, indices)
>>> print(y)
{
a: ivy.array([[False, True, False]]),
b: ivy.array([[4.5, nan, 6.69999981]]),
c: ivy.array([[2, -2147483648, 3]])
}
"""
return ivy.current_backend().take(
x, indices, axis=axis, mode=mode, fill_value=fill_value, out=out
)
@inputs_to_ivy_arrays
@handle_exceptions
@handle_device
def trim_zeros(
a: Union[ivy.Array, ivy.NativeArray],
/,
*,
trim: str = "fb",
) -> ivy.Array:
"""ivy.Container instance method variant of ivy.trim_zeros. This method
simply wraps the function, and so the docstring for ivy.trim_zeros also
applies to this method with minimal changes.
Parameters
----------
a : 1-D array
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
1-D array
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = ivy.array([0, 0, 0, 0, 8, 3, 0, 0, 7, 1, 0])
>>> ivy.trim_zeros(a)
array([8, 3, 0, 0, 7, 1])
>>> ivy.trim_zeros(a, 'b')
array([0, 0, 0, 0, 8, 3, 0, 0, 7, 1])
>>> ivy.trim_zeros([0, 8, 3, 0, 0])
[8, 3]
"""
return ivy.current_backend(a).trim_zeros(a, trim=trim)
trim_zeros.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def unflatten(
x: Union[ivy.Array, ivy.NativeArray],
/,
dim: int,
shape: Tuple[int],
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Expand a dimension of the input tensor over multiple dimensions.
Parameters
----------
x
input tensor.
dim
dimension to be unflattened, specified as an index into input.shape.
shape
new shape of the unflattened dimension. One of its elements can be -1 in
which case the corresponding output dimension is inferred. Otherwise,
the product of sizes must equal input.shape[dim].
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
view of input with the specified dimension unflattened.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.permute_dims.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
    >>> x = ivy.random_normal(shape=(3, 4, 1))
    >>> ivy.unflatten(x, dim=1, shape=(2, 2)).shape
    ivy.Shape(3, 2, 2, 1)
    >>> ivy.unflatten(x, dim=1, shape=(-1, 2)).shape
    ivy.Shape(3, 2, 2, 1)
    >>> y = ivy.random_normal(shape=(5, 12, 3))
    >>> ivy.unflatten(y, dim=-2, shape=(2, 2, 3, 1, 1)).shape
    ivy.Shape(5, 2, 2, 3, 1, 1, 3)
"""
return ivy.current_backend(x).unflatten(x, dim=dim, shape=shape, out=out)
| ivy/ivy/functional/ivy/experimental/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/manipulation.py",
"repo_id": "ivy",
"token_count": 38962
} | 51 |
# For Review
# global
from typing import Union, Optional, Tuple, List, Iterable, Sequence
from numbers import Number
from numpy.core.numeric import normalize_axis_tuple
# local
import ivy
from ivy.utils.backend import current_backend
from ivy.func_wrapper import (
handle_array_function,
to_native_arrays_and_back,
handle_out_argument,
handle_nestable,
handle_array_like_without_promotion,
handle_view,
handle_device,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
def _calculate_out_shape(axis, array_shape):
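    # e.g. axis=(0, 2) with array_shape=(3, 4) -> out_shape == [1, 3, 1, 4]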
if type(axis) not in (tuple, list):
axis = (axis,)
out_dims = len(axis) + len(array_shape)
norm_axis = normalize_axis_tuple(axis, out_dims)
shape_iter = iter(array_shape)
out_shape = [
1 if current_ax in norm_axis else next(shape_iter)
for current_ax in range(out_dims)
]
return out_shape
# Array API Standard #
# -------------------#
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def concat(
xs: Union[
Tuple[Union[ivy.Array, ivy.NativeArray], ...],
List[Union[ivy.Array, ivy.NativeArray]],
],
/,
*,
axis: int = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Join a sequence of arrays along an existing axis.
Parameters
----------
xs
input arrays to join. The arrays must have the same shape, except in the
dimension specified by axis.
axis
axis along which the arrays will be joined. If axis is None, arrays are
flattened before concatenation. If axis is negative, the axis is along which
to join is determined by counting from the last dimension. Default: ``0``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an output array containing the concatenated values. If the input arrays have
different data types, normal Type Promotion Rules apply.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.concat.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
>>> x = ivy.array([[1, 2], [3, 4]])
>>> y = ivy.array([[5, 6]])
>>> ivy.concat((x, y))
ivy.array([[1, 2],[3, 4],[5, 6]])
"""
return current_backend(xs[0]).concat(xs, axis=axis, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def expand_dims(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[bool] = None,
axis: Union[int, Sequence[int]] = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Expand the shape of an array by inserting a new axis (dimension) of size
one at the position specified by axis.
Parameters
----------
x
input array.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
axis
axis position (zero-based). If x has rank (i.e, number of dimensions) N, a
valid axis must reside on the closed-interval [-N-1, N]. If provided a negative
axis, the axis position at which to insert a singleton dimension is
computed as N + axis + 1. Hence, if provided -1, the resolved axis position
is N (i.e., a singleton dimension is appended to the input array x).
If provided -N-1, the resolved axis position is 0 (i.e., a singleton
dimension is prepended to the input array x). An IndexError exception
is raised if provided an invalid axis position.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array with its dimension added by one in a given axis.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an
extension of the `docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.expand_dims.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0, 1, 2])
>>> y = ivy.expand_dims(x)
>>> print(y)
ivy.array([[0, 1, 2]])
>>> x = ivy.array([[0.5, -0.7, 2.4],
... [ 1, 2, 3]])
>>> y = ivy.zeros((2, 1, 3))
>>> ivy.expand_dims(x, axis=1, out=y)
>>> print(y)
ivy.array([[[0.5, -0.7, 2.4]],
[[ 1., 2., 3.]]])
>>> x = ivy.array([[-1, -2],
... [ 3, 4]])
>>> y = ivy.zeros((1, 2, 2))
>>> ivy.expand_dims(x, axis=0, out=y)
>>> print(y)
ivy.array([[[-1, -2],
[3, 4]]])
>>> x = ivy.array([[-1.1, -2.2, 3.3],
... [ 4.4, 5.5, 6.6]])
>>> y = ivy.expand_dims(x, axis=(0, -1))
>>> print(y)
ivy.array([[[[-1.1],
[-2.2],
[ 3.3]],
[[ 4.4],
[ 5.5],
[ 6.6]]]])
>>> x = ivy.array([[-1.7, -3.2, 2.3],
... [ 6.3, 1.4, 5.7]])
>>> y = ivy.expand_dims(x, axis=[0, 1, -1])
>>> print(y)
ivy.array([[[[[-1.7],
[-3.2],
[ 2.3]],
[[ 6.3],
[ 1.4],
[ 5.7]]]]])
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> y = ivy.expand_dims(x, axis=-1)
>>> print(y)
{
a: ivy.array([[0.],
[1.],
[2.]]),
b: ivy.array([[3.],
[4.],
[5.]])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> container_axis = ivy.Container(a=0, b=1)
>>> y = ivy.expand_dims(x, axis=container_axis)
>>> print(y)
{
a: ivy.array([[0., 1., 2.]]),
b: ivy.array([[3.],
[4.],
[5.]])
}
"""
return current_backend(x).expand_dims(x, copy=copy, axis=axis, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def flip(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[bool] = None,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Reverses the order of elements in an array along the given axis. The
shape of the array must be preserved.
Parameters
----------
x
input array.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
axis
axis (or axes) along which to flip. If axis is None, all input array axes are
flipped. If axis is negative, axis is counted from the last dimension. If
        provided more than one axis, only the specified axes are flipped.
        Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
        an output array having the same data type and shape as ``x`` and whose elements,
relative to ``x``, are reordered.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.flip.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([3, 4, 5])
>>> y = ivy.flip(x)
>>> print(y)
ivy.array([5, 4, 3])
>>> x = ivy.array([[1, 2, 3], [4, 5, 6]])
>>> y = ivy.zeros((2, 3))
>>> ivy.flip(x, out=y)
>>> print(y)
ivy.array([[6, 5, 4],
[3, 2, 1]])
>>> x = ivy.array([[1, 2, 3], [4, 5, 6]])
>>> y = ivy.zeros((2, 3))
>>> ivy.flip(x, axis=0, out=y)
>>> print(y)
ivy.array([[4, 5, 6],
[1, 2, 3]])
>>> x = ivy.array([[[1, 2, 3], [4, 5, 6]],[[7, 8, 9], [10, 11, 12]]])
>>> ivy.flip(x, axis=[0, 1], out=x)
>>> print(x)
ivy.array([[[10,11,12],[7,8,9]],[[4,5,6],[1,2,3]]])
>>> x = ivy.array([[[1, 2, 3], [4, 5, 6]],[[7, 8, 9], [10, 11, 12]]])
>>> ivy.flip(x, axis=(2, 1), out=x)
>>> print(x)
ivy.array([[[ 6, 5, 4],
[ 3, 2, 1]],
[[12, 11, 10],
[ 9, 8, 7]]])
"""
return current_backend(x).flip(x, copy=copy, axis=axis, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def permute_dims(
x: Union[ivy.Array, ivy.NativeArray],
/,
axes: Tuple[int, ...],
*,
copy: Optional[bool] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Permutes the axes (dimensions) of an array x.
Parameters
----------
x
input array.
axes
tuple containing a permutation of (0, 1, ..., N-1) where N is the number of axes
(dimensions) of x.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the axes permutation. The returned array must have the same
data type as x.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.permute_dims.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[1, 2, 3], [4, 5, 6]])
>>> y = ivy.permute_dims(x, axes=(1, 0))
>>> print(y)
ivy.array([[1, 4],
[2, 5],
[3, 6]])
>>> x = ivy.zeros((2, 3))
>>> y = ivy.permute_dims(x, axes=(1, 0))
>>> print(y)
ivy.array([[0., 0.],
[0., 0.],
[0., 0.]])
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[0., 1. ,2.]]), b=ivy.array([[3., 4., 5.]]))
>>> y = ivy.permute_dims(x, axes=(1, 0))
>>> print(y)
{
a: ivy.array([[0.],
[1.],
[2.]]),
b: ivy.array([[3.],
[4.],
[5.]])
}
>>> x = ivy.Container(a=ivy.array([[0., 1., 2.]]), b = ivy.array([[3., 4., 5.]]))
>>> y = ivy.Container(a=ivy.zeros((3, 1)), b= ivy.zeros((3, 1)))
>>> ivy.permute_dims(x, axes=(1, 0), out=y)
>>> print(y)
{
a: ivy.array([[0.],
[1.],
[2.]]),
b: ivy.array([[3.],
[4.],
[5.]])
}
"""
return current_backend(x).permute_dims(x, axes, copy=copy, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def reshape(
x: Union[ivy.Array, ivy.NativeArray],
/,
shape: Union[ivy.Shape, ivy.NativeShape, Sequence[int]],
*,
copy: Optional[bool] = None,
order: str = "C",
allowzero: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Give a new shape to an array without changing its data.
Parameters
----------
x
Input array to be reshaped.
shape
a new shape compatible with the original shape. One shape dimension
can be -1. In this case, the value is inferred from the length of the array and
remaining dimensions.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
order
Read the elements of x using this index order, and place the elements into
the reshaped array using this index order.
``C`` means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first axis index
changing slowest.
``F`` means to read / write the elements using Fortran-like index order, with
the first index changing fastest, and the last index changing slowest.
Note that the ``C`` and ``F`` options take no account of the memory layout
of the underlying array, and only refer to the order of indexing.
Default order is ``C``
allowzero
        When ``allowzero=True``, any value of zero in the ``shape`` argument is
        honored as-is. When ``allowzero=False``, for any value of zero in the
        ``shape`` argument, the corresponding dimension value is copied from the
        input tensor dynamically.
Default value is ``True``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an output array having the same data type and elements as x.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.reshape.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[0., 1., 2.],[3., 4., 5.]])
>>> y = ivy.reshape(x,(3,2))
>>> print(y)
ivy.array([[0., 1.],
[2., 3.],
[4., 5.]])
>>> x = ivy.array([[0., 1., 2.],[3., 4., 5.]])
>>> y = ivy.reshape(x,(3,2), order='F')
>>> print(y)
ivy.array([[0., 4.],
[3., 2.],
[1., 5.]])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([[0., 1., 2.],[3., 4., 5.]])
>>> y = ivy.reshape(x,(2,3))
>>> print(y)
ivy.array([[0., 1., 2.],
[3., 4., 5.]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0, 1, 2, 3, 4, 5]),
... b=ivy.array([0, 1, 2, 3, 4, 5]))
>>> y = ivy.reshape(x,(2,3))
>>> print(y)
{
a: ivy.array([[0, 1, 2],
[3, 4, 5]]),
b: ivy.array([[0, 1, 2],
[3, 4, 5]])
}
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[0., 1., 2.]]), b=ivy.array([[3., 4., 5.]]))
>>> y = ivy.reshape(x, (-1, 1))
>>> print(y)
{
a: ivy.array([[0.],[1.],[2.]]),
b: ivy.array([[3.],[4.],[5.]])
}
"""
ivy.utils.assertions.check_elem_in_list(order, ["C", "F"])
return current_backend(x).reshape(
x, shape=shape, copy=copy, allowzero=allowzero, out=out, order=order
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def roll(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
shift: Union[int, Sequence[int]],
*,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[ivy.Array] = None,
) -> Union[ivy.Array, ivy.Container]:
"""Roll array elements along a specified axis. Array elements that roll
beyond the last position are re-introduced at the first position. Array
elements that roll beyond the first position are re-introduced at the last
position.
Parameters
----------
x
input array.
shift
number of places by which the elements are shifted. If shift is a tuple,
then axis must be a tuple of the same size, and each of the given axes must
be shifted by the corresponding element in shift. If shift is an int
and axis a tuple, then the same shift must be used for all specified
axes. If a shift is positive, then array elements must be shifted positively
(toward larger indices) along the dimension of axis. If a shift is negative,
then array elements must be shifted negatively (toward smaller indices) along
the dimension of axis.
axis
axis (or axes) along which elements to shift. If axis is None, the array
must be flattened, shifted, and then restored to its original shape.
Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an output array having the same data type as x and whose elements, relative
to x, are shifted.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.roll.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0., 1., 2.])
>>> y = ivy.roll(x, 1)
>>> print(y)
ivy.array([2., 0., 1.])
>>> x = ivy.array([[0., 1., 2.],
... [3., 4., 5.]])
>>> y = ivy.zeros((2, 3))
>>> ivy.roll(x, 2, axis=-1, out=y)
>>> print(y)
ivy.array([[1., 2., 0.],
[4., 5., 3.]])
>>> x = ivy.array([[[0., 0.], [1., 3.], [2., 6.]],
... [[3., 9.], [4., 12.], [5., 15.]]])
>>> ivy.roll(x, shift=(1, -1), axis=(0, 2), out=x)
>>> print(x)
ivy.array([[[ 9., 3.],
[12., 4.],
[15., 5.]],
[[ 0., 0.],
[ 3., 1.],
[ 6., 2.]]])
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> y = ivy.roll(x, 1)
>>> print(y)
{
a: ivy.array([2., 0., 1.]),
b: ivy.array([5., 3., 4.])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> shift = ivy.Container(a=1, b=-1)
>>> y = ivy.roll(x, shift)
>>> print(y)
{
a: ivy.array([2., 0., 1.]),
b: ivy.array([4., 5., 3.])
}
"""
return current_backend(x).roll(x, shift, axis=axis, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def squeeze(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
copy: Optional[bool] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Remove singleton dimensions (axes) from x.
Parameters
----------
x
input array.
axis
axis (or axes) to squeeze. If a specified axis has a size greater than one, a
        ValueError is raised. If None, then all squeezable axes are squeezed.
        Default: ``None``.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an output array having the same data type and elements as x.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.squeeze.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[[0, 1], [2, 3]]])
>>> print(ivy.squeeze(x, axis=0))
ivy.array([[0, 1], [2, 3]])
>>> x = ivy.array([[[[1, 2, 3]], [[4, 5, 6]]]])
>>> print(ivy.squeeze(x, axis=2))
ivy.array([[[1, 2, 3], [4, 5, 6]]])
>>> x = ivy.array([[[0], [1], [2]]])
>>> print(ivy.squeeze(x, axis=None))
ivy.array([0, 1, 2])
>>> print(ivy.squeeze(x, axis=0))
ivy.array([[0],
[1],
[2]])
>>> print(ivy.squeeze(x, axis=2))
ivy.array([[0, 1, 2]])
>>> print(ivy.squeeze(x, axis=(0, 2)))
ivy.array([0, 1, 2])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> y = ivy.squeeze(x, axis=None)
>>> print(y)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
"""
return current_backend(x).squeeze(x, axis=axis, copy=copy, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def stack(
arrays: Union[
Tuple[Union[ivy.Array, ivy.NativeArray], ...],
List[Union[ivy.Array, ivy.NativeArray]],
],
/,
*,
axis: int = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Join a sequence of arrays along a new axis.
Parameters
----------
arrays
input arrays to join. Each array must have the same shape.
axis
axis along which the arrays will be joined. Providing an axis specifies the
index of the new axis in the dimensions of the result. For example, if axis
is 0, the new axis will be the first dimension and the output array will
have shape (N, A, B, C); if axis is 1, the new axis will be the
second dimension and the output array will have shape (A, N, B, C); and, if
axis is -1, the new axis will be the last dimension and the output array
will have shape (A, B, C, N). A valid axis must be on the interval
[-N, N), where N is the rank (number of dimensions) of x. If
provided an axis outside of the required interval, the function must raise
an exception. Default: ``0``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an output array having rank N+1, where N is the rank (number of
dimensions) of x. If the input arrays have different data types, normal
ref:`type-promotion` must apply. If the input arrays have the same data type,
the output array must have the same data type as the input arrays.
.. note::
This specification leaves type promotion between data type families (i.e.,
intxx and floatxx) unspecified.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.stack.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
    With :class:`ivy.Array` input:
>>> x = ivy.array([0., 1., 2., 3., 4.])
>>> y = ivy.array([6.,7.,8.,9.,10.])
>>> ivy.stack((x,y))
ivy.array([[ 0., 1., 2., 3., 4.],
[ 6., 7., 8., 9., 10.]])
    With :class:`ivy.Array` input and a different `axis`:
>>> ivy.stack((x,y),axis=1)
ivy.array([[ 0., 6.],
[ 1., 7.],
[ 2., 8.],
[ 3., 9.],
[ 4., 10.]])
"""
res = current_backend(arrays).stack(arrays, axis=axis, out=out)
if ivy.exists(out):
return ivy.inplace_update(out, res)
return res
# Extra #
# ------#
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def clip(
x: Union[ivy.Array, ivy.NativeArray],
/,
x_min: Optional[Union[Number, ivy.Array, ivy.NativeArray]] = None,
x_max: Optional[Union[Number, ivy.Array, ivy.NativeArray]] = None,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Clips (limits) the values in an array.
Given an interval, values outside the interval are clipped to the interval edges
(element-wise). For example, if an interval of [0, 1] is specified, values smaller
    than 0 become 0, and values larger than 1 become 1. The minimum value needs to be
    smaller than or equal to the maximum value to return correct results.
Parameters
----------
x
Input array containing elements to clip.
x_min
Minimum value.
x_max
Maximum value.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
An array with the elements of x, but where values < x_min are replaced with
x_min, and those > x_max with x_max.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> y = ivy.clip(x, 1., 5.)
>>> print(y)
ivy.array([1., 1., 2., 3., 4., 5., 5., 5., 5., 5.])
>>> x = ivy.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> y = ivy.zeros_like(x)
>>> ivy.clip(x, 2., 7., out=y)
>>> print(y)
ivy.array([2., 2., 2., 3., 4., 5., 6., 7., 7., 7.])
>>> x = ivy.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> x_min = ivy.array([3., 3., 1., 0., 2., 3., 4., 0., 4., 4.])
>>> x_max = ivy.array([5., 4., 3., 3., 5., 7., 8., 3., 8., 8.])
>>> y = ivy.clip(x, x_min, x_max)
>>> print(y)
ivy.array([3., 3., 2., 3., 4., 5., 6., 3., 8., 8.])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> x_min = ivy.native_array([3., 3., 1., 0., 2., 3., 4., 2., 4., 4.])
>>> x_max = ivy.native_array([5., 4., 3., 3., 5., 7., 8., 3., 8., 8.])
>>> y = ivy.clip(x, x_min, x_max)
>>> print(y)
ivy.array([3., 3., 2., 3., 4., 5., 6., 3., 8., 8.])
With a mix of :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x = ivy.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> x_min = ivy.native_array([3., 3., 1., 0., 2., 3., 4., 2., 4., 4.])
>>> x_max = ivy.native_array([5., 4., 3., 3., 5., 7., 8., 3., 8., 8.])
>>> y = ivy.clip(x, x_min, x_max)
>>> print(y)
ivy.array([3., 3., 2., 3., 4., 5., 6., 3., 8., 8.])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> y = ivy.clip(x, 1., 5.)
>>> print(y)
{
a: ivy.array([1., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> x_min = ivy.Container(a=0, b=-3)
>>> x_max = ivy.Container(a=1, b=-1)
>>> y = ivy.clip(x, x_min,x_max)
>>> print(y)
{
a: ivy.array([0., 1., 1.]),
b: ivy.array([-1., -1., -1.])
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> x_min = ivy.array([3., 0., 1])
>>> x_max = ivy.array([5., 4., 3.])
>>> y = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> z = ivy.clip(y, x_min, x_max)
>>> print(z)
{
a: ivy.array([3., 1., 2.]),
b: ivy.array([3., 4., 3.])
}
"""
return current_backend(x).clip(x, x_min, x_max, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def constant_pad(
x: Union[ivy.Array, ivy.NativeArray],
/,
pad_width: Iterable[Tuple[int]],
*,
value: Number = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Pad an array with a constant value.
Parameters
----------
x
Input array to pad.
pad_width
Number of values padded to the edges of each axis.
Specified as ((before_1, after_1), … (before_N, after_N)), where N is number of
axes of x.
value
The constant value to pad the array with.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Padded array of rank equal to x with shape increased according to pad_width.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3, 4, 5])
>>> y = ivy.constant_pad(x, pad_width = [[2, 3]])
>>> print(y)
ivy.array([0, 0, 1, 2, 3, 4, 5, 0, 0, 0])
>>> x = ivy.array([[1, 2], [3, 4]])
>>> y = ivy.constant_pad(x, pad_width=[(2, 3), (2, 3)])
>>> print(y)
ivy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 0, 0, 0],
[0, 0, 3, 4, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> x = ivy.array([[1, 2], [3, 4]])
>>> y = ivy.constant_pad(x, pad_width = [[3, 2], [2, 3]])
>>> print(y)
ivy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 0, 0, 0],
[0, 0, 3, 4, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> x = ivy.array([[2.], [3.]])
>>> y = ivy.zeros((4, 3))
>>> ivy.constant_pad(x, pad_width = [(1, 1), (1, 1)], value = 5.0, out= y)
>>> print(y)
ivy.array([[5., 5., 5.],
[5., 2., 5.],
[5., 3., 5.],
[5., 5., 5.]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a = ivy.array([1., 2., 3.]),
... b = ivy.array([3., 4., 5.]))
>>> y = ivy.constant_pad(x, pad_width = [[2, 3]], value = 5.0)
>>> print(y)
{
a: ivy.array([5., 5., 1., 2., 3., 5., 5., 5.]),
b: ivy.array([5., 5., 3., 4., 5., 5., 5., 5.])
}
"""
return current_backend(x).constant_pad(x, pad_width=pad_width, value=value, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def repeat(
x: Union[ivy.Array, ivy.NativeArray],
/,
repeats: Union[int, Iterable[int]],
*,
axis: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Repeat values along a given dimension.
Parameters
----------
x
Input array.
repeats
The number of repetitions for each element. repeats is broadcast to fit the
shape of the given axis.
axis
The axis along which to repeat values. By default, use the flattened input
array, and return a flat output array.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The repeated output array.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([3, 4, 5])
>>> y = ivy.repeat(x, 2)
>>> print(y)
ivy.array([3, 3, 4, 4, 5, 5])
>>> x = ivy.array([[1, 2, 3], [4, 5, 6]])
>>> y = ivy.repeat(x, [1, 2], axis=0)
>>> print(y)
ivy.array([[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([0., 1., 2.]))
>>> y = ivy.repeat(x, 2, axis=0)
>>> print(y)
{
a: ivy.array([0., 0., 1., 1., 2., 2.]),
b: ivy.array([0., 0., 1., 1., 2., 2.])
}
"""
return current_backend(x).repeat(x, repeats, axis=axis, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@to_native_arrays_and_back
@handle_array_function
@handle_device
def split(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[bool] = None,
num_or_size_splits: Optional[
Union[int, Sequence[int], ivy.Array, ivy.NativeArray]
] = None,
axis: int = 0,
with_remainder: bool = False,
) -> List[ivy.Array]:
"""Split an array into multiple sub-arrays.
Parameters
----------
x
array to be divided into sub-arrays.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
num_or_size_splits
Number of equal arrays to divide the array into along the given axis if an
integer. The size of each split element if a sequence of integers or 1-D array.
Default is to divide into as many 1-dimensional arrays as the axis dimension.
axis
The axis along which to split, default is ``0``.
with_remainder
If the tensor does not split evenly, then store the last remainder entry.
Default is ``False``.
Returns
-------
ret
A list of sub-arrays.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.split(x)
>>> print(y)
[ivy.array([1]),ivy.array([2]),ivy.array([3])]
>>> x = ivy.array([[3, 2, 1], [4, 5, 6]])
>>> y = ivy.split(x, num_or_size_splits=2, axis=1, with_remainder=True)
>>> print(y)
[ivy.array([[3,2],[4,5]]),ivy.array([[1],[6]])]
>>> x = ivy.array([4, 6, 5, 3])
>>> y = x.split(num_or_size_splits=[1, 3], axis=0, with_remainder=False)
>>> print(y)
    [ivy.array([4]), ivy.array([6, 5, 3])]
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([10, 45, 2]))
>>> y = ivy.split(x)
>>> print(y)
[
{
a: ivy.array([10])
},
{
a: ivy.array([45])
},
{
a: ivy.array([2])
}
]
"""
return current_backend(x).split(
x,
copy=copy,
num_or_size_splits=num_or_size_splits,
axis=axis,
with_remainder=with_remainder,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def swapaxes(
x: Union[ivy.Array, ivy.NativeArray],
axis0: int,
axis1: int,
/,
*,
copy: Optional[bool] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Interchange two axes of an array.
Parameters
----------
x
Input array.
axis0
First axis to be swapped.
axis1
Second axis to be swapped.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
x with its axes permuted.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[0, 1, 2]])
>>> y = ivy.swapaxes(x, 0, 1)
>>> print(y)
ivy.array([[0],
[1],
[2]])
>>> x = ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> y = ivy.swapaxes(x, 0, 1)
>>> print(y)
ivy.array([[[0, 1],
[4, 5]],
[[2, 3],
[6, 7]]])
>>> x = ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> y = ivy.swapaxes(x, 0, 2)
>>> print(y)
ivy.array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
>>> x = ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> y = ivy.swapaxes(x, 1, 2)
>>> print(y)
ivy.array([[[0, 2],
[1, 3]],
[[4, 6],
[5, 7]]])
>>> x = ivy.array([[0, 1, 2]])
>>> y = ivy.swapaxes(x, 0, 1)
>>> print(y)
ivy.array([[0],
[1],
[2]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[0., 1., 2.]]), b=ivy.array([[3., 4., 5.]]))
>>> y = ivy.swapaxes(x, 0, 1)
>>> print(y)
{
a: ivy.array([[0.],
[1.],
[2.]]),
b: ivy.array([[3.],
[4.],
[5.]])
}
    Both the description and the type hints above assume an array input for
    simplicity, but this function is *nestable*, and therefore also accepts
    :class:`ivy.Container` instances in place of any of the arguments.
"""
return current_backend(x).swapaxes(x, axis0, axis1, copy=copy, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def tile(
x: Union[ivy.Array, ivy.NativeArray],
/,
repeats: Iterable[int],
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Construct an array by repeating x the number of times given by reps.
Parameters
----------
x
Input array.
repeats
The number of repetitions of x along each axis.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The tiled output array.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1,2,3,4])
>>> y = ivy.tile(x, 3)
>>> print(y)
ivy.array([1,2,3,4,1,2,3,4,1,2,3,4])
>>> x = ivy.array([[1,2,3],
... [4,5,6]])
>>> y = ivy.tile(x, (2,3))
>>> print(y)
ivy.array([[1,2,3,1,2,3,1,2,3],
[4,5,6,4,5,6,4,5,6],
[1,2,3,1,2,3,1,2,3],
[4,5,6,4,5,6,4,5,6]])
>>> x = ivy.array([[[0], [1]]])
>>> y = ivy.tile(x,(2,2,3))
>>> print(y)
ivy.array([[[0,0,0],
[1,1,1],
[0,0,0],
[1,1,1]],
[[0,0,0],
[1,1,1],
[0,0,0],
[1,1,1]]])
With :class:`ivy.Container` input:
>>> x = ivy.Container( a = ivy.array([0,1,2]), b = ivy.array([[3],[4]]))
>>> y = ivy.tile(x, (1,2))
>>> print(y)
{
a: ivy.array([[0,1,2,0,1,2]]),
b: ivy.array([[3,3],[4,4]])
}
    Both the description and the type hints above assume an array input for
    simplicity, but this function is *nestable*, and therefore also accepts
    :class:`ivy.Container` instances in place of any of the arguments.
"""
return current_backend(x).tile(x, repeats, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_view
@to_native_arrays_and_back
@handle_array_function
@handle_device
def unstack(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
copy: Optional[bool] = None,
axis: int = 0,
keepdims: bool = False,
) -> List[ivy.Array]:
"""Unpacks the given dimension of a rank-R array into rank-(R-1) arrays.
Parameters
----------
x
Input array to unstack.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view of the input array.
axis
Axis for which to unpack the array.
keepdims
Whether to keep dimension 1 in the unstack dimensions. Default is ``False``.
Returns
-------
ret
List of arrays, unpacked along specified dimensions.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
>>> y = ivy.unstack(x, axis=0)
>>> print(y)
[ivy.array([[1, 2],
[3, 4]]), ivy.array([[5, 6],
[7, 8]])]
>>> x = ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
>>> y = ivy.unstack(x, axis=1, keepdims=True)
>>> print(y)
[ivy.array([[[1, 2]],
[[5, 6]]]), ivy.array([[[3, 4]],
[[7, 8]]])]
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),
b=ivy.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]]))
>>> ivy.unstack(x, axis=0)
[{
a: ivy.array([[1, 2],
[3, 4]]),
b: ivy.array([[9, 10],
[11, 12]])
}, {
a: ivy.array([[5, 6],
[7, 8]]),
b: ivy.array([[13, 14],
[15, 16]])
}]
>>> x = ivy.Container(a=ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),
... b=ivy.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]]))
>>> ivy.unstack(x, axis=1, keepdims=True)
[{
a: ivy.array([[[1, 2]],
[[5, 6]]]),
b: ivy.array([[[9, 10]],
[[13, 14]]])
}, {
a: ivy.array([[[3, 4]],
[[7, 8]]]),
b: ivy.array([[[11, 12]],
[[15, 16]]])
}]
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
"""
return current_backend(x).unstack(x, copy=copy, axis=axis, keepdims=keepdims)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def zero_pad(
x: Union[ivy.Array, ivy.NativeArray],
/,
pad_width: Iterable[Tuple[int]],
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Pad an array with zeros.
Parameters
----------
x
Input array to pad.
pad_width
Number of values padded to the edges of each axis. Specified as
((before_1, after_1), … (before_N, after_N)), where N is number of axes of x.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Padded array of rank equal to x with shape increased according to pad_width.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.concat.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1., 2., 3.,4, 5, 6])
>>> y = ivy.zero_pad(x, pad_width = [[2, 3]])
>>> print(y)
ivy.array([0., 0., 1., 2., 3., 4., 5., 6., 0., 0., 0.])
>>> x = ivy.array([[1., 2., 3.],[4, 5, 6]])
>>> y = ivy.zero_pad(x, pad_width = [[2, 3], [2, 3]])
>>> print(y)
ivy.array([[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 2., 3., 0., 0., 0.],
[0., 0., 4., 5., 6., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.]])
>>> x = ivy.Container(a = ivy.array([1., 2., 3.]), b = ivy.array([3., 4., 5.]))
>>> y = ivy.zero_pad(x, pad_width = [[2, 3]])
>>> print(y)
{
a: ivy.array([0., 0., 1., 2., 3., 0., 0., 0.]),
b: ivy.array([0., 0., 3., 4., 5., 0., 0., 0.])
}
"""
return current_backend(x).zero_pad(x, pad_width, out=out)
| ivy/ivy/functional/ivy/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/ivy/manipulation.py",
"repo_id": "ivy",
"token_count": 21831
} | 52 |
"""Collection of Ivy's losses as stateful classes."""
# local
import ivy
from ivy.stateful.module import Module
class LogPoissonLoss(Module):
def __init__(
self,
*,
compute_full_loss: bool = False,
axis: int = -1,
reduction: str = "none",
):
self._compute_full_loss = compute_full_loss
self._axis = axis
self._reduction = reduction
Module.__init__(self)
def _forward(
self, true, pred, *, compute_full_loss=None, axis=None, reduction=None
):
"""Perform forward pass of the Log Poisson Loss.
true
input array containing true labels.
pred
input array containing Predicted labels.
compute_full_loss
whether to compute the full loss. If false, a constant term is dropped
in favor of more efficient optimization. Default: ``False``.
axis
the axis along which to compute the log-likelihood loss. If axis is ``-1``,
the log-likelihood loss will be computed along the last dimension.
Default: ``-1``.
reduction
``'none'``: No reduction will be applied to the output.
``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``.
Returns
-------
ret
            The log Poisson loss between the given distributions.
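
        Examples
        --------
        A small illustrative sketch (values chosen arbitrarily):

        >>> true = ivy.array([0., 1., 2.])
        >>> pred = ivy.array([0.5, 1.5, 2.5])
        >>> loss_fn = LogPoissonLoss(reduction="mean")
        >>> loss = loss_fn(true, pred)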
"""
return ivy.log_poisson_loss(
true,
pred,
compute_full_loss=ivy.default(compute_full_loss, self._compute_full_loss),
axis=ivy.default(axis, self._axis),
reduction=ivy.default(reduction, self._reduction),
)
def _extra_repr(self) -> str:
return (
f"compute_full_loss={self._compute_full_loss}, axis={self._axis}, "
f"reduction={self._reduction}"
)
class CrossEntropyLoss(Module):
def __init__(
self,
*,
axis: int = -1,
epsilon: float = 1e-7,
reduction: str = "sum",
):
self._axis = axis
self._epsilon = epsilon
self._reduction = reduction
Module.__init__(self)
def _forward(self, true, pred, *, axis=None, epsilon=None, reduction=None):
"""Perform forward pass of the Cross Entropy Loss.
true
input array containing true labels.
pred
input array containing Predicted labels.
axis
the axis along which to compute the cross-entropy loss. If axis is ``-1``,
the cross-entropy loss will be computed along the last dimension.
Default: ``-1``.
epsilon
small value to avoid division by zero. Default: ``1e-7``.
reduction
``'none'``: No reduction will be applied to the output.
``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'sum'``.
Returns
-------
ret
The cross-entropy loss between the given distributions.
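
        Examples
        --------
        A small illustrative sketch (one-hot targets and probability predictions
        assumed):

        >>> true = ivy.array([0., 1., 0.])
        >>> pred = ivy.array([0.2, 0.7, 0.1])
        >>> loss_fn = CrossEntropyLoss(reduction="mean")
        >>> loss = loss_fn(true, pred)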
"""
return ivy.cross_entropy(
true,
pred,
axis=ivy.default(axis, self._axis),
epsilon=ivy.default(epsilon, self._epsilon),
reduction=ivy.default(reduction, self._reduction),
)
def _extra_repr(self) -> str:
return (
f"axis={self._axis}, epsilon={self._epsilon}, reduction={self._reduction}"
)
class BinaryCrossEntropyLoss(Module):
def __init__(
self,
*,
from_logits: bool = False,
epsilon: float = 0.0,
reduction: str = "none",
):
self._from_logits = from_logits
self._epsilon = epsilon
self._reduction = reduction
Module.__init__(self)
def _forward(
self,
true,
pred,
*,
from_logits=None,
epsilon=None,
reduction=None,
pos_weight=None,
axis=None,
):
"""
Parameters
----------
true
input array containing true labels.
pred
input array containing Predicted labels.
from_logits
Whether `pred` is expected to be a logits tensor. By
default, we assume that `pred` encodes a probability distribution.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when calculating
the loss. If epsilon is ``0``, no smoothing will be applied. Default: ``0``.
reduction
``'none'``: No reduction will be applied to the output.
``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``.
pos_weight
a weight for positive examples. Must be an array with length equal to the
number of classes.
axis
Axis along which to compute crossentropy.
Returns
-------
ret
The binary cross entropy between the given distributions.
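
        Examples
        --------
        A small illustrative sketch (probability predictions assumed, i.e.
        ``from_logits=False``):

        >>> true = ivy.array([0., 1., 1.])
        >>> pred = ivy.array([0.2, 0.8, 0.6])
        >>> loss_fn = BinaryCrossEntropyLoss()
        >>> loss = loss_fn(true, pred)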
"""
return ivy.binary_cross_entropy(
true,
pred,
from_logits=ivy.default(from_logits, self._from_logits),
epsilon=ivy.default(epsilon, self._epsilon),
reduction=ivy.default(reduction, self._reduction),
pos_weight=pos_weight,
axis=axis,
)
def _extra_repr(self) -> str:
return (
f"from_logits={self._from_logits}, epsilon={self._epsilon}, "
f"reduction={self._reduction}"
)
| ivy/ivy/stateful/losses.py/0 | {
"file_path": "ivy/ivy/stateful/losses.py",
"repo_id": "ivy",
"token_count": 2621
} | 53 |
import warnings
import ivy
import functools
from typing import Callable
import traceback as tb
import inspect
import os
import ast
import builtins
# Helpers #
# ------- #
def _remove_so_log(trace):
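    # Rebuild the extracted stack trace: drop Cython (".pyx") frames and, where
    # possible, re-align frames that come from traced "<string>" code with the
    # original source via _align_source.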
old_stack_trace = tb.extract_tb(trace)
old_frames = inspect.getinnerframes(trace)
transpile_frame = None
module_frame = None
module_st = None
traced_lineno = None
new_stack_trace = []
track = False
for idx, st in enumerate(old_stack_trace):
if ".pyx" in repr(st):
continue
if "<string>" in repr(st):
if "compiled_fn" in repr(st) and module_frame:
track = True
traced_lineno = st.lineno
if "<module>" in repr(st):
module_frame = old_frames[idx]
module_st = st
elif (
transpile_frame is None
and os.path.join("ivy", "compiler") in st.filename
and st.name in ["compile", "transpile"]
):
transpile_frame = old_frames[idx]
elif track:
ret_st = _align_source(
st, transpile_frame, module_frame, module_st, traced_lineno
)
if ret_st:
[new_stack_trace.append(r) for r in ret_st]
if track:
track = False
else:
new_stack_trace.append(st)
return new_stack_trace
def _align_source(st, transpile_frame, module_frame, module_st, traced_lineno):
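    # Best-effort mapping of a stack-trace entry that points into traced "<string>"
    # code back to the corresponding location in the original source object.
    # Returns None when no mapping can be recovered.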
from ivy.compiler.utils.VVX import trace_obj
from ivy.compiler.utils.IIV import Graph
curr_obj = [None, None, "", ""]
if transpile_frame:
t_v = inspect.getargvalues(transpile_frame.frame)
obj = t_v.locals[t_v.varargs][0]
traced_data = trace_obj(obj, t_v.locals["args"], t_v.locals["kwargs"], {})
curr_obj[0] = traced_data[1]
curr_obj[1] = traced_data[2]
curr_obj[2] = traced_data[3]
if module_frame:
t_v = inspect.getargvalues(module_frame.frame)
for k, v in t_v.locals.items():
if k in module_st.line and isinstance(v, Graph):
traced_data = trace_obj(t_v.locals[v.__name__], (), {}, {})
curr_obj[0] = traced_data[1]
curr_obj[1] = traced_data[2]
curr_obj[2] = v.__name__
if traced_lineno:
line = v._Graph__fn_str.split("\n")[traced_lineno - 1]
line = line.split("=")[1].strip()
line = line.split("(")[0].strip()
target_name = line.split(".")[-1].strip()
curr_obj[3] = line
area = traced_lineno / len(v._Graph__fn_str.strip().split("\n"))
curr_obj = _get_traces(curr_obj, area, t_v.locals, target_name)
if curr_obj[0] is None:
return None
if not isinstance(curr_obj[0], list):
curr_obj = [curr_obj]
return curr_obj
def _get_traces(curr_obj, area, local_dict, target_name):
from ivy.compiler.utils.VVX import trace_obj, get_source_code, CallVistior
traces_list = []
func = local_dict[curr_obj[2]]
func_module = inspect.getmodule(func)
rooted_source = get_source_code(func).strip()
    try:
        module_ast = ast.parse(rooted_source)
        visitor = CallVistior(func_module)
        visitor.visit(module_ast)
        non_lib_objs_name_list = [f.__name__ for f in visitor.non_lib_objs]
    except SyntaxError:
        # the retrieved source may not be parseable on its own; fall back to an
        # empty list so the search below still runs against the target name
        non_lib_objs_name_list = []
rooted_src_list = rooted_source.split("\n")
max_idx = round(len(rooted_src_list) * area) - 1
for i in range(max_idx, 0, -1):
if target_name in rooted_src_list[i]:
curr_obj[3] = rooted_src_list[i]
curr_obj[1] += i
break
elif builtins.any(
[name in rooted_src_list[i] for name in non_lib_objs_name_list]
):
found = False
for name in non_lib_objs_name_list:
if name in rooted_src_list[i]:
traced_data = trace_obj(local_dict[name], (), {}, {})
ret_obj = [traced_data[1], traced_data[2], name, curr_obj[3]]
ret_obj = _get_traces(ret_obj, 1, local_dict, target_name)
if ret_obj:
traces_list += ret_obj
found = True
break
if found:
curr_obj[3] = rooted_src_list[i]
curr_obj[1] += i
break
return [curr_obj] + traces_list
def _check_if_path_found(path, full_path):
"""Check if the path is found in the full path.
Parameters
----------
path
the path to check
full_path
the full path to check
Returns
-------
ret
True if the path is found, False otherwise
"""
return path in full_path
def _configure_stack_trace(traceback):
"""Configure the stack trace to be displayed in the console.
Parameters
----------
traceback
the traceback object
"""
tb = traceback
trace_mode = ivy.exception_trace_mode
show_wrappers = ivy.show_func_wrapper_trace_mode
ivy_path = os.path.join("ivy", "functional", "ivy")
frontend_path = os.path.join("ivy", "functional", "frontends")
wrapper_path = os.path.join("ivy", "func_wrapper.py")
while tb.tb_next:
frame = tb.tb_next.tb_frame
file_path = frame.f_code.co_filename
if trace_mode == "ivy":
if _check_if_path_found(ivy_path, file_path):
tb = tb.tb_next
else:
tb.tb_next = tb.tb_next.tb_next
elif trace_mode == "frontend":
if _check_if_path_found(frontend_path, file_path) or _check_if_path_found(
ivy_path, file_path
):
tb = tb.tb_next
else:
tb.tb_next = tb.tb_next.tb_next
elif not show_wrappers:
if _check_if_path_found(wrapper_path, file_path):
tb.tb_next = tb.tb_next.tb_next
else:
tb = tb.tb_next
else:
tb = tb.tb_next
def _add_native_error(default):
"""Append the native error to the message if it exists.
Parameters
----------
default
list containing all the messages
Returns
-------
ret
list containing all the messages, with the native error appended if it exists
"""
trace_mode = ivy.exception_trace_mode
if isinstance(default[-1], Exception):
if isinstance(default[-1], IvyException):
if default[-1].native_error is not None:
# native error was passed in the message
native_error = default[-1].native_error
else:
# a string was passed in the message
# hence the last element is an IvyException
default[-1] = str(default[-1])
return default
else:
# exception was raised by the backend natively
native_error = default[-1]
if trace_mode == "full":
default[-1] = native_error.__class__.__name__
default.append(str(native_error))
else:
default[-1] = str(native_error)
return default
def _combine_messages(*messages, include_backend=True):
if not include_backend:
return " ".join(messages)
default = [
"numpy" if ivy.current_backend_str() == "" else ivy.current_backend_str()
]
delimiter = ": "
for message in messages:
default.append(message)
# adding the native error as well if it exists and the trace mode is set to "full"
default = _add_native_error(default)
return delimiter.join(default)
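# A minimal sketch of how the helpers above compose, assuming no backend has been
# set (so `ivy.current_backend_str()` returns "" and the prefix falls back to "numpy").
def _example_combine_messages():
    # produces "numpy: add: input shapes are incompatible"
    return _combine_messages("add", "input shapes are incompatible")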
class IvyException(Exception):
def __init__(self, *messages, include_backend=False):
self.native_error = (
messages[0]
if len(messages) == 1
and isinstance(messages[0], Exception)
and not include_backend
else None
)
if self.native_error is None:
super().__init__(
_combine_messages(*messages, include_backend=include_backend)
)
else:
super().__init__(str(messages[0]))
class IvyBackendException(IvyException):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
class IvyInvalidBackendException(IvyException):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
class IvyNotImplementedException(IvyException, NotImplementedError):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
class IvyError(IvyException):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
class IvyIndexError(IvyException, IndexError):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
class IvyAttributeError(IvyException, AttributeError):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
class IvyValueError(IvyException, ValueError):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
class IvyBroadcastShapeError(IvyException):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
class IvyDtypePromotionError(IvyException):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
class IvyDeviceError(IvyException):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
class InplaceUpdateException(IvyException):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
_non_ivy_exceptions_mapping = {
IndexError: IvyIndexError,
AttributeError: IvyAttributeError,
ValueError: IvyValueError,
Exception: IvyBackendException,
NotImplementedError: IvyNotImplementedException,
}
def handle_exceptions(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_exceptions(*args, **kwargs):
"""Catch all exceptions and raise them in IvyException.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
        The return of the function; an IvyException subclass is raised if an error occurs.
"""
try:
return fn(*args, **kwargs)
except IvyException as e:
_handle_exceptions_helper(e, type(e))
except Exception as e:
ivy_exception = _non_ivy_exceptions_mapping.get(
type(e), IvyBackendException
)
_handle_exceptions_helper(e, ivy_exception)
def _handle_exceptions_helper(e, cls):
_configure_stack_trace(e.__traceback__)
raise cls(fn.__name__, str(e), include_backend=True)
_handle_exceptions.handle_exceptions = True
return _handle_exceptions
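# A minimal sketch of the decorator above in action; `faulty_op` is a hypothetical
# function used only for illustration.
def _example_handle_exceptions_usage():
    @handle_exceptions
    def faulty_op(x):
        raise ValueError("negative values are not allowed")
    try:
        faulty_op(-1)
    except IvyValueError as e:
        # the native ValueError is mapped through _non_ivy_exceptions_mapping and
        # re-raised with the backend and wrapped function name prepended
        return str(e)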
# Inplace Update
# to avoid raising warnings on setting the same backend multiple times
_inplace_warning_cache = {}
def _handle_inplace_mode(ivy_pack=None):
if not ivy_pack:
ivy_pack = ivy
current_backend = ivy_pack.current_backend_str()
if (
current_backend != ""
and not _inplace_warning_cache.get(current_backend)
and not ivy_pack.native_inplace_support
and ivy_pack.inplace_mode == "lenient"
):
warnings.warn(
f"The current backend: '{current_backend}' does not support "
"inplace updates natively. Ivy would quietly create new arrays when "
"using inplace updates with this backend, leading to memory overhead "
"(same applies for views). If you want to control your memory "
"management, consider doing ivy.set_inplace_mode('strict') which "
"should raise an error whenever an inplace update is attempted "
"with this backend."
)
_inplace_warning_cache[current_backend] = True
def _check_inplace_update_support(x, ensure_in_backend):
current_backend = ivy.current_backend_str()
is_tf_variable = current_backend == "tensorflow" and not ivy.is_ivy_array(
x, exclusive=True
)
if (
ensure_in_backend
or ivy.is_native_array(x)
or (ivy.inplace_mode == "strict" and not is_tf_variable)
):
raise ivy.utils.exceptions.InplaceUpdateException(
f"{current_backend} does not support inplace updates "
"and ivy cannot support the operation in 'strict' mode\n"
"To enable inplace update, use ivy.set_inplace_mode('lenient')\n"
)
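# A minimal sketch of strict inplace mode, assuming a backend without native inplace
# support (e.g. JAX) and the public setters referenced in the messages above
# (ivy.set_backend, ivy.set_inplace_mode).
def _example_strict_inplace_mode():
    ivy.set_backend("jax")
    ivy.set_inplace_mode("strict")
    x = ivy.array([1.0, 2.0, 3.0])
    try:
        ivy.inplace_update(x, x + 1)
    except InplaceUpdateException:
        # strict mode refuses to emulate inplace updates with silent copies
        ivy.set_inplace_mode("lenient")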
| ivy/ivy/utils/exceptions.py/0 | {
"file_path": "ivy/ivy/utils/exceptions.py",
"repo_id": "ivy",
"token_count": 6061
} | 54 |
# global
import os
import pytest
from typing import Dict
import sys
import multiprocessing as mp
# for enabling numpy's bfloat16 behavior
from packaging import version
from .helpers.globals import mod_backend, mod_frontend
multiprocessing_flag = False # multiversion
# local
import ivy_tests.test_ivy.helpers.test_parameter_flags as pf
from ivy import set_exception_trace_mode
from ivy_tests.test_ivy.helpers import globals as test_globals
from ivy_tests.test_ivy.helpers.available_frameworks import available_frameworks # noqa
from ivy_tests.test_ivy.helpers.multiprocessing import backend_proc, frontend_proc
from ivy_tests.test_ivy.helpers.pipeline_helper import (
BackendHandler,
BackendHandlerMode,
)
GENERAL_CONFIG_DICT = {}
UNSET_TEST_CONFIG = {"list": [], "flag": []}
UNSET_TEST_API_CONFIG = {"list": [], "flag": []}
TEST_PARAMS_CONFIG = []
SKIP_GROUND_TRUTH = False
UNSUPPORTED_FRAMEWORK_DEVICES = {"numpy": ["gpu", "tpu"]}
if "ARRAY_API_TESTS_MODULE" not in os.environ:
os.environ["ARRAY_API_TESTS_MODULE"] = "ivy.functional.backends.numpy"
def default_framework_mapper(fw, fw_path="/opt/fw/", set_too=False):
# do a path search, get the latest
# so that we can get the highest version
# available dynamically and set that for
# use by the rest of the code
# eg: torch/1.11.0 and torch/1.12.0
# this will map to torch/1.12.0
try:
        versions = os.listdir(f"{fw_path}{fw}")
except FileNotFoundError:
# if no version exists return None
return None
versions = [version.parse(v) for v in versions]
versions.sort()
if set_too:
sys.path.insert(1, f"{fw_path}{fw}/{str(versions[-1])}")
return str(versions[-1])
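# A minimal sketch of why `version.parse` is used above: plain string comparison would
# rank "1.9.0" above "1.12.0"; the version numbers here are illustrative only.
def _example_version_sorting():
    installed = ["1.9.0", "1.12.0", "1.11.0"]
    assert max(installed) == "1.9.0"  # lexicographic comparison picks the wrong one
    latest = str(sorted(version.parse(v) for v in installed)[-1])
    assert latest == "1.12.0"  # semantic comparison picks the real latest
    return latest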
def pytest_report_header(config):
return [
f"backend(s): {config.getoption('backend')}",
f"device: {config.getoption('device')}",
f"number of Hypothesis examples: {config.getoption('num_examples')}",
]
def pytest_configure(config):
global available_frameworks
global multiprocessing_flag
if config.getoption("--set-backend"):
BackendHandler._update_context(BackendHandlerMode.SetBackend)
# Ivy Exception traceback
set_exception_trace_mode(config.getoption("--ivy-tb"))
# Pytest traceback
config.option.tbstyle = config.getoption("--tb")
# device
raw_value = config.getoption("--device")
if raw_value == "all":
devices = ["cpu", "gpu:0", "tpu:0"]
else:
devices = raw_value.split(",")
# framework
raw_value = config.getoption("--backend")
if raw_value == "all":
backend_strs = available_frameworks
else:
backend_strs = raw_value.split(",")
no_mp = config.getoption("--no-mp")
if not no_mp:
# we go multiprocessing, if multiversion
known_backends = {"tensorflow", "torch", "jax"}
found_backends = set()
for fw in backend_strs:
if "/" in fw:
# set backend to be used
BackendHandler._update_context(BackendHandlerMode.SetBackend)
multiprocessing_flag = True
# spin up multiprocessing
# build mp process, queue, initiation etc
input_queue = mp.Queue()
output_queue = mp.Queue()
proc = mp.Process(target=backend_proc, args=(input_queue, output_queue))
# start the process so that it loads the framework
input_queue.put(fw)
proc.start()
# we have the process running, the framework imported within,
# we now pack the queue and the process and store it in dict
# for future access
fwrk, ver = fw.split("/")
mod_backend[fwrk] = (proc, input_queue, output_queue)
# set the latest version for the rest of the test code and move on
default_framework_mapper(fwrk, set_too=False)
found_backends.add(fwrk)
if found_backends:
# we know it's multiversion+multiprocessing
# spin up processes for other backends that
# were not found in --backend flag
left_frameworks = known_backends.difference(found_backends)
for fw in left_frameworks:
# spin up multiprocessing
# build mp process, queue, initiation etc
# find the latest version of this framework
# and set it in the path for rest of the code
# to access
version = default_framework_mapper(fw, set_too=False)
# spin up process only if a version was found else don't
if version:
input_queue = mp.Queue()
proc = mp.Process(
target=backend_proc, args=(input_queue, output_queue)
)
# start the process so that it loads the framework
input_queue.put(f"{fw}/{version}")
proc.start()
# we have the process running, the framework imported within,
# we now pack the queue and the process and store it in dict
# for future access
mod_backend[fw] = (proc, input_queue, output_queue)
else:
# no multiprocessing if multiversion
for fw in backend_strs:
if "/" in fw:
multiprocessing_flag = True
# multiversion, but without multiprocessing
sys.path.insert(1, f"/opt/fw/{fw}")
# frontend
frontend = config.getoption("--frontend")
if frontend:
frontend_strs = frontend.split(",")
# if we are passing a frontend flag, it has to have a version with it
for frontend in frontend_strs:
# spin up multiprocessing
fw, ver = frontend.split("/")
# build mp process, queue, initiation etc
queue = mp.Queue()
proc = mp.Process(target=frontend_proc, args=(queue,))
# start the process so that it loads the framework
proc.start()
queue.put(f"{fw}/{ver}")
# we have the process running, the framework imported within,
# we now pack the queue and the process and store it in dict
# for future access
mod_frontend[fw] = (proc, queue)
# trace_graph
raw_value = config.getoption("--trace_graph")
if raw_value == "both":
trace_modes = [True, False]
elif raw_value == "true":
trace_modes = [True]
else:
trace_modes = [False]
# implicit
raw_value = config.getoption("--with_implicit")
if raw_value == "true":
implicit_modes = [True, False]
else:
implicit_modes = [False]
# create test configs
for backend_str in backend_strs:
for device in devices:
if "/" in backend_str:
backend_str = backend_str.split("/")[0]
if (
                backend_str in UNSUPPORTED_FRAMEWORK_DEVICES
                and device.partition(":")[0]
                in UNSUPPORTED_FRAMEWORK_DEVICES[backend_str]
):
continue
for trace_graph in trace_modes:
for implicit in implicit_modes:
TEST_PARAMS_CONFIG.append(
(
device,
backend_str,
trace_graph,
implicit,
)
)
process_cl_flags(config)
@pytest.fixture(autouse=True)
def run_around_tests(request, on_device, backend_fw, trace_graph, implicit):
try:
test_globals.setup_api_test(
backend_fw,
(
request.function.ground_truth_backend
if hasattr(request.function, "ground_truth_backend")
else None
),
on_device,
(
request.function.test_data
if hasattr(request.function, "test_data")
else None
),
)
except Exception as e:
test_globals.teardown_api_test()
raise RuntimeError(f"Setting up test for {request.function} failed.") from e
yield
test_globals.teardown_api_test()
def pytest_generate_tests(metafunc):
    # Skip backend tests that run against the ground truth backend.
    # This is redundant and wastes resources, as we would be comparing
    # the backend against itself.
global SKIP_GROUND_TRUTH
if hasattr(metafunc.function, "ground_truth_backend"):
        test_parameters = TEST_PARAMS_CONFIG.copy()
        # Find the entries that contain the ground truth backend as their backend
        for entry in test_parameters.copy():
            # Entry 1 is backend_fw
            if entry[1] == metafunc.function.ground_truth_backend and SKIP_GROUND_TRUTH:
                test_parameters.remove(entry)
        metafunc.parametrize(
            "on_device,backend_fw,trace_graph,implicit", test_parameters
)
else:
metafunc.parametrize(
"on_device,backend_fw,trace_graph,implicit", TEST_PARAMS_CONFIG
)
def process_cl_flags(config) -> Dict[str, bool]:
getopt = config.getoption
no_extra_testing = getopt("--no-extra-testing")
tmp_config = {
"as_variable": (
getopt("--skip-variable-testing"),
getopt("--with-variable-testing"),
),
"native_array": (
getopt("--skip-native-array-testing"),
getopt("--with-native-array-testing"),
),
"with_out": (
getopt("--skip-out-testing"),
getopt("--with-out-testing"),
),
"container": (
getopt("--skip-nestable-testing"),
getopt("--with-nestable-testing"),
),
"instance_method": (
getopt("--skip-instance-method-testing"),
getopt("--with-instance-method-testing"),
),
"test_gradients": (
getopt("--skip-gradient-testing"),
getopt("--with-gradient-testing"),
),
"test_trace": (
getopt("--skip-trace-testing"),
getopt("--with-trace-testing"),
),
"transpile": (
False,
getopt("--with-transpile"),
),
"test_cython_wrapper": (
getopt("--skip-cython-wrapper-testing"),
getopt("--with-cython-wrapper-testing"),
),
}
# whether to skip gt testing or not
# global SKIP_GROUND_TRUTH
# SKIP_GROUND_TRUTH = not tmp_config["transpile"][1]
# final mapping for hypothesis value generation
for k, v in tmp_config.items():
# when both flags are true
if v[0] and v[1]:
raise Exception(
f"--skip-{k}--testing and --with-{k}--testing flags cannot be used "
"together"
)
if v[1] and no_extra_testing:
raise Exception(
f"--with-{k}--testing and --no-extra-testing flags cannot be used "
"together"
)
# skipping a test
if v[0] or no_extra_testing:
pf.build_flag(k, False)
# extra testing
if v[1]:
pf.build_flag(k, True)
def pytest_addoption(parser):
parser.addoption("--no-mp", action="store", default=None)
parser.addoption(
"--set-backend",
action="store_true",
default=False,
help="Force the testing pipeline to use ivy.set_backend for backend setting",
)
parser.addoption("--device", action="store", default="cpu")
parser.addoption("-B", "--backend", action="store", default="all")
parser.addoption("--trace_graph", action="store_true")
parser.addoption("--with_implicit", action="store_true")
parser.addoption("--frontend", action="store", default=None)
parser.addoption("--env", action="store", default=None)
parser.addoption("--ground_truth", action="store", default=None)
parser.addoption("--skip-variable-testing", action="store_true")
parser.addoption("--skip-native-array-testing", action="store_true")
parser.addoption("--skip-out-testing", action="store_true")
parser.addoption("--skip-nestable-testing", action="store_true")
parser.addoption("--skip-instance-method-testing", action="store_true")
parser.addoption("--skip-gradient-testing", action="store_true")
parser.addoption("--skip-trace-testing", action="store_true")
parser.addoption("--with-variable-testing", action="store_true")
parser.addoption("--with-native-array-testing", action="store_true")
parser.addoption("--with-out-testing", action="store_true")
parser.addoption("--with-nestable-testing", action="store_true")
parser.addoption("--with-instance-method-testing", action="store_true")
parser.addoption("--with-gradient-testing", action="store_true")
parser.addoption("--with-trace-testing", action="store_true")
parser.addoption("--with-transpile", action="store_true")
parser.addoption("--no-extra-testing", action="store_true")
parser.addoption(
"--my_test_dump",
action="store",
default=None,
help="Print test items in my custom format",
)
parser.addoption("--skip-cython-wrapper-testing", action="store_true")
parser.addoption("--with-cython-wrapper-testing", action="store_true")
def pytest_collection_finish(session):
# Make sure we're not accidentally accessing it during test
global TEST_PARAMS_CONFIG
del TEST_PARAMS_CONFIG
if session.config.option.my_test_dump is not None:
for item in session.items:
item_path = os.path.relpath(item.path)
print(f"{item_path}::{item.name}")
for backend in mod_backend:
            proc = mod_backend[backend][0]  # (process, input_queue, output_queue)
proc.terminate()
pytest.exit("Done!")
| ivy/ivy_tests/test_ivy/conftest.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/conftest.py",
"repo_id": "ivy",
"token_count": 6358
} | 55 |
class NativeClass:
"""An empty class to represent a class that only exist in a specific
framework.
Attributes
----------
_native_class : class reference
A reference to the framework-specific class.
"""
def __init__(self, native_class):
"""Construct the native class object.
Parameters
----------
native_class : class reference
            A reference to the framework-specific class being represented.
"""
self._native_class = native_class
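# A minimal usage sketch; `_FrameworkOnlyType` is a hypothetical stand-in for any
# framework-specific class that has no Ivy counterpart.
def _example_native_class_usage():
    class _FrameworkOnlyType:
        pass
    proxy = NativeClass(_FrameworkOnlyType)
    # the original class stays reachable through the proxy for frontend tests
    return proxy._native_class is _FrameworkOnlyType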
| ivy/ivy_tests/test_ivy/test_frontends/__init__.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/__init__.py",
"repo_id": "ivy",
"token_count": 186
} | 56 |
import pytest
@pytest.fixture(scope="session")
def frontend():
return "jax"
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/conftest.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/conftest.py",
"repo_id": "ivy",
"token_count": 31
} | 57 |
# global
from hypothesis import strategies as st, settings
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers.testing_helpers import handle_frontend_test
# can_cast
@handle_frontend_test(
fn_tree="jax.numpy.can_cast",
from_=helpers.get_dtypes("valid", full=False),
to=helpers.get_dtypes("valid", full=False),
casting=st.sampled_from(["no", "equiv", "safe", "same_kind", "unsafe"]),
test_with_out=st.just(False),
)
# there are 100 combinations of dtypes, so run 200 examples to make sure all are tested
@settings(max_examples=200)
def test_jax_can_cast(
*,
from_,
to,
casting,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=[],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
from_=from_[0],
to=to[0],
casting=casting,
)
@handle_frontend_test(
fn_tree="jax.numpy.finfo",
dtype=helpers.get_dtypes("numeric", full=False),
test_with_out=st.just(False),
)
def test_jax_finfo(*, dtype, test_flags, on_device, fn_tree, frontend, backend_fw):
helpers.test_frontend_function(
input_dtypes=[],
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
dtype=dtype[0],
backend_to_test=backend_fw,
)
@handle_frontend_test(
fn_tree="jax.numpy.iinfo",
dtype=helpers.get_dtypes("numeric", full=False),
test_with_out=st.just(False),
)
@settings(max_examples=200)
def test_jax_iinfo(*, dtype, test_flags, on_device, fn_tree, frontend, backend_fw):
helpers.test_frontend_function(
input_dtypes=[],
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
int_type=dtype[0],
backend_to_test=backend_fw,
)
# promote_types
@handle_frontend_test(
fn_tree="jax.numpy.promote_types",
type1=helpers.get_dtypes("valid", full=False),
type2=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
# there are 100 combinations of dtypes, so run 200 examples to make sure all are tested
@settings(max_examples=200)
def test_jax_promote_types(
*,
type1,
type2,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=[],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
type1=type1[0],
type2=type2[0],
test_values=False,
)
assert str(ret._ivy_dtype) == str(frontend_ret[0])
@handle_frontend_test(
fn_tree="jax.numpy.result_type",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=st.shared(helpers.ints(min_value=2, max_value=5), key="num_arrays"),
shared_dtype=False,
),
test_with_out=st.just(False),
)
@settings(max_examples=200)
def test_jax_result_type(
*, dtype_and_x, test_flags, on_device, fn_tree, frontend, backend_fw
):
dtype, x = helpers.as_lists(*dtype_and_x)
kw = {}
for i, (dtype_, x_) in enumerate(zip(dtype, x)):
kw[f"x{i}"] = x_
test_flags.num_positional_args = len(kw)
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
**kw,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_dtype.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_dtype.py",
"repo_id": "ivy",
"token_count": 1748
} | 58 |
# global
import pytest
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# softsign
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.ops.softsign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(kind="float", full=False, key="dtype"),
safety_factor_scale="log",
small_abs_safety_factor=20,
),
)
def test_mindspore_softsign(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_mindspore/test_ops/test_mindspore_nn_func.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_mindspore/test_ops/test_mindspore_nn_func.py",
"repo_id": "ivy",
"token_count": 407
} | 59 |
# global
import sys
import numpy as np
from hypothesis import strategies as st, assume
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
# tensorinv
@st.composite
def _get_inv_square_matrices(draw):
dim_size = draw(helpers.ints(min_value=1, max_value=10))
batch_shape = draw(st.sampled_from([2, 4, 6, 8, 10]))
generated_shape = (dim_size,) * batch_shape
generated_ind = int(np.floor(len(generated_shape) / 2))
handpicked_shape, handpicked_ind = draw(
st.sampled_from([[(24, 6, 4), 1], [(8, 3, 6, 4), 2], [(6, 7, 8, 16, 21), 3]])
)
shape, ind = draw(
st.sampled_from(
[(generated_shape, generated_ind), (handpicked_shape, handpicked_ind)]
)
)
input_dtype = draw(
helpers.get_dtypes("float", index=1, full=False).filter(
lambda x: x not in ["float16", "bfloat16"]
)
)
invertible = False
while not invertible:
a = draw(
helpers.array_values(
dtype=input_dtype[0],
shape=shape,
min_value=-100,
max_value=100,
)
)
try:
np.linalg.inv(a)
invertible = True
except np.linalg.LinAlgError:
pass
return input_dtype, a, ind
# --- Main --- #
# ------------ #
# inv
@handle_frontend_test(
fn_tree="numpy.linalg.inv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
small_abs_safety_factor=2,
safety_factor_scale="log",
shape=helpers.ints(min_value=2, max_value=20).map(lambda x: (x, x)),
).filter(lambda x: np.linalg.cond(x[1][0].tolist()) < 1 / sys.float_info.epsilon),
test_with_out=st.just(False),
)
def test_numpy_inv(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
)
# lstsq
@handle_frontend_test(
fn_tree="numpy.linalg.lstsq",
x=helpers.get_first_solve_matrix(adjoint=True),
y=helpers.get_second_solve_matrix(),
test_with_out=st.just(False),
)
def test_numpy_lstsq(
x,
y,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype1, a, _ = x
dtype2, b = y
ret, ret_gt = helpers.test_frontend_function(
input_dtypes=[dtype1, dtype2],
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
a=a,
b=b,
test_values=False,
)
for ret_f, ret_gtt in zip(ret, ret_gt):
# TODO: Uncomment this once the function is implemented on the API side
# frontend_ret = ret_f
# frontend_ret_gt = ret_gt
# ret_flattened = helpers.flatten_and_to_np(ret=frontend_ret)
# ret_gt_flattened = helpers.flatten_fw_and_to_np(
# ret=frontend_ret_gt, fw="numpy")
# helpers.value_test(
# ret_np_flat=ret_flattened,
# ret_np_from_gt_flat=ret_gt_flattened,
# ground_truth_backend="numpy",
# )
return
# pinv
@handle_frontend_test(
fn_tree="numpy.linalg.pinv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=2,
),
test_with_out=st.just(False),
)
def test_numpy_pinv(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
)
# solve
@handle_frontend_test(
fn_tree="numpy.linalg.solve",
x=helpers.get_first_solve_batch_matrix(),
y=helpers.get_second_solve_batch_matrix(),
test_with_out=st.just(False),
)
def test_numpy_solve(
x,
y,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype1, x1, _ = x
dtype2, x2, _ = y
helpers.test_frontend_function(
input_dtypes=[dtype1, dtype2],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-4,
atol=1e-4,
a=x1,
b=x2,
)
@handle_frontend_test(
fn_tree="numpy.linalg.tensorinv",
params=_get_inv_square_matrices(),
test_with_out=st.just(False),
)
def test_numpy_tensorinv(
*,
params,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
dtype, x, ind = params
if backend_fw == "paddle":
# Paddle only supports ndim from 0 to 9
assume(x.ndim <= 9)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
rtol=1e-01,
atol=1e-01,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
a=x,
ind=ind,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_solving_equations_and_inverting_matrices.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_solving_equations_and_inverting_matrices.py",
"repo_id": "ivy",
"token_count": 2790
} | 60 |
# global
from hypothesis import strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test, BackendHandler
# --- Helpers --- #
# --------------- #
@st.composite
def generate_copyto_args(draw):
input_dtypes, xs, casting, _ = draw(
np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
)
],
)
)
where = draw(np_frontend_helpers.where(shape=xs[0].shape))
return input_dtypes, xs, casting, where
# copyto
@handle_frontend_test(
fn_tree="numpy.copyto",
test_with_out=st.just(False),
copyto_args=generate_copyto_args(),
)
def test_numpy_copyto(
copyto_args,
backend_fw,
frontend,
):
_, xs, casting, where = copyto_args
if isinstance(where, (list, tuple)):
where = where[0]
with BackendHandler.update_backend(backend_fw) as ivy_backend:
src_ivy = ivy_backend.functional.frontends.numpy.array(xs[0])
dst_ivy = ivy_backend.functional.frontends.numpy.array(xs[1])
ivy_backend.functional.frontends.numpy.copyto(
dst_ivy, src_ivy, where=where, casting=casting
)
src_np = np.array(xs[0])
dst_np = np.array(xs[1])
np.copyto(dst_np, src_np, where=where, casting=casting)
assert dst_np.shape == dst_ivy.shape
# value test
dst_ = ivy_backend.to_numpy(dst_ivy.ivy_array)
helpers.assert_all_close(
dst_, dst_np, backend=backend_fw, ground_truth_backend=frontend
)
assert id(src_ivy) != id(dst_ivy)
# shape
@handle_frontend_test(
fn_tree="numpy.shape",
xs_n_input_dtypes_n_unique_idx=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid")
),
test_with_out=st.just(False),
)
def test_numpy_shape(
*,
xs_n_input_dtypes_n_unique_idx,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs = xs_n_input_dtypes_n_unique_idx
ret, ret_gt = helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
array=xs[0],
)
# Manually compare the shape here because ivy.shape doesn't return an array, so
# ivy.to_numpy will narrow the bit-width, resulting in different dtypes. This is
# not an issue with the front-end function, but how the testing framework converts
# non-array function outputs to arrays.
assert len(ret) == len(ret_gt)
for i, j in zip(ret, ret_gt):
assert i == j
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_basic_operations.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_basic_operations.py",
"repo_id": "ivy",
"token_count": 1425
} | 61 |
# global
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# arccosh
@handle_frontend_test(
fn_tree="numpy.arccosh",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="arccosh"
),
)
def test_numpy_arccosh(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# arcsinh
@handle_frontend_test(
fn_tree="numpy.arcsinh",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="arcsinh"
),
)
def test_numpy_arcsinh(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# arctanh
@handle_frontend_test(
fn_tree="numpy.arctanh",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="arctanh"
),
)
def test_numpy_arctanh(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# cosh
@handle_frontend_test(
fn_tree="numpy.cosh",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="cosh"
),
)
def test_numpy_cosh(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# sinh
@handle_frontend_test(
fn_tree="numpy.sinh",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="sinh"
),
)
def test_numpy_sinh(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
rtol=1e-2,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# tanh
@handle_frontend_test(
fn_tree="numpy.tanh",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="tanh"
),
)
def test_numpy_tanh(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-3,
rtol=1e-3,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_hyperbolic_functions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_hyperbolic_functions.py",
"repo_id": "ivy",
"token_count": 4136
} | 62 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
@handle_frontend_test(
fn_tree="numpy.count_nonzero",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), min_num_dims=1
),
keepdims=st.booleans(),
test_with_out=st.just(False),
)
def test_numpy_count_nonzero(
dtype_and_x,
keepdims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=0,
keepdims=keepdims,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_sorting_searching_counting/test_counting.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_sorting_searching_counting/test_counting.py",
"repo_id": "ivy",
"token_count": 414
} | 63 |