text
stringlengths 17
362k
| id
stringlengths 13
115
| metadata
dict | __index_level_0__
int64 0
75
|
---|---|---|---|
import pytest
import ivy
from ivy.functional.frontends.sklearn.utils.multiclass import type_of_target
# not suitable for usual frontend testing
@pytest.mark.parametrize(
("y", "label"),
[
([1.2], "continuous"),
([1], "binary"),
([1, 2], "binary"),
([1, 2, 3], "multiclass"),
([1, 2, 3, 4], "multiclass"),
([1, 2, 3, 4, 5], "multiclass"),
([1, 2, 2], "binary"),
([1, 2.0, 2, 3], "multiclass"),
([1.0, 2.0, 2.0], "binary"),
([[[1, 2], [3, 4]]], "unknown"),
([[1, 2], [1, 1]], "multilabel-indicator"),
],
)
def test_sklearn_type_of_target(y, label):
assert type_of_target(ivy.array(y)) == label
| ivy/ivy_tests/test_ivy/test_frontends/test_sklearn/test_utils/test_multiclass.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_sklearn/test_utils/test_multiclass.py",
"repo_id": "ivy",
"token_count": 343
} | 59 |
# global
from hypothesis import assume, strategies as st
from ivy.func_wrapper import output_to_native_arrays
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_linalg import (
_generate_dot_dtype_and_arrays,
)
from ivy_tests.test_ivy.test_frontends.test_tensorflow.test_nn import (
_generate_bias_data,
)
from ivy_tests.test_ivy.test_functional.test_experimental.test_nn.test_layers import (
_lstm_helper,
)
import ivy
from ivy.functional.frontends.tensorflow.func_wrapper import (
inputs_to_ivy_arrays,
outputs_to_frontend_arrays,
)
import ivy.functional.frontends.tensorflow as tf_frontend
# --- Helpers --- #
# --------------- #
@st.composite
def _x_and_filters(
draw,
dtypes,
data_format,
padding=None,
stride_min=1,
stride_max=4,
dilation_min=1,
dilation_max=4,
type: str = "depthwise",
):
data_format = draw(data_format)
dtype = draw(dtypes)
dim = 2 if type in ["depthwise", "separable"] else 4
if padding is None:
padding = (st.sampled_from(["same", "valid"]),)
padding = draw(padding)
dilations = draw(
st.one_of(
st.integers(dilation_min, dilation_max),
st.lists(
st.integers(dilation_min, dilation_max), min_size=dim, max_size=dim
),
)
)
fdilations = [dilations] * dim if isinstance(dilations, int) else dilations
if type in ["depthwise", "separable"]:
# if any value in dilations is greater than 1, tensorflow implements
# depthwise_covn2d as an atrous depthwise convolution, in which case all values
# in strides must be equal to 1.
if any(x > 1 for x in fdilations):
stride = 1
else:
stride = draw(st.integers(stride_min, stride_max))
else:
stride = draw(
st.one_of(
st.integers(stride_min, stride_max),
st.lists(
st.integers(stride_min, stride_max), min_size=dim, max_size=dim
),
)
)
if dim == 2:
min_x_height = 1
min_x_width = 1
filter_shape = draw(
st.tuples(
helpers.ints(min_value=3, max_value=5),
helpers.ints(min_value=3, max_value=5),
helpers.ints(min_value=1, max_value=3),
helpers.ints(min_value=1, max_value=3),
)
)
min_x_height = filter_shape[0] + (filter_shape[0] - 1) * (fdilations[0] - 1)
min_x_width = filter_shape[1] + (filter_shape[1] - 1) * (fdilations[1] - 1)
d_in = filter_shape[2]
if data_format == "channels_last":
x_shape = draw(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=min_x_height, max_value=100),
helpers.ints(min_value=min_x_width, max_value=100),
helpers.ints(min_value=d_in, max_value=d_in),
)
)
else:
x_shape = draw(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=d_in, max_value=d_in),
helpers.ints(min_value=min_x_height, max_value=100),
helpers.ints(min_value=min_x_width, max_value=100),
)
)
x = draw(
helpers.array_values(dtype=dtype[0], shape=x_shape, min_value=0, max_value=1)
)
filters = draw(
helpers.array_values(
dtype=dtype[0], shape=filter_shape, min_value=0, max_value=1
)
)
if type in ["depthwise", "separable"]:
stride = (stride, stride)
if isinstance(dilations, int):
dilations = (dilations,) * dim
return dtype, x, filters, dilations, data_format, stride, padding
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="tensorflow.keras.backend.dot",
data=_generate_dot_dtype_and_arrays(min_num_dims=2),
)
def test_tensorflow_dot(*, data, on_device, fn_tree, frontend, test_flags, backend_fw):
(input_dtypes, x) = data
return helpers.test_frontend_function(
input_dtypes=input_dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
on_device=on_device,
frontend=frontend,
fn_tree=fn_tree,
rtol=0.5,
atol=0.5,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="tensorflow.keras.backend.bias_add",
data=_generate_bias_data(keras_backend_fn=True),
test_with_out=st.just(False),
)
def test_tensorflow_keras_backend_bias_add(
*,
data,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
data_format, dtype, x, bias = data
helpers.test_frontend_function(
input_dtypes=dtype * 2,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
bias=bias,
data_format=data_format,
)
@handle_frontend_test(
fn_tree="tensorflow.keras.backend.depthwise_conv2d",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["channels_last"]),
padding=st.sampled_from(["valid", "same"]),
type="depthwise",
),
test_with_out=st.just(False),
)
def test_tensorflow_keras_backend_depthwise_conv2d(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, filters, dilation, data_format, stride, padding = x_f_d_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x,
depthwise_kernel=filters,
strides=stride,
padding=padding,
data_format=data_format,
dilation_rate=dilation,
)
# mean
@handle_frontend_test(
fn_tree="tensorflow.keras.backend.mean",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
force_int_axis=True,
valid_axis=True,
min_num_dims=1,
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
),
keepdims=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_keras_backend_mean(
*,
dtype_x_axis,
keepdims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
atol=1e-1,
rtol=1e-1,
on_device=on_device,
x=x[0],
axis=axis,
keepdims=keepdims,
)
@handle_frontend_test(
fn_tree="tensorflow.keras.backend.rnn",
rnn_args=_lstm_helper(),
test_with_out=st.just(False),
)
def test_tensorflow_keras_backend_rnn(
*,
rnn_args,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
(
input_dtypes,
inputs,
kernel_orig,
recurrent_kernel_orig,
bias_orig,
recurrent_bias_orig,
initial_states,
go_backwards,
mask,
unroll,
input_length,
time_major,
zero_output_for_mask,
return_all_outputs,
) = rnn_args
# unsupported dtype of float16 is in our _lstm_step function
# so can't be inferred through ivy.function_unsupported_devices_and_dtypes
assume(not (backend_fw == "torch" and input_dtypes[0] == "float16"))
def _lstm_step(cell_inputs, cell_states):
nonlocal kernel_orig, recurrent_kernel_orig, bias_orig, recurrent_bias_orig
kernel = ivy.array(kernel_orig)
recurrent_kernel = ivy.array(recurrent_kernel_orig)
bias = ivy.array(bias_orig)
recurrent_bias = ivy.array(recurrent_bias_orig)
h_tm1 = cell_states[0] # previous memory state
c_tm1 = cell_states[1] # previous carry state
z = ivy.dot(cell_inputs, kernel) + bias
z += ivy.dot(h_tm1, recurrent_kernel) + recurrent_bias
z0, z1, z2, z3 = ivy.split(z, num_or_size_splits=4, axis=-1)
i = ivy.sigmoid(z0) # input
f = ivy.sigmoid(z1) # forget
c = f * c_tm1 + i * ivy.tanh(z2)
o = ivy.sigmoid(z3) # output
h = o * ivy.tanh(c)
return h, [h, c]
np_vals = [inputs, *initial_states, mask]
if mask is None:
np_vals.pop(-1)
with ivy.utils.backend.ContextManager(backend_fw):
_lstm_step_backend = outputs_to_frontend_arrays(
inputs_to_ivy_arrays(_lstm_step)
)
vals = [ivy.array(val) for val in np_vals]
if len(vals) > 3:
inputs, init_h, init_c, mask = vals
else:
inputs, init_h, init_c = vals
initial_states = [init_h, init_c]
args = (_lstm_step_backend, inputs, initial_states)
kwargs = {
"go_backwards": go_backwards,
"mask": mask,
"constants": None,
"unroll": unroll,
"input_length": input_length,
"time_major": time_major,
"zero_output_for_mask": zero_output_for_mask,
"return_all_outputs": return_all_outputs,
}
ret = tf_frontend.keras.backend.rnn(*args, **kwargs)
ivy_ret = ivy.nested_map(lambda x: x.ivy_array, ret, shallow=False)
ivy_idxs = ivy.nested_argwhere(ivy_ret, ivy.is_ivy_array)
ivy_vals = ivy.multi_index_nest(ivy_ret, ivy_idxs)
ret_np_flat = [x.to_numpy() for x in ivy_vals]
with ivy.utils.backend.ContextManager(frontend):
_lstm_step_gt = output_to_native_arrays(inputs_to_ivy_arrays(_lstm_step))
import tensorflow as tf
vals = [ivy.array(val).data for val in np_vals]
if len(vals) > 3:
inputs, init_h, init_c, mask = vals
else:
inputs, init_h, init_c = vals
initial_states = [init_h, init_c]
args = (_lstm_step_gt, inputs, initial_states)
kwargs = {
"go_backwards": go_backwards,
"mask": mask,
"constants": None,
"unroll": unroll,
"input_length": input_length,
"time_major": time_major,
"zero_output_for_mask": zero_output_for_mask,
"return_all_outputs": return_all_outputs,
}
ret = tf.keras.backend.rnn(*args, **kwargs)
native_idxs = ivy.nested_argwhere(ret, lambda x: isinstance(x, ivy.NativeArray))
native_vals = ivy.multi_index_nest(ret, native_idxs)
frontend_ret_np_flat = [x.numpy() for x in native_vals]
helpers.value_test(
ret_np_flat=ret_np_flat,
ret_np_from_gt_flat=frontend_ret_np_flat,
rtol=1e-1,
atol=1e-1,
backend=backend_fw,
ground_truth_backend=frontend,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_keras/test_backend.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_keras/test_backend.py",
"repo_id": "ivy",
"token_count": 5755
} | 60 |
# global
from hypothesis import strategies as st
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_method
import pytest
CLASS_TREE = "ivy.functional.frontends.tensorflow.tensor.TensorShape"
# __add__
@pytest.mark.skip("TODO: test needs implementing correctly")
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="tensorflow.TensorShape",
method_name="__add__",
shape_list=helpers.list_of_size(x=st.sampled_from([0, 1, 2, 3, 4]), size=3),
other_list=helpers.list_of_size(x=st.sampled_from([0, 1, 2, 3, 4]), size=3),
)
def test_tensorflow__add__(
shape_list,
other_list,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
):
helpers.test_frontend_method(
init_input_dtypes=[ivy.int64],
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"dims": shape_list,
},
method_input_dtypes=[ivy.int64],
method_all_as_kwargs_np={
"other": other_list,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# __bool__
@pytest.mark.skip("TODO: test needs implementing correctly")
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="tensorflow.TensorShape",
method_name="__bool__",
shape_list=helpers.list_of_size(x=st.sampled_from([0, 1, 2, 3, 4]), size=3),
)
def test_tensorflow__bool__(
shape_list,
frontend,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
on_device,
):
helpers.test_frontend_method(
init_input_dtypes=[ivy.int64],
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"dims": shape_list,
},
method_input_dtypes=[ivy.int64],
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_tensorshape.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_tensorshape.py",
"repo_id": "ivy",
"token_count": 997
} | 61 |
# global
import numpy as np
from hypothesis import strategies as st
from ivy_tests.test_ivy.helpers import handle_frontend_test
# local
import ivy_tests.test_ivy.helpers as helpers
@handle_frontend_test(
fn_tree="torch.nn.functional.alpha_dropout",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=50,
allow_inf=False,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
),
prob=helpers.floats(min_value=0, max_value=0.9),
training=st.booleans(),
test_inplace=st.just(False),
)
def test_torch_alpha_dropout(
*,
dtype_and_x,
prob,
training,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
ret = helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
p=prob,
training=training,
test_values=False,
)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
x = np.asarray(x[0], input_dtype[0])
for u in ret:
# cardinality test
assert u.shape == x.shape
@handle_frontend_test(
fn_tree="torch.nn.functional.dropout",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=50,
allow_inf=False,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
),
prob=helpers.floats(min_value=0, max_value=0.9),
training=st.booleans(),
test_inplace=st.just(False),
)
def test_torch_dropout(
*,
dtype_and_x,
prob,
training,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
if not training or prob == 0:
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
p=prob,
training=training,
)
else:
ret = helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
p=prob,
training=training,
test_values=False,
)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
for u in ret:
# cardinality test
assert u.shape == x[0].shape
@handle_frontend_test(
fn_tree="torch.nn.functional.dropout1d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=50,
allow_inf=False,
min_num_dims=2,
max_num_dims=3,
min_dim_size=2,
),
prob=helpers.floats(min_value=0, max_value=0.9),
training=st.booleans(),
test_with_out=st.just(True),
test_inplace=st.just(False),
)
def test_torch_dropout1d(
*,
dtype_and_x,
prob,
training,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
ret = helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
p=prob,
training=training,
test_values=False,
backend_to_test=backend_fw,
)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
x = np.asarray(x[0], input_dtype[0])
for u in ret:
# cardinality test
assert u.shape == x.shape
@handle_frontend_test(
fn_tree="torch.nn.functional.dropout2d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=50,
allow_inf=False,
min_num_dims=3,
max_num_dims=4,
min_dim_size=1,
),
prob=helpers.floats(min_value=0, max_value=0.9),
training=st.booleans(),
test_with_out=st.just(False),
test_inplace=st.just(False),
)
def test_torch_dropout2d(
*,
dtype_and_x,
prob,
training,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
ret = helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
p=prob,
training=training,
test_values=False,
)
ret = helpers.flatten_and_to_np(backend=backend_fw, ret=ret)
x = np.asarray(x[0], dtype[0])
for u in ret:
# cardinality test
assert u.shape == x.shape
@handle_frontend_test(
fn_tree="torch.nn.functional.dropout3d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=50,
allow_inf=False,
min_num_dims=4,
max_num_dims=5,
min_dim_size=2,
),
prob=helpers.floats(min_value=0, max_value=0.9),
training=st.booleans(),
test_with_out=st.just(True),
test_inplace=st.just(False),
)
def test_torch_dropout3d(
*,
dtype_and_x,
prob,
training,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
ret = helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
p=prob,
training=training,
test_values=False,
)
ret = helpers.flatten_and_to_np(backend=backend_fw, ret=ret)
x = np.asarray(x[0], input_dtype[0])
for u in ret:
# cardinality test
assert u.shape == x.shape
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_dropout_functions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_dropout_functions.py",
"repo_id": "ivy",
"token_count": 3279
} | 62 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _elemwise_helper(draw):
value_strategy = st.one_of(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
st.integers(min_value=-10000, max_value=10000),
st.floats(min_value=-10000, max_value=10000),
)
dtype_and_x1 = draw(value_strategy)
if isinstance(dtype_and_x1, tuple):
dtype1 = dtype_and_x1[0]
x1 = dtype_and_x1[1][0]
else:
dtype1 = []
x1 = dtype_and_x1
dtype_and_x2 = draw(value_strategy)
if isinstance(dtype_and_x2, tuple):
dtype2 = dtype_and_x2[0]
x2 = dtype_and_x2[1][0]
else:
dtype2 = []
x2 = dtype_and_x2
num_pos_args = None
if not dtype1 and not dtype2:
num_pos_args = 2
elif not dtype1:
x1, x2 = x2, x1
input_dtypes = dtype1 + dtype2
return x1, x2, input_dtypes, num_pos_args
# --- Main --- #
# ------------ #
# ToDo: Fix this test after torch override of assert is implemented
# @handle_frontend_test(
# fn_tree="torch._assert",
# dtype_and_x=helpers.dtype_and_values(
# available_dtypes=helpers.get_dtypes("valid"),
# num_arrays=2,
# ),
# test_with_out=st.just(False),
# )
# def test_torch__assert(
# dtype_and_x,
# on_device,
# fn_tree,
# frontend,
# test_flags,
# ):
# input_dtype, x = dtype_and_x
# helpers.test_frontend_function(
# input_dtypes=input_dtype,
# frontend=frontend,
# test_flags=test_flags,
# fn_tree=fn_tree,
# on_device=on_device,
# condition=x[0],
# message=x[1],
# )
# bincount
@handle_frontend_test(
fn_tree="torch.bincount",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_value=1,
max_value=2,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=1,
),
key="a_s_d",
),
),
test_with_out=st.just(False),
)
def test_torch_bincount(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
weights=None,
minlength=0,
)
@handle_frontend_test(
fn_tree="torch.result_type",
dtypes_and_xs=_elemwise_helper(),
test_with_out=st.just(False),
)
def test_torch_result_type(
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
x1, x2, input_dtypes, num_pos_args = dtypes_and_xs
if num_pos_args is not None:
test_flags.num_positional_args = num_pos_args
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensor=x1,
other=x2,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_utilities.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_utilities.py",
"repo_id": "ivy",
"token_count": 1752
} | 63 |
"""Collection of tests for manipulation functions."""
# global
import numpy as np
from hypothesis import strategies as st, assume
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
# --- Helpers --- #
# --------------- #
@st.composite
def _arrays_idx_n_dtypes(draw):
num_dims = draw(st.shared(helpers.ints(min_value=1, max_value=4), key="num_dims"))
num_arrays = draw(
st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays")
)
common_shape = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=3),
size=num_dims - 1,
)
)
unique_idx = draw(helpers.ints(min_value=0, max_value=num_dims - 1))
unique_dims = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=3),
size=num_arrays,
)
)
xs = []
input_dtypes = draw(
helpers.array_dtypes(available_dtypes=draw(helpers.get_dtypes("float")))
)
for ud, dt in zip(unique_dims, input_dtypes):
x = draw(
helpers.array_values(
shape=common_shape[:unique_idx] + [ud] + common_shape[unique_idx:],
dtype=dt,
)
)
xs.append(x)
return xs, input_dtypes, unique_idx
# Extra #
# ------#
@st.composite
def _basic_min_x_max(draw):
dtype, value = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
)
)
min_val = draw(helpers.array_values(dtype=dtype[0], shape=()))
max_val = draw(
helpers.array_values(dtype=dtype[0], shape=()).filter(lambda x: x > min_val)
)
return [dtype], (value[0], min_val, max_val)
@st.composite
def _broadcastable_arrays(draw):
shapes = draw(helpers.mutually_broadcastable_shapes(num_shapes=3))
dtypes, values = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), shape=shapes[0]
)
)
min_val = draw(
st.one_of(
st.floats(-5, 5),
st.just(None),
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), shape=shapes[1]
),
)
)
max_val = draw(
st.one_of(
st.floats(-5, 5),
st.just(None),
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), shape=shapes[2]
),
)
)
if min_val is None and max_val is None:
generate_max = draw(st.booleans())
if generate_max:
max_val = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), shape=shapes[2]
)
)
else:
min_val = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), shape=shapes[1]
)
)
if min_val is not None:
if not isinstance(min_val, float):
dtypes.append(min_val[0][0])
min_val = min_val[1][0]
else:
dtypes.append(ivy.float32)
if max_val is not None:
if not isinstance(max_val, float):
dtypes.append(max_val[0][0])
max_val = max_val[1][0]
else:
dtypes.append(ivy.float32)
return dtypes, values[0], min_val, max_val
@st.composite
def _constant_pad_helper(draw):
dtype, value, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), ret_shape=True, min_num_dims=1
)
)
pad_width = tuple(
draw(
st.lists(
st.tuples(
helpers.ints(min_value=0, max_value=5),
helpers.ints(min_value=0, max_value=5),
),
min_size=len(shape),
max_size=len(shape),
)
)
)
return dtype, value, pad_width
@st.composite
def _get_splits(
draw,
allow_none=True,
min_num_dims=1,
axis=None,
allow_array_indices=True,
is_mod_split=False,
):
"""Generate valid splits, either by generating an integer that evenly
divides the axis or a list of splits that sum to the length of the axis
being split."""
shape = draw(
st.shared(helpers.get_shape(min_num_dims=min_num_dims), key="value_shape")
)
if axis is None:
axis = draw(
st.shared(helpers.get_axis(shape=shape, force_int=True), key="target_axis")
)
@st.composite
def _get_int_split(draw):
if shape[axis] == 0:
return 0
factors = []
for i in range(1, shape[axis] + 1):
if shape[axis] % i == 0:
factors.append(i)
return draw(st.sampled_from(factors))
@st.composite
def _get_list_split(draw, allow_arr_indices=True, is_other_split=False):
num_or_size_splits = []
while sum(num_or_size_splits) < shape[axis]:
split_value = draw(
helpers.ints(
min_value=1,
max_value=shape[axis] - sum(num_or_size_splits),
)
)
num_or_size_splits.append(split_value)
if is_other_split:
num_or_size_splits = list(set(num_or_size_splits))
if allow_arr_indices:
gen_random_native = draw(st.booleans())
if gen_random_native:
return np.asarray(num_or_size_splits, dtype=np.int32)
return num_or_size_splits
if allow_none:
return draw(
_get_list_split(
allow_arr_indices=allow_array_indices, is_other_split=is_mod_split
)
| _get_int_split()
| st.none()
)
else:
return draw(
_get_list_split(
allow_arr_indices=allow_array_indices, is_other_split=is_mod_split
)
| _get_int_split()
)
@st.composite
def _permute_dims_helper(draw):
shape = draw(st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"))
dims = [x for x in range(len(shape))]
permutation = draw(st.permutations(dims))
return permutation
@st.composite
def _repeat_helper(draw):
shape = draw(st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"))
axis = draw(
st.shared(
st.one_of(st.none(), helpers.get_axis(shape=shape, max_size=1)), key="axis"
)
)
if not isinstance(axis, int) and axis is not None:
axis = axis[0]
repeat_shape = (
(draw(st.one_of(st.just(1), st.just(shape[axis]))),)
if axis is not None
else (1,)
)
repeat = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
shape=repeat_shape,
min_value=0,
max_value=10,
)
)
return repeat
@st.composite
def _squeeze_helper(draw):
shape = draw(st.shared(helpers.get_shape(), key="value_shape"))
valid_axes = []
for index, axis in enumerate(shape):
if axis == 1:
valid_axes.append(index)
valid_axes.insert(0, None)
return draw(st.sampled_from(valid_axes))
@st.composite
def _stack_helper(draw):
shape = draw(st.shared(helpers.get_shape(min_num_dims=1), key="values_shape"))
num_arrays = draw(
st.shared(helpers.ints(min_value=1, max_value=3), key="num_arrays")
)
dtype = draw(st.sampled_from(draw(helpers.get_dtypes("valid"))))
arrays = []
dtypes = [dtype for _ in range(num_arrays)]
for _ in range(num_arrays):
array = draw(helpers.array_values(dtype=dtype, shape=shape))
arrays.append(np.asarray(array, dtype=dtype))
return dtypes, arrays
# --- Main --- #
# ------------ #
# clip
@handle_test(
fn_tree="functional.ivy.clip",
dtype_x_min_max=_broadcastable_arrays(),
)
def test_clip(*, dtype_x_min_max, test_flags, backend_fw, fn_name, on_device):
dtypes, x_list, min_val, max_val = dtype_x_min_max
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x_list,
x_min=min_val,
x_max=max_val,
)
# concat
@handle_test(
fn_tree="functional.ivy.concat",
xs_n_input_dtypes_n_unique_idx=_arrays_idx_n_dtypes(),
)
def test_concat(
*, xs_n_input_dtypes_n_unique_idx, test_flags, backend_fw, fn_name, on_device
):
xs, input_dtypes, unique_idx = xs_n_input_dtypes_n_unique_idx
helpers.test_function(
input_dtypes=input_dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
xs=xs,
axis=unique_idx,
)
# constant_pad
@handle_test(
fn_tree="functional.ivy.constant_pad",
dtype_value_pad_width_constant=_constant_pad_helper(),
)
def test_constant_pad(
*, dtype_value_pad_width_constant, test_flags, backend_fw, fn_name, on_device
):
dtype, value, pad_width = dtype_value_pad_width_constant
constant = float(value[0].flat[0]) # just use the first value as fill value
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=value[0],
pad_width=pad_width,
value=constant,
)
# expand_dims
@handle_test(
fn_tree="functional.ivy.expand_dims",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="value_shape"),
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(), key="value_shape"),
),
test_with_copy=st.just(True),
)
def test_expand_dims(*, dtype_value, axis, test_flags, backend_fw, fn_name, on_device):
dtype, value = dtype_value
try:
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=value[0],
axis=axis,
)
# ToDo: fix `get_axis`; `unique=True` does not always work
except (ValueError, Exception) as e:
if "repeated axis" in str(e):
assume(False)
raise e
# flip
@handle_test(
fn_tree="functional.ivy.flip",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=True),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
min_size=1,
max_size=1,
force_int=True,
),
test_with_copy=st.just(True),
)
def test_flip(*, dtype_value, axis, test_flags, backend_fw, fn_name, on_device):
dtype, value = dtype_value
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=value[0],
axis=axis,
)
# permute_dims
@handle_test(
fn_tree="functional.ivy.permute_dims",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=True),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
permutation=_permute_dims_helper(),
test_with_copy=st.just(True),
)
def test_permute_dims(
*, dtype_value, permutation, test_flags, backend_fw, fn_name, on_device
):
dtype, value = dtype_value
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=value[0],
axes=permutation,
)
# repeat
@handle_test(
fn_tree="functional.ivy.repeat",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=True),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
axis=st.shared(
st.one_of(
st.none(),
helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
max_size=1,
),
),
key="axis",
),
repeat=st.one_of(st.integers(1, 10), _repeat_helper()),
)
def test_repeat(
*, dtype_value, axis, repeat, test_flags, backend_fw, fn_name, on_device
):
value_dtype, value = dtype_value
if not isinstance(repeat, int):
repeat_dtype, repeat_list = repeat
repeat = repeat_list[0]
value_dtype += repeat_dtype
if not isinstance(axis, int) and axis is not None:
axis = axis[0]
helpers.test_function(
input_dtypes=value_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=value[0],
repeats=repeat,
axis=axis,
xs_grad_idxs=[[0, 0]],
)
@handle_test(
fn_tree="functional.ivy.reshape",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=True),
shape=st.shared(helpers.get_shape(), key="value_shape"),
),
reshape=helpers.reshape_shapes(
shape=st.shared(helpers.get_shape(), key="value_shape")
),
order=st.sampled_from(["C", "F"]),
allowzero=st.booleans(),
test_with_copy=st.just(True),
)
def test_reshape(
*,
dtype_value,
reshape,
order,
allowzero,
test_flags,
backend_fw,
fn_name,
on_device
):
dtype, value = dtype_value
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=value[0],
shape=reshape,
order=order,
allowzero=allowzero,
)
# roll
@handle_test(
fn_tree="functional.ivy.roll",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
shift=helpers.dtype_and_values(
available_dtypes=[ivy.int32],
max_num_dims=1,
min_dim_size=st.shared(
helpers.ints(min_value=1, max_value=10),
key="shift_len",
),
max_dim_size=st.shared(
helpers.ints(min_value=1, max_value=10),
key="shift_len",
),
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
force_tuple=True,
unique=False,
min_size=st.shared(
helpers.ints(min_value=1, max_value=10),
key="shift_len",
),
max_size=st.shared(
helpers.ints(min_value=1, max_value=10),
key="shift_len",
),
),
# test_gradients=st.just(False),
)
def test_roll(*, dtype_value, shift, axis, test_flags, backend_fw, fn_name, on_device):
value_dtype, value = dtype_value
shift_dtype, shift_val = shift
if shift_val[0].ndim == 0: # If shift is an int
shift_val = shift_val[0] # Drop shift's dtype (always int32)
axis = axis[0] # Extract an axis value from the tuple
else:
# Drop shift's dtype (always int32) and convert list to tuple
shift_val = tuple(shift_val[0].tolist())
helpers.test_function(
input_dtypes=value_dtype + shift_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=value[0],
shift=shift_val,
axis=axis,
xs_grad_idxs=[[0, 0]],
)
# TODO: there is a failure with paddle (dtype('int32')) caused by the `_get_splits`
# method which returns a numpy array with a numpy dtype
@handle_test(
fn_tree="functional.ivy.split",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
axis=st.shared(
helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
force_int=True,
),
key="target_axis",
),
with_remainder=st.booleans(),
num_or_size_splits=_get_splits(),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_split(
*,
dtype_value,
num_or_size_splits,
axis,
with_remainder,
test_flags,
backend_fw,
fn_name,
on_device
):
dtype, value = dtype_value
if (
not isinstance(num_or_size_splits, int)
and not isinstance(num_or_size_splits, list)
and num_or_size_splits is not None
):
dtype = [*dtype, num_or_size_splits.dtype]
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=value[0],
num_or_size_splits=num_or_size_splits,
axis=axis,
with_remainder=with_remainder,
)
# squeeze
@handle_test(
fn_tree="functional.ivy.squeeze",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=True),
shape=st.shared(helpers.get_shape(), key="value_shape"),
),
axis=_squeeze_helper(),
test_with_copy=st.just(True),
)
def test_squeeze(*, dtype_value, axis, test_flags, backend_fw, fn_name, on_device):
dtype, value = dtype_value
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=value[0],
axis=axis,
)
# stack
@handle_test(
fn_tree="functional.ivy.stack",
dtypes_arrays=_stack_helper(),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="values_shape"),
force_int=True,
),
)
def test_stack(*, dtypes_arrays, axis, test_flags, backend_fw, fn_name, on_device):
dtypes, arrays = dtypes_arrays
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
arrays=arrays,
axis=axis,
)
# swapaxes
@handle_test(
fn_tree="functional.ivy.swapaxes",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=True),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
),
axis0=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"), force_int=True
),
axis1=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"), force_int=True
),
test_with_copy=st.just(True),
)
def test_swapaxes(
*, dtype_value, axis0, axis1, test_flags, backend_fw, fn_name, on_device
):
dtype, value = dtype_value
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=value[0],
axis0=axis0,
axis1=axis1,
)
@handle_test(
fn_tree="functional.ivy.tile",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=True),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
repeat=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("signed_integer"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape").map(
lambda rep: (len(rep),)
),
min_value=0,
max_value=10,
),
)
def test_tile(*, dtype_value, repeat, test_flags, backend_fw, fn_name, on_device):
dtype, value = dtype_value
repeat_dtype, repeat_list = repeat
helpers.test_function(
input_dtypes=dtype + repeat_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=value[0],
repeats=repeat_list[0],
rtol_=1e-2,
atol_=1e-2,
xs_grad_idxs=[[0, 0]],
)
# unstack
@handle_test(
fn_tree="functional.ivy.unstack",
x_n_dtype_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=5,
min_axis=1,
max_axis=4,
),
keepdims=st.booleans(),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_unstack(
*, x_n_dtype_axis, keepdims, test_flags, backend_fw, fn_name, on_device
):
# smoke test
dtype, x, axis = x_n_dtype_axis
if axis >= len(x[0].shape):
axis = len(x[0].shape) - 1
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
axis=axis,
keepdims=keepdims,
)
# zero_pad
@handle_test(
fn_tree="functional.ivy.zero_pad",
dtype_value_pad_width=_constant_pad_helper(),
)
def test_zero_pad(*, dtype_value_pad_width, test_flags, backend_fw, fn_name, on_device):
dtype, value, pad_width = dtype_value_pad_width
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=value[0],
pad_width=pad_width,
)
| ivy/ivy_tests/test_ivy/test_functional/test_core/test_manipulation.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_manipulation.py",
"repo_id": "ivy",
"token_count": 10959
} | 64 |
# global
import math
from hypothesis import strategies as st
from hypothesis import assume
import numpy as np
import pytest
import itertools
import sys
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test, BackendHandler
import ivy
# --- Helpers --- #
# --------------- #
# batched_outer
@st.composite
def _batched_outer_data(draw):
shape = draw(helpers.get_shape(min_num_dims=2, max_num_dims=3))
tensors_num = draw(helpers.ints(min_value=1, max_value=5))
dtype, tensors = draw(
helpers.dtype_and_values(
num_arrays=tensors_num,
available_dtypes=helpers.get_dtypes("valid"),
shape=shape,
large_abs_safety_factor=20,
small_abs_safety_factor=20,
safety_factor_scale="log",
)
)
return dtype, tensors
@st.composite
def _generate_diag_args(draw):
x_shape = draw(
helpers.get_shape(
min_num_dims=1, max_num_dims=2, min_dim_size=1, max_dim_size=5
)
)
flat_x_shape = math.prod(x_shape)
dtype_x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=x_shape,
min_value=-1e2,
max_value=1e2,
)
)
offset = draw(helpers.ints(min_value=-5, max_value=5))
dtype = dtype_x[0]
dtype_padding_value = draw(
helpers.dtype_and_values(
available_dtypes=dtype,
max_dim_size=1,
min_dim_size=1,
min_num_dims=1,
max_num_dims=1,
min_value=-1e2,
max_value=1e2,
)
)
align = draw(
st.sampled_from(["RIGHT_LEFT", "RIGHT_RIGHT", "LEFT_LEFT", "LEFT_RIGHT"])
)
if offset < 0:
num_rows_is_negative = draw(st.booleans())
if num_rows_is_negative:
num_rows = -1
num_cols = draw(
st.one_of(
st.integers(min_value=-1, max_value=-1),
st.integers(min_value=flat_x_shape, max_value=50),
)
)
else:
num_rows_is_as_expected = draw(st.booleans())
if num_rows_is_as_expected:
num_rows = flat_x_shape + abs(offset)
num_cols = draw(
st.one_of(
st.integers(min_value=-1, max_value=-1),
st.integers(min_value=flat_x_shape, max_value=50),
)
)
else:
num_rows = draw(
st.integers(min_value=flat_x_shape + abs(offset) + 1, max_value=50)
)
num_cols = draw(st.sampled_from([-1, flat_x_shape]))
if offset > 0:
num_cols_is_negative = draw(st.booleans())
if num_cols_is_negative:
num_cols = -1
num_rows = draw(
st.one_of(
st.integers(min_value=-1, max_value=-1),
st.integers(min_value=flat_x_shape, max_value=50),
)
)
else:
num_cols_is_as_expected = draw(st.booleans())
if num_cols_is_as_expected:
num_cols = flat_x_shape + abs(offset)
num_rows = draw(
st.one_of(
st.integers(min_value=-1, max_value=-1),
st.integers(min_value=flat_x_shape, max_value=50),
)
)
else:
num_cols = draw(
st.integers(min_value=flat_x_shape + abs(offset) + 1, max_value=50)
)
num_rows = draw(st.sampled_from([-1, flat_x_shape]))
if offset == 0:
num_rows_is_negative = draw(st.booleans())
num_cols_is_negative = draw(st.booleans())
if num_rows_is_negative and num_cols_is_negative:
num_rows = -1
num_cols = -1
if num_rows_is_negative:
num_rows = -1
num_cols = draw(
st.integers(min_value=flat_x_shape + abs(offset), max_value=50)
)
if num_cols_is_negative:
num_cols = -1
num_rows = draw(
st.integers(min_value=flat_x_shape + abs(offset), max_value=50)
)
else:
num_rows_is_as_expected = draw(st.booleans())
if num_rows_is_as_expected:
num_rows = flat_x_shape
num_cols = draw(
st.integers(min_value=flat_x_shape + abs(offset), max_value=50)
)
else:
num_cols = flat_x_shape
num_rows = draw(
st.integers(min_value=flat_x_shape + abs(offset), max_value=50)
)
return dtype_x, offset, dtype_padding_value, align, num_rows, num_cols
# dot
@st.composite
def _generate_dot_dtype_and_arrays(draw, min_num_dims=0):
shape_a = draw(
helpers.get_shape(
min_dim_size=2, max_dim_size=5, min_num_dims=min_num_dims, max_num_dims=5
)
)
shape_b = draw(
helpers.get_shape(
min_dim_size=2, max_dim_size=5, min_num_dims=min_num_dims, max_num_dims=5
)
)
shape_a = list(shape_a)
shape_b = list(shape_b)
if len(shape_a) == 1 and len(shape_b) == 1:
shape_b[0] = shape_a[0]
elif len(shape_a) == 2 and len(shape_b) == 2:
shape_b[0] = shape_a[1]
elif len(shape_a) >= 2 and len(shape_b) == 1:
shape_b[0] = shape_a[-1]
elif len(shape_a) >= 1 and len(shape_b) >= 2:
shape_a[-1] = shape_b[-2]
dtype_1, a = draw(
helpers.dtype_and_values(
shape=shape_a,
available_dtypes=helpers.get_dtypes("float"),
min_value=-10,
max_value=10,
)
)
dtype_2, b = draw(
helpers.dtype_and_values(
shape=shape_b,
dtype=dtype_1,
min_value=-10,
max_value=10,
)
)
return [dtype_1[0], dtype_2[0]], [a[0], b[0]]
@st.composite
def _generate_eigh_tridiagonal_args(draw):
dtype, alpha = draw(
helpers.dtype_and_values(
min_dim_size=2,
min_num_dims=1,
max_num_dims=1,
min_value=2.0,
max_value=5,
available_dtypes=helpers.get_dtypes("float"),
)
)
beta_shape = len(alpha[0]) - 1
dtype, beta = draw(
helpers.dtype_and_values(
available_dtypes=dtype,
shape=(beta_shape,),
min_value=2.0,
max_value=5,
)
)
select = draw(st.sampled_from(("a", "i", "v")))
if select == "a":
select_range = None
elif select == "i":
range_slice = draw(
st.slices(beta_shape).filter(
lambda x: x.start
and x.stop
and x.step
and x.start >= 0
and x.stop >= 0
and x.step >= 0
and x.start < x.stop
)
)
select_range = [range_slice.start, range_slice.stop]
else:
select_range = [-100, 100]
eigvals_only = draw(st.booleans())
tol = draw(st.floats(1e-5, 1e-3) | st.just(None))
return dtype, alpha, beta, eigvals_only, select, select_range, tol
@st.composite
def _generate_general_inner_product_args(draw):
dim = draw(st.integers(min_value=1, max_value=3))
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=(dim, dim),
min_value=1,
max_value=10.0,
num_arrays=2,
shared_dtype=True,
allow_nan=False,
)
)
max_value = dim - 1 if dim > 1 else dim
n_modes = draw(st.integers(min_value=1, max_value=max_value) | st.just(None))
return x_dtype, x, n_modes
# multi_dot
@st.composite
def _generate_multi_dot_dtype_and_arrays(draw):
input_dtype = [draw(st.sampled_from(draw(helpers.get_dtypes("numeric"))))]
matrices_dims = draw(
st.lists(st.integers(min_value=2, max_value=10), min_size=4, max_size=4)
)
shape_1 = (matrices_dims[0], matrices_dims[1])
shape_2 = (matrices_dims[1], matrices_dims[2])
shape_3 = (matrices_dims[2], matrices_dims[3])
matrix_1 = draw(
helpers.dtype_and_values(
shape=shape_1,
dtype=input_dtype,
min_value=-10,
max_value=10,
)
)
matrix_2 = draw(
helpers.dtype_and_values(
shape=shape_2,
dtype=input_dtype,
min_value=-10,
max_value=10,
)
)
matrix_3 = draw(
helpers.dtype_and_values(
shape=shape_3,
dtype=input_dtype,
min_value=-10,
max_value=10,
)
)
return input_dtype, [matrix_1[1][0], matrix_2[1][0], matrix_3[1][0]]
# solve_triangular
@st.composite
def _generate_solve_triangular_args(draw):
shape = draw(
st.lists(st.integers(min_value=1, max_value=3), min_size=2, max_size=5)
)
shape_b = list(shape)
shape_a = list(shape)
shape_a[-1] = shape_a[-2] # Make square
dtype_a, a = draw(
helpers.dtype_and_values(
shape=shape_a,
available_dtypes=helpers.get_dtypes("float"),
min_value=-10,
max_value=10,
)
)
dtype_b, b = draw(
helpers.dtype_and_values(
shape=shape_b,
available_dtypes=helpers.get_dtypes("float"),
min_value=-10,
max_value=10,
)
)
dtype_a = dtype_a[0]
dtype_b = dtype_b[0]
a = a[0]
b = b[0]
upper = draw(st.booleans())
adjoint = draw(st.booleans())
unit_diagonal = draw(st.booleans())
for i in range(shape_a[-2]):
a[ivy.abs(a[..., i, i]) < 0.01, i, i] = 0.01 # Make diagonals non-zero
return upper, adjoint, unit_diagonal, [dtype_a, dtype_b], [a, b]
@st.composite
def _get_dtype_value1_value2_cov(
draw,
available_dtypes,
min_num_dims,
max_num_dims,
min_dim_size,
max_dim_size,
abs_smallest_val=None,
min_value=None,
max_value=None,
allow_inf=False,
exclude_min=False,
exclude_max=False,
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
):
shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
dtype = draw(st.sampled_from(draw(available_dtypes)))
values = []
for i in range(2):
values.append(
draw(
helpers.array_values(
dtype=dtype,
shape=shape,
abs_smallest_val=abs_smallest_val,
min_value=min_value,
max_value=max_value,
allow_inf=allow_inf,
exclude_min=exclude_min,
exclude_max=exclude_max,
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
)
)
)
value1, value2 = values[0], values[1]
# modifiers: rowVar, bias, ddof
rowVar = draw(st.booleans())
bias = draw(st.booleans())
ddof = draw(helpers.ints(min_value=0, max_value=1))
numVals = None
if rowVar is False:
numVals = -1 if numVals == 0 else 0
else:
numVals = 0 if len(shape) == 1 else -1
fweights = draw(
helpers.array_values(
dtype="int64",
shape=shape[numVals],
abs_smallest_val=1,
min_value=1,
max_value=10,
allow_inf=False,
)
)
aweights = draw(
helpers.array_values(
dtype="float64",
shape=shape[numVals],
abs_smallest_val=1,
min_value=1,
max_value=10,
allow_inf=False,
small_abs_safety_factor=1,
)
)
return [dtype], value1, value2, rowVar, bias, ddof, fweights, aweights
# higher_order_moment
@st.composite
def _higher_order_moment_data(draw):
shape = draw(helpers.get_shape(min_num_dims=2, max_num_dims=4))
order = draw(helpers.ints(min_value=0, max_value=5))
dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=shape,
large_abs_safety_factor=20,
small_abs_safety_factor=20,
safety_factor_scale="log",
)
)
return dtype, x[0], order
# initialize tucker
@st.composite
def _initialize_tucker_data(draw):
x_dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=5,
min_dim_size=2,
max_dim_size=5,
min_value=0.1,
max_value=10.0,
ret_shape=True,
)
)
dims = len(shape)
rank = []
for i in range(dims):
rank.append(draw(helpers.ints(min_value=1, max_value=shape[i])))
n_modes = draw(helpers.ints(min_value=2, max_value=dims))
modes = [*range(dims)][:n_modes]
mask_dtype, mask = draw(
helpers.dtype_and_values(
dtype=["int32"],
shape=shape,
min_value=0,
max_value=1,
)
)
svd_mask_repeats = draw(helpers.ints(min_value=0, max_value=3))
non_negative = draw(st.booleans())
return (
x_dtype + mask_dtype,
x[0],
rank,
modes,
non_negative,
mask[0],
svd_mask_repeats,
)
@st.composite
def _khatri_rao_data(draw):
num_matrices = draw(helpers.ints(min_value=2, max_value=4))
m = draw(helpers.ints(min_value=1, max_value=5))
input_dtypes, input = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=num_matrices,
min_dim_size=m,
max_dim_size=m,
min_num_dims=2,
max_num_dims=2,
large_abs_safety_factor=20,
small_abs_safety_factor=20,
safety_factor_scale="log",
)
)
skip_matrix = draw(helpers.ints(min_value=0, max_value=len(input) - 1))
weight_dtype, weights = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"), shape=(m,)
)
)
mask_dtype, mask = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_value=0,
max_value=1,
shape=(m,),
)
)
return (
input_dtypes + weight_dtype + mask_dtype,
input,
skip_matrix,
weights[0],
mask[0],
)
@st.composite
def _kronecker_data(draw):
num_arrays = draw(helpers.ints(min_value=2, max_value=5))
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=num_arrays,
large_abs_safety_factor=20,
small_abs_safety_factor=20,
safety_factor_scale="log",
shared_dtype=True,
min_num_dims=2,
max_num_dims=2,
)
)
skip_matrix = draw(
st.lists(st.integers(min_value=0, max_value=num_arrays - 1), unique=True)
)
reverse = draw(st.booleans())
return x_dtype, x, skip_matrix, reverse
# truncated svd
@st.composite
def _make_svd_nn_data(draw):
x_dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
max_dim_size=5,
min_value=1.0,
max_value=10.0,
ret_shape=True,
)
)
n, m = shape
_, U = draw(
helpers.dtype_and_values(
dtype=x_dtype,
available_dtypes=helpers.get_dtypes("float"),
shape=(n, m),
min_value=1.0,
max_value=10.0,
)
)
_, S = draw(
helpers.dtype_and_values(
dtype=x_dtype,
shape=(m,),
min_value=1.0,
max_value=10.0,
)
)
_, V = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=(m, m),
min_value=1.0,
max_value=10.0,
)
)
nntype = draw(st.sampled_from(["nndsvd", "nndsvda"]))
return x_dtype, x[0], U[0], S[0], V[0], nntype
@st.composite
def _mode_dot_data(draw):
shape_t1 = draw(helpers.get_shape(min_num_dims=2, max_num_dims=5))
mode = draw(helpers.ints(min_value=0, max_value=len(shape_t1) - 1))
mode_dimsize = shape_t1[mode]
t1_dtype, t1 = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=shape_t1,
large_abs_safety_factor=20,
small_abs_safety_factor=20,
safety_factor_scale="log",
)
)
t2_rows = draw(helpers.ints(min_value=1, max_value=4))
shape_t2 = draw(st.sampled_from([(mode_dimsize,), (t2_rows, mode_dimsize)]))
t2_dtype, t2 = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=shape_t2,
large_abs_safety_factor=20,
small_abs_safety_factor=20,
safety_factor_scale="log",
)
)
return t1_dtype + t2_dtype, t1[0], t2[0], mode
@st.composite
def _multi_mode_dot_data(draw):
t1_dtype, t1, shape_t1 = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
ret_shape=True,
min_num_dims=2,
large_abs_safety_factor=20,
small_abs_safety_factor=20,
safety_factor_scale="log",
)
)
modes = [*range(len(shape_t1))]
skip = draw(st.lists(helpers.ints(min_value=0, max_value=len(shape_t1) - 1)))
t2 = []
t2_dtype = []
for i in modes:
mode_dimsize = shape_t1[i]
rows = draw(helpers.ints(min_value=1, max_value=4))
shape = draw(st.sampled_from([(mode_dimsize,), (rows, mode_dimsize)]))
mat_or_vec_dtype, mat_or_vec = draw(
helpers.dtype_and_values(
dtype=t1_dtype,
shape=shape,
large_abs_safety_factor=20,
small_abs_safety_factor=20,
safety_factor_scale="log",
)
)
t2.append(mat_or_vec[0])
t2_dtype.append(mat_or_vec_dtype[0])
return t1_dtype + t2_dtype, t1[0], t2, modes, skip
# partial tucker
@st.composite
def _partial_tucker_data(draw):
x_dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=5,
min_dim_size=2,
max_dim_size=5,
min_value=0.1,
max_value=10.0,
ret_shape=True,
)
)
dims = len(shape)
rank = []
for i in range(dims):
rank.append(draw(helpers.ints(min_value=1, max_value=shape[i])))
n_modes = draw(helpers.ints(min_value=2, max_value=dims))
modes = [*range(dims)][:n_modes]
mask_dtype, mask = draw(
helpers.dtype_and_values(
dtype=["int32"],
shape=shape,
min_value=0,
max_value=1,
)
)
svd_mask_repeats = draw(helpers.ints(min_value=0, max_value=3))
n_iter_max = draw(helpers.ints(min_value=1, max_value=7))
tol = draw(helpers.floats(min_value=1e-5, max_value=1e-1))
return (
x_dtype + mask_dtype,
x[0],
rank,
modes,
n_iter_max,
mask[0],
svd_mask_repeats,
tol,
)
# tensor train
@st.composite
def _tensor_train_data(draw):
x_dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0.1,
max_value=10,
min_num_dims=2,
max_num_dims=5,
min_dim_size=2,
max_dim_size=5,
ret_shape=True,
).filter(lambda x: "float16" not in x[0] and "bfloat16" not in x[0])
)
dims = len(shape)
rank = [1]
for i in range(dims - 1):
rank.append(draw(helpers.ints(min_value=1, max_value=shape[i])))
rank.append(1)
return x_dtype, x[0], rank
# truncated svd
@st.composite
def _truncated_svd_data(draw):
x_dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
max_dim_size=5,
min_value=0.1,
max_value=10.0,
ret_shape=True,
)
)
uv = draw(st.booleans())
n_eigen = draw(helpers.ints(min_value=1, max_value=max(shape[-2:])))
return x_dtype, x[0], uv, n_eigen
@st.composite
def _tt_matrix_to_tensor_data(draw):
rank = 1
num_factors = draw(st.integers(min_value=1, max_value=3))
factor_dims = draw(
st.tuples(
st.integers(min_value=1, max_value=3), st.integers(min_value=1, max_value=3)
)
)
shape = (num_factors, rank, *factor_dims, rank)
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
shape=shape,
shared_dtype=True,
)
)
return x_dtype, x
# tucker
@st.composite
def _tucker_data(draw):
x_dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=4,
min_dim_size=2,
max_dim_size=3,
min_value=0.1,
max_value=10.0,
ret_shape=True,
)
)
dims = len(shape)
rank = []
for i in range(dims):
rank.append(draw(helpers.ints(min_value=1, max_value=shape[i])))
mask_dtype, mask = draw(
helpers.dtype_and_values(
dtype=["int32"],
shape=shape,
min_value=0,
max_value=1,
)
)
svd_mask_repeats = draw(helpers.ints(min_value=0, max_value=1))
n_iter_max = draw(helpers.ints(min_value=0, max_value=2))
tol = draw(helpers.floats(min_value=1e-5, max_value=1e-1))
init = draw(st.sampled_from(["svd", "random"]))
fixed_factors = draw(st.booleans())
if fixed_factors:
_, core = draw(
helpers.dtype_and_values(
dtype=x_dtype,
min_value=0.1,
max_value=10.0,
shape=rank,
)
)
factors = []
for i in range(dims):
_, factor = draw(
helpers.dtype_and_values(
dtype=x_dtype,
min_value=0.1,
max_value=10.0,
shape=(shape[i], rank[i]),
)
)
factors.append(factor[0])
fixed_factors = draw(
st.lists(
helpers.ints(min_value=0, max_value=dims - 1), unique=True, min_size=1
)
)
rank = [rank[i] for i in range(dims) if i not in fixed_factors]
init = ivy.TuckerTensor((core[0], factors))
return (
x_dtype + mask_dtype,
x[0],
rank,
fixed_factors,
init,
n_iter_max,
mask[0],
svd_mask_repeats,
tol,
)
# --- Main --- #
# ------------ #
@handle_test(
fn_tree="functional.ivy.experimental.adjoint",
dtype_x=helpers.dtype_and_values(
available_dtypes=(
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
),
min_num_dims=2,
max_num_dims=10,
min_dim_size=1,
max_dim_size=10,
min_value=-1.0e5,
max_value=1.0e5,
allow_nan=False,
shared_dtype=True,
),
)
def test_adjoint(dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
on_device=on_device,
)
@handle_test(
fn_tree="functional.ivy.experimental.batched_outer",
data=_batched_outer_data(),
)
def test_batched_outer(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtypes, tensors = data
if backend_fw == "paddle":
# to avoid large dimension results since paddle don't support them
tensors = tensors[:2]
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
atol_=1e-1,
rtol_=1e-1,
input_dtypes=input_dtypes,
tensors=tensors,
)
# test adapted from tensorly
# https://github.com/tensorly/tensorly/blob/main/tensorly/tenalg/tests/test_outer_product.py#L22
@pytest.mark.skip(
reason=(
"ivy.tensordot does not support batched_modes argument for the moment. "
"TODO please remove this when the functionality is added. "
"see https://github.com/unifyai/ivy/issues/21914"
)
)
def test_batched_outer_product():
batch_size = 3
X = ivy.random_uniform(shape=(batch_size, 4, 5, 6))
Y = ivy.random_uniform(shape=(batch_size, 3))
Z = ivy.random_uniform(shape=(batch_size, 2))
res = ivy.batched_outer([X, Y, Z])
true_res = ivy.tensordot(X, Y, (), batched_modes=0)
true_res = ivy.tensordot(true_res, Z, (), batched_modes=0)
np.testing.assert_array_almost_equal(res, true_res)
@handle_test(
fn_tree="functional.ivy.experimental.cond",
dtype_x=helpers.cond_data_gen_helper(),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_cond(dtype_x, test_flags, backend_fw, on_device, fn_name):
dtype, x = dtype_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
on_device=on_device,
fn_name=fn_name,
rtol_=1e-3,
atol_=1e-3,
x=x[0],
p=x[1],
)
# cov
@handle_test(
fn_tree="functional.ivy.experimental.cov",
dtype_x1_x2_cov=_get_dtype_value1_value2_cov(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=2,
min_dim_size=2,
max_dim_size=5,
min_value=1,
max_value=1e10,
abs_smallest_val=0.01,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
test_gradients=st.just(False),
test_with_out=st.just(False),
)
def test_cov(*, dtype_x1_x2_cov, test_flags, backend_fw, fn_name, on_device):
dtype, x1, x2, rowVar, bias, ddof, fweights, aweights = dtype_x1_x2_cov
helpers.test_function(
input_dtypes=[dtype[0], dtype[0], "int64", "float64"],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x1,
x2=x2,
rowVar=rowVar,
bias=bias,
ddof=ddof,
fweights=fweights,
aweights=aweights,
return_flat_np_arrays=True,
rtol_=1e-2,
atol_=1e-2,
)
@handle_test(
fn_tree="functional.ivy.experimental.diagflat",
args_packet=_generate_diag_args(),
test_gradients=st.just(False),
)
def test_diagflat(*, test_flags, backend_fw, fn_name, args_packet, on_device):
dtype_x, offset, dtype_padding_value, align, num_rows, num_cols = args_packet
x_dtype, x = dtype_x
padding_value_dtype, padding_value = dtype_padding_value
padding_value = padding_value[0][0]
helpers.test_function(
input_dtypes=x_dtype + ["int64"] + padding_value_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
offset=offset,
padding_value=padding_value,
align=align,
num_rows=num_rows,
num_cols=num_cols,
on_device=on_device,
atol_=1e-01,
rtol_=1 / 64,
)
@handle_test(
fn_tree="functional.ivy.experimental.dot",
data=_generate_dot_dtype_and_arrays(),
)
def test_dot(*, data, test_flags, backend_fw, fn_name, on_device):
(input_dtypes, x) = data
return helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
xs_grad_idxs=[[0, 0]],
input_dtypes=input_dtypes,
test_values=True,
rtol_=0.5,
atol_=0.5,
a=x[0],
b=x[1],
)
@handle_test(
fn_tree="functional.ivy.experimental.eig",
dtype_x=helpers.dtype_and_values(
available_dtypes=(
ivy.float32,
ivy.float64,
ivy.int32,
ivy.int64,
ivy.complex64,
ivy.complex128,
),
min_num_dims=2,
max_num_dims=3,
min_dim_size=10,
max_dim_size=10,
min_value=1.0,
max_value=1.0e5,
shared_dtype=True,
),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_eig(dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
on_device=on_device,
fn_name=fn_name,
test_values=False,
x=x[0],
)
# eigh_tridiagonal
@handle_test(
fn_tree="eigh_tridiagonal",
args_packet=_generate_eigh_tridiagonal_args(),
ground_truth_backend="numpy",
test_gradients=st.just(False),
)
def test_eigh_tridiagonal(
*,
args_packet,
test_flags,
backend_fw,
fn_name,
on_device,
):
dtype, alpha, beta, eigvals_only, select, select_range, tol = args_packet
test_flags.with_out = False
results = helpers.test_function(
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
input_dtypes=dtype,
alpha=alpha[0],
beta=beta[0],
eigvals_only=eigvals_only,
select=select,
select_range=select_range,
tol=tol,
test_values=eigvals_only,
return_flat_np_arrays=True,
)
if results is None:
return
ret_np_flat, ret_np_from_gt_flat = results
reconstructed_np = None
for i in range(len(ret_np_flat) // 2):
eigenvalue = ret_np_flat[i]
eigenvector = ret_np_flat[len(ret_np_flat) // 2 + i]
if reconstructed_np is not None:
reconstructed_np += eigenvalue * np.matmul(
eigenvector.reshape(1, -1), eigenvector.reshape(-1, 1)
)
else:
reconstructed_np = eigenvalue * np.matmul(
eigenvector.reshape(1, -1), eigenvector.reshape(-1, 1)
)
reconstructed_from_np = None
for i in range(len(ret_np_from_gt_flat) // 2):
eigenvalue = ret_np_from_gt_flat[i]
eigenvector = ret_np_from_gt_flat[len(ret_np_flat) // 2 + i]
if reconstructed_from_np is not None:
reconstructed_from_np += eigenvalue * np.matmul(
eigenvector.reshape(1, -1), eigenvector.reshape(-1, 1)
)
else:
reconstructed_from_np = eigenvalue * np.matmul(
eigenvector.reshape(1, -1), eigenvector.reshape(-1, 1)
)
# value test
helpers.assert_all_close(
reconstructed_np,
reconstructed_from_np,
rtol=1e-1,
atol=1e-2,
backend=backend_fw,
ground_truth_backend=test_flags.ground_truth_backend,
)
@handle_test(
fn_tree="functional.ivy.experimental.eigvals",
dtype_x=helpers.dtype_and_values(
available_dtypes=(
ivy.float32,
ivy.float64,
ivy.int32,
ivy.int64,
ivy.complex64,
ivy.complex128,
),
min_num_dims=2,
max_num_dims=3,
min_dim_size=10,
max_dim_size=10,
min_value=1.0,
max_value=1.0e5,
shared_dtype=True,
),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_eigvals(dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
on_device=on_device,
fn_name=fn_name,
test_values=False,
x=x[0],
)
@handle_test(
fn_tree="functional.ivy.experimental.general_inner_product",
data=_generate_general_inner_product_args(),
)
def test_general_inner_product(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtypes, x, n_modes = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=input_dtypes,
a=x[0],
b=x[1],
n_modes=n_modes,
)
@handle_test(
fn_tree="functional.ivy.experimental.higher_order_moment",
data=_higher_order_moment_data(),
)
def test_higher_order_moment(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtypes, x, order = data
if backend_fw == "paddle":
# to avoid large dimension results since paddle don't support them
order = min(order, 2)
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
atol_=1e-1,
rtol_=1e-1,
input_dtypes=input_dtypes,
x=x,
order=order,
)
@handle_test(
fn_tree="functional.ivy.experimental.initialize_tucker",
data=_initialize_tucker_data(),
test_with_out=st.just(False),
)
def test_initialize_tucker(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtypes, x, rank, modes, non_negative, mask, svd_mask_repeats = data
results = helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
input_dtypes=input_dtypes,
x=x,
rank=rank,
modes=modes,
non_negative=non_negative,
mask=mask,
svd_mask_repeats=svd_mask_repeats,
test_values=False,
)
ret_np, ret_from_gt_np = results
core = helpers.flatten_and_to_np(ret=ret_np[0], backend=backend_fw)
factors = helpers.flatten_and_to_np(ret=ret_np[1], backend=backend_fw)
core_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np[0], backend=test_flags.ground_truth_backend
)
factors_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np[1], backend=test_flags.ground_truth_backend
)
with BackendHandler.update_backend(backend_fw) as ivy_backend:
n_elem = int(ivy_backend.prod(rank[: len(modes)])) * int(
ivy_backend.prod(x.shape[len(modes) :])
)
for c, c_gt in zip(core, core_gt):
assert np.prod(c.shape) == n_elem
assert np.prod(c_gt.shape) == n_elem
for f, f_gt in zip(factors, factors_gt):
assert np.prod(f.shape) == np.prod(f_gt.shape)
@handle_test(
fn_tree="functional.ivy.experimental.khatri_rao",
data=_khatri_rao_data(),
test_instance_method=st.just(False),
)
def test_khatri_rao(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtypes, input, skip_matrix, weights, mask = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
test_values=False,
input_dtypes=input_dtypes,
x=input,
weights=weights,
skip_matrix=skip_matrix,
mask=mask,
)
# The following two tests have been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/tenalg/tests/test_khatri_rao.py
@pytest.mark.parametrize(("columns", "rows"), [(4, [3, 4, 2])])
def test_khatri_rao_tensorly_1(columns, rows):
columns = columns
rows = rows
matrices = [ivy.arange(k * columns).reshape((k, columns)) for k in rows]
res = ivy.khatri_rao(matrices)
# resulting matrix must be of shape (prod(n_rows), n_columns)
n_rows = 3 * 4 * 2
n_columns = 4
assert res.shape[0] == n_rows
assert res.shape[1] == n_columns
@pytest.mark.parametrize(
("t1", "t2", "true_res"),
[
(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[1, 4, 7], [2, 5, 8], [3, 6, 9]],
[
[1.0, 8.0, 21.0],
[2.0, 10.0, 24.0],
[3.0, 12.0, 27.0],
[4.0, 20.0, 42.0],
[8.0, 25.0, 48.0],
[12.0, 30.0, 54.0],
[7.0, 32.0, 63.0],
[14.0, 40.0, 72.0],
[21.0, 48.0, 81.0],
],
)
],
)
def test_khatri_rao_tensorly_2(t1, t2, true_res):
t1 = ivy.array(t1)
t2 = ivy.array(t2)
true_res = ivy.array(true_res)
res = ivy.khatri_rao([t1, t2])
assert np.allclose(res, true_res)
@handle_test(
fn_tree="functional.ivy.experimental.kron",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=1,
max_dim_size=10,
num_arrays=2,
shared_dtype=True,
),
test_gradients=st.just(False),
)
def test_kron(*, dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
a=x[0],
b=x[1],
)
@handle_test(
fn_tree="functional.ivy.experimental.kronecker",
data=_kronecker_data(),
test_instance_method=st.just(False),
)
def test_kronecker(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtypes, input, skip_matrix, reverse = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=input_dtypes,
x=input,
skip_matrix=skip_matrix,
reverse=reverse,
)
# lu_factor
@handle_test(
fn_tree="functional.ivy.experimental.lu_factor",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
max_dim_size=5,
).filter(
lambda x: np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon
and np.linalg.det(np.asarray(x[1][0])) != 0
),
test_gradients=st.just(False),
)
def test_lu_factor(dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_x
ret = helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
test_values=False,
)
# check decomp is correct manually by getting the values from test_function above
# this is because the decomposition is not unique and test_values will not work
ret_f, ret_gt = ret
# check that the decomposition is correct for current fw at least
LU, p = ret_f.LU, ret_f.p
L = np.tril(LU, -1) + np.eye(LU.shape[0])
U = np.triu(LU)
P = np.eye(LU.shape[0])[p]
assert np.allclose(L @ U, P @ x[0])
@handle_test(
fn_tree="functional.ivy.experimental.lu_solve",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=helpers.get_shape(
min_num_dims=2, max_num_dims=2, min_dim_size=2, max_dim_size=2
),
num_arrays=2,
shared_dtype=True,
).filter(
lambda x: "float16" not in x[0]
and "bfloat16" not in x[0]
and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon
and np.linalg.det(np.asarray(x[1][0])) != 0
),
test_gradients=st.just(False),
)
def test_lu_solve(dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, arr = dtype_x
A, B = arr[0], arr[1]
ivy.set_backend(backend_fw)
lu_ = ivy.lu_factor(A)
lu, p = lu_.LU, lu_.p
X, X_gt = helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
lu=lu,
p=p,
b=B,
test_values=False,
)
assert np.allclose(A @ X, B)
@handle_test(
fn_tree="functional.ivy.experimental.make_svd_non_negative",
data=_make_svd_nn_data(),
test_with_out=st.just(False),
)
def test_make_svd_non_negative(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtype, x, U, S, V, nntype = data
results = helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
input_dtypes=input_dtype,
x=x,
U=U,
S=S,
V=V,
nntype=nntype,
test_values=False,
return_flat_np_arrays=True,
)
if results is None:
return
# returned values should be non negative
ret_flat_np, ret_from_gt_flat_np = results
W_flat_np, H_flat_np = ret_flat_np[0], ret_flat_np[1]
W_flat_np_gt, H_flat_np_gt = ret_from_gt_flat_np[0], ret_from_gt_flat_np[1]
assert np.all(W_flat_np >= 0)
assert np.all(H_flat_np >= 0)
assert np.all(W_flat_np_gt >= 0)
assert np.all(H_flat_np_gt >= 0)
helpers.assert_all_close(
W_flat_np,
W_flat_np_gt,
atol=1e-02,
backend=backend_fw,
ground_truth_backend=test_flags.ground_truth_backend,
)
helpers.assert_all_close(
H_flat_np,
H_flat_np_gt,
atol=1e-02,
backend=backend_fw,
ground_truth_backend=test_flags.ground_truth_backend,
)
# matrix_exp
@handle_test(
fn_tree="functional.ivy.experimental.matrix_exp",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
max_dim_size=2,
min_value=-100,
max_value=100,
allow_nan=False,
shared_dtype=True,
),
test_gradients=st.just(False),
)
def test_matrix_exp(dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
)
@handle_test(
fn_tree="functional.ivy.experimental.mode_dot",
data=_mode_dot_data(),
)
def test_mode_dot(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtypes, t1, t2, mode = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=input_dtypes,
x=t1,
matrix_or_vector=t2,
mode=mode,
)
@pytest.mark.parametrize(
("X", "U", "true_res"),
[
(
[
[[1, 13], [4, 16], [7, 19], [10, 22]],
[[2, 14], [5, 17], [8, 20], [11, 23]],
[[3, 15], [6, 18], [9, 21], [12, 24]],
],
[[1, 3, 5], [2, 4, 6]],
[
[[22, 130], [49, 157], [76, 184], [103, 211]],
[[28, 172], [64, 208], [100, 244], [136, 280]],
],
)
],
)
def test_mode_dot_tensorly(X, U, true_res):
X = ivy.array(X)
U = ivy.array(U)
true_res = ivy.array(true_res)
res = ivy.mode_dot(X, U, 0)
assert np.allclose(true_res, res, atol=1e-1, rtol=1e-1)
@handle_test(
fn_tree="functional.ivy.experimental.multi_dot",
dtype_x=_generate_multi_dot_dtype_and_arrays(),
test_gradients=st.just(False),
)
def test_multi_dot(dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
test_values=True,
x=x,
rtol_=1e-1,
atol_=6e-1,
)
@handle_test(
fn_tree="functional.ivy.experimental.multi_mode_dot",
data=_multi_mode_dot_data(),
)
def test_multi_mode_dot(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtypes, t1, t2, modes, skip = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=input_dtypes,
x=t1,
mat_or_vec_list=t2,
modes=modes,
skip=skip,
)
# The following 2 tests have been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/tenalg/tests/test_n_mode_product.py#L81
@pytest.mark.parametrize(
("X", "U", "true_res"),
[
([[1, 2], [0, -1]], [[2, 1], [-1, 1]], [1]),
],
)
def test_multi_mode_dot_tensorly_1(X, U, true_res):
X, U, true_res = ivy.array(X), ivy.array(U), ivy.array(true_res)
res = ivy.multi_mode_dot(X, U, [0, 1])
assert np.allclose(true_res, res)
@pytest.mark.parametrize(
"shape",
[
(3, 5, 4, 2),
],
)
def test_multi_mode_dot_tensorly_2(shape):
print(shape)
X = ivy.ones(shape)
vecs = [ivy.ones(s) for s in shape]
res = ivy.multi_mode_dot(X, vecs)
# result should be a scalar
assert ivy.shape(res) == ()
assert np.allclose(res, np.prod(shape))
# Average pooling each mode
# Order should not matter
vecs = [vecs[i] / s for i, s in enumerate(shape)]
for modes in itertools.permutations(range(len(shape))):
res = ivy.multi_mode_dot(X, [vecs[i] for i in modes], modes=modes)
assert ivy.shape(res) == ()
assert np.allclose(res, 1)
@handle_test(
fn_tree="functional.ivy.experimental.partial_tucker",
data=_partial_tucker_data(),
test_with_out=st.just(False),
)
def test_partial_tucker(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtypes, x, rank, modes, n_iter_max, mask, svd_mask_repeats, tol = data
results = helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
input_dtypes=input_dtypes,
x=x,
rank=rank,
modes=modes,
n_iter_max=n_iter_max,
tol=tol,
mask=mask,
svd_mask_repeats=svd_mask_repeats,
test_values=False,
)
ret_np, ret_from_gt_np = results
core = helpers.flatten_and_to_np(ret=ret_np[0], backend=backend_fw)
factors = helpers.flatten_and_to_np(ret=ret_np[1], backend=backend_fw)
core_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np[0], backend=test_flags.ground_truth_backend
)
factors_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np[1], backend=test_flags.ground_truth_backend
)
with BackendHandler.update_backend(backend_fw) as ivy_backend:
n_elem = int(ivy_backend.prod(rank[: len(modes)])) * int(
ivy_backend.prod(x.shape[len(modes) :])
)
for c, c_gt in zip(core, core_gt):
assert np.prod(c.shape) == n_elem
assert np.prod(c_gt.shape) == n_elem
for f, f_gt in zip(factors, factors_gt):
assert np.prod(f.shape) == np.prod(f_gt.shape)
# test adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/decomposition/tests/test_tucker.py#L24
@pytest.mark.parametrize(
("tol_norm_2", "tol_max_abs", "modes", "shape"),
[
(
10e-3,
10e-1,
[1, 2],
(3, 4, 3),
)
],
)
def test_partial_tucker_tensorly(tol_norm_2, tol_max_abs, modes, shape):
tensor = ivy.random_uniform(shape=shape)
(core, factors) = ivy.partial_tucker(
tensor, None, modes, n_iter_max=200, verbose=True
)
reconstructed_tensor = ivy.multi_mode_dot(core, factors, modes=modes)
norm_rec = ivy.sqrt(ivy.sum(reconstructed_tensor**2))
norm_tensor = ivy.sqrt(ivy.sum(tensor**2))
assert (norm_rec - norm_tensor) / norm_rec < tol_norm_2
# Test the max abs difference between the reconstruction and the tensor
assert ivy.max(ivy.abs(norm_rec - norm_tensor)) < tol_max_abs
# Test the shape of the core and factors
ranks = [3, 1]
(core, factors) = ivy.partial_tucker(
tensor, ranks, modes, n_iter_max=100, verbose=True
)
for i, rank in enumerate(ranks):
np.testing.assert_equal(
factors[i].shape,
(tensor.shape[i + 1], rank),
err_msg=(
f"factors[i].shape = {factors[i].shape}, expected"
f" {(tensor.shape[i + 1], rank)}"
),
)
np.testing.assert_equal(
core.shape,
[tensor.shape[0]] + ranks,
err_msg=f"core.shape = {core.shape}, expected {[tensor.shape[0]] + ranks}",
)
# Test random_state fixes the core and the factor matrices
(core1, factors1) = ivy.partial_tucker(
tensor,
ranks,
modes,
seed=0,
init="random",
)
(core2, factors2) = ivy.partial_tucker(
tensor,
ranks,
modes,
seed=0,
init="random",
)
np.allclose(core1, core2)
for factor1, factor2 in zip(factors1, factors2):
np.allclose(factor1, factor2)
@handle_test(
fn_tree="functional.ivy.experimental.solve_triangular",
data=_generate_solve_triangular_args(),
test_instance_method=st.just(False),
)
def test_solve_triangular(*, data, test_flags, backend_fw, fn_name, on_device):
# Temporarily ignore gradients on paddlepaddle backend
# See: https://github.com/unifyai/ivy/pull/25917
assume(not (backend_fw == "paddle" and test_flags.test_gradients))
upper, adjoint, unit_diagonal, input_dtypes, x = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-3,
atol_=1e-3,
input_dtypes=input_dtypes,
x1=x[0],
x2=x[1],
upper=upper,
adjoint=adjoint,
unit_diagonal=unit_diagonal,
)
@handle_test(
fn_tree="functional.ivy.experimental.svd_flip",
uv=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
min_num_dims=2,
max_num_dims=2,
),
u_based_decision=st.booleans(),
test_with_out=st.just(False),
)
def test_svd_flip(*, uv, u_based_decision, test_flags, backend_fw, fn_name, on_device):
input_dtypes, input = uv
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=input_dtypes,
U=input[0],
V=input[1],
u_based_decision=u_based_decision,
)
@handle_test(
fn_tree="functional.ivy.experimental.tensor_train",
data=_tensor_train_data(),
# TODO: add support for more modes
svd=st.just("truncated_svd"),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_tensor_train(*, data, svd, test_flags, backend_fw, fn_name, on_device):
input_dtype, x, rank = data
results = helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
input_dtypes=input_dtype,
input_tensor=x,
rank=rank,
svd=svd,
test_values=False,
)
ret_np, ret_from_gt_np = results
factors = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
factors_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np, backend=test_flags.ground_truth_backend
)
for f, f_gt in zip(factors, factors_gt):
assert np.prod(f.shape) == np.prod(f_gt.shape)
# The following 3 tests have been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/decomposition/tests/test_tt_decomposition.py
@pytest.mark.parametrize(
("shape", "rank"), [((3, 4, 5, 6, 2, 10), (1, 3, 3, 4, 2, 2, 1))]
)
def test_tensor_train_tensorly_1(shape, rank):
tensor = ivy.random_uniform(shape=shape)
tensor_shape = tensor.shape
factors = ivy.tensor_train(tensor, rank)
assert len(factors) == 6, "Number of factors should be 6, currently has " + str(
len(factors)
)
r_prev_iteration = 1
for k in range(6):
(r_prev_k, n_k, r_k) = factors[k].shape
assert tensor_shape[k] == n_k, (
"Mode 1 of factor "
+ str(k)
+ "needs "
+ str(tensor_shape[k])
+ " dimensions, currently has "
+ str(n_k)
)
assert r_prev_k == r_prev_iteration, " Incorrect ranks of factors "
r_prev_iteration = r_k
@pytest.mark.parametrize(
("shape", "rank"), [((3, 4, 5, 6, 2, 10), (1, 5, 4, 3, 8, 10, 1))]
)
def test_tensor_train_tensorly_2(shape, rank):
tensor = ivy.random_uniform(shape=shape)
factors = ivy.tensor_train(tensor, rank)
for k in range(6):
(r_prev, n_k, r_k) = factors[k].shape
first_error_message = (
"TT rank " + str(k) + " is greater than the maximum allowed "
)
first_error_message += str(r_prev) + " > " + str(rank[k])
assert r_prev <= rank[k], first_error_message
first_error_message = (
"TT rank " + str(k + 1) + " is greater than the maximum allowed "
)
first_error_message += str(r_k) + " > " + str(rank[k + 1])
assert r_k <= rank[k + 1], first_error_message
@pytest.mark.parametrize(("shape", "rank", "tol"), [((3, 3, 3), (1, 3, 3, 1), (10e-5))])
def test_tensor_train_tensorly_3(shape, rank, tol):
tensor = ivy.random_uniform(shape=shape)
factors = ivy.tensor_train(tensor, rank)
reconstructed_tensor = ivy.TTTensor.tt_to_tensor(factors)
error = ivy.vector_norm(ivy.matrix_norm(tensor - reconstructed_tensor, ord=2))
error /= ivy.vector_norm(ivy.matrix_norm(tensor, ord=2))
np.testing.assert_(error < tol, "norm 2 of reconstruction higher than tol")
@handle_test(
fn_tree="functional.ivy.experimental.truncated_svd",
data=_truncated_svd_data(),
test_with_out=st.just(False),
)
def test_truncated_svd(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtype, x, uv, n_eigenvecs = data
results = helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
input_dtypes=input_dtype,
x=x,
compute_uv=uv,
n_eigenvecs=n_eigenvecs,
test_values=False,
return_flat_np_arrays=True,
)
if results is None:
return
# value test based on recreating the original matrix and testing the consistency
ret_flat_np, ret_from_gt_flat_np = results
if uv:
for i in range(len(ret_flat_np) // 3):
U = ret_flat_np[i]
S = ret_flat_np[len(ret_flat_np) // 3 + i]
Vh = ret_flat_np[2 * len(ret_flat_np) // 3 + i]
m = U.shape[-1]
n = Vh.shape[-1]
S = np.expand_dims(S, -2) if m > n else np.expand_dims(S, -1)
for i in range(len(ret_from_gt_flat_np) // 3):
U_gt = ret_from_gt_flat_np[i]
S_gt = ret_from_gt_flat_np[len(ret_from_gt_flat_np) // 3 + i]
Vh_gt = ret_from_gt_flat_np[2 * len(ret_from_gt_flat_np) // 3 + i]
S_gt = np.expand_dims(S_gt, -2) if m > n else np.expand_dims(S_gt, -1)
with BackendHandler.update_backend("numpy") as ivy_backend:
S_mat = (
S
* ivy_backend.eye(
U.shape[-1], Vh.shape[-2], batch_shape=U.shape[:-2]
).data
)
S_mat_gt = (
S_gt
* ivy_backend.eye(
U_gt.shape[-1], Vh_gt.shape[-2], batch_shape=U_gt.shape[:-2]
).data
)
reconstructed = np.matmul(np.matmul(U, S_mat), Vh)
reconstructed_gt = np.matmul(np.matmul(U_gt, S_mat_gt), Vh_gt)
# value test
helpers.assert_all_close(
reconstructed,
reconstructed_gt,
atol=1e-04,
backend=backend_fw,
ground_truth_backend=test_flags.ground_truth_backend,
)
else:
S = ret_flat_np
S_gt = ret_from_gt_flat_np
helpers.assert_all_close(
S[0],
S_gt[0],
atol=1e-04,
backend=backend_fw,
ground_truth_backend=test_flags.ground_truth_backend,
)
@handle_test(
fn_tree="functional.ivy.experimental.tt_matrix_to_tensor",
data=_tt_matrix_to_tensor_data(),
test_gradients=st.just(False),
)
def test_tt_matrix_to_tensor(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = data
helpers.test_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e8,
atol_=1e8,
tt_matrix=x[0],
)
@handle_test(
fn_tree="functional.ivy.experimental.tucker",
data=_tucker_data(),
test_with_out=st.just(False),
)
def test_tucker(*, data, test_flags, backend_fw, fn_name, on_device):
(
input_dtypes,
x,
rank,
fixed_factors,
init,
n_iter_max,
mask,
svd_mask_repeats,
tol,
) = data
results = helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
input_dtypes=input_dtypes,
x=x,
rank=rank,
fixed_factors=fixed_factors,
n_iter_max=n_iter_max,
init=init,
tol=tol,
mask=mask,
svd_mask_repeats=svd_mask_repeats,
test_values=False,
)
ret_np, ret_from_gt_np = results
core = helpers.flatten_and_to_np(ret=ret_np[0], backend=backend_fw)
factors = helpers.flatten_and_to_np(ret=ret_np[1], backend=backend_fw)
core_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np[0], backend=test_flags.ground_truth_backend
)
factors_gt = helpers.flatten_and_to_np(
ret=ret_from_gt_np[1], backend=test_flags.ground_truth_backend
)
n_elem = 1
if isinstance(init, ivy.TuckerTensor):
for index in fixed_factors:
n_elem *= init[0].shape[index]
n_elem *= np.prod(rank)
for c, c_gt in zip(core, core_gt):
assert np.prod(c.shape) == n_elem
assert np.prod(c_gt.shape) == n_elem
for f, f_gt in zip(factors, factors_gt):
assert np.prod(f.shape) == np.prod(f_gt.shape)
# test adapted from tensorly
# https://github.com/tensorly/tensorly/blob/main/tensorly/decomposition/tests/test_tucker.py#L71
@pytest.mark.parametrize(
("tol_norm_2", "tol_max_abs", "shape", "ranks"),
[(10e-3, 10e-1, (3, 4, 3), [2, 3, 1])],
)
def test_tucker_tensorly(tol_norm_2, tol_max_abs, shape, ranks):
tensor = ivy.random_uniform(shape=shape)
tucker = ivy.tucker(tensor, None, n_iter_max=200, verbose=True)
reconstructed_tensor = tucker.to_tensor()
norm_rec = ivy.sqrt(ivy.sum(reconstructed_tensor**2))
norm_tensor = ivy.sqrt(ivy.sum(tensor**2))
assert (norm_rec - norm_tensor) / norm_rec < tol_norm_2
# Test the max abs difference between the reconstruction and the tensor
assert ivy.max(ivy.abs(reconstructed_tensor - tensor)) < tol_max_abs
# Test the shape of the core and factors
core, factors = ivy.tucker(tensor, ranks, n_iter_max=100)
for i, rank in enumerate(ranks):
np.testing.assert_equal(
factors[i].shape,
(tensor.shape[i], ranks[i]),
err_msg=(
f"factors[i].shape = {factors[i].shape}, expected"
f" {(tensor.shape[i], ranks[i])}"
),
)
np.testing.assert_equal(
core.shape[i],
rank,
err_msg=f"core.shape[i] = {core.shape[i]}, expected {rank}",
)
# try fixing the core
factors_init = [ivy.copy_array(f) for f in factors]
_, factors = ivy.tucker(
tensor,
ranks,
init=(core, factors),
fixed_factors=[1],
n_iter_max=100,
verbose=1,
)
assert np.allclose(factors[1], factors_init[1])
# Random and SVD init should converge to a similar solution
rank = shape
tucker_svd = ivy.tucker(tensor, rank, n_iter_max=200, init="svd")
tucker_random = ivy.tucker(tensor, rank, n_iter_max=200, init="random", seed=1234)
rec_svd = tucker_svd.to_tensor()
rec_random = tucker_random.to_tensor()
error = ivy.sqrt(ivy.sum((rec_svd - rec_random) ** 2))
error /= ivy.sqrt(ivy.sum(rec_svd**2))
tol_norm_2 = 1e-1
np.testing.assert_(
error < tol_norm_2, "norm 2 of difference between svd and random init too high"
)
np.testing.assert_(
ivy.max(ivy.abs(rec_svd - rec_random)) < tol_max_abs,
"abs norm of difference between svd and random init too high",
)
| ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py",
"repo_id": "ivy",
"token_count": 33178
} | 65 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
import ivy
# --- Helpers --- #
# --------------- #
@st.composite
def _group_norm_helper(draw):
data_format = draw(st.sampled_from(["NSC", "NCS"]))
shape = draw(
helpers.get_shape(
min_num_dims=2, max_num_dims=4, min_dim_size=2, max_dim_size=4
)
)
channel_size = shape[-1]
group_list = [*range(1, 4)]
group_list = list(filter(lambda x: (channel_size % x == 0), group_list))
num_groups = draw(st.sampled_from(group_list))
if data_format == "NCS":
shape = (shape[0], shape[-1], *shape[1:-1])
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"float",
),
shape=shape,
large_abs_safety_factor=50,
small_abs_safety_factor=50,
safety_factor_scale="log",
)
)
_, offset = draw(
helpers.dtype_and_values(
dtype=x_dtype,
shape=(channel_size,),
large_abs_safety_factor=50,
small_abs_safety_factor=50,
safety_factor_scale="log",
)
)
_, scale = draw(
helpers.dtype_and_values(
dtype=x_dtype,
shape=(channel_size,),
large_abs_safety_factor=50,
small_abs_safety_factor=50,
safety_factor_scale="log",
)
)
eps = draw(helpers.floats(min_value=1e-5, max_value=0.1))
return x_dtype, x[0], num_groups, data_format, scale[0], offset[0], eps
@st.composite
def _instance_and_batch_norm_helper(draw, *, min_dims=1, test_function="instance_norm"):
mixed_fn_compos = draw(st.booleans())
is_torch_backend = ivy.current_backend_str() == "torch"
data_format = draw(st.sampled_from(["NSC", "NCS"]))
shape1, shape2, shape3, shape4 = draw(
helpers.mutually_broadcastable_shapes(
num_shapes=4, min_dims=min_dims, min_side=2
)
)
shape = helpers.broadcast_shapes(shape1, shape2, shape3, shape4)
if (test_function == "instance_norm") or (is_torch_backend and not mixed_fn_compos):
shape1 = shape2 = shape3 = shape4 = (shape[-1],)
if data_format == "NCS":
shape = (shape[0], shape[-1], *shape[1:-1])
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"float",
mixed_fn_compos=mixed_fn_compos,
),
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
shape=shape,
max_value=999,
min_value=-1001,
)
)
_, mean = draw(
helpers.dtype_and_values(
dtype=x_dtype,
shape=shape1,
min_value=-1001,
max_value=999,
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
)
)
_, variance = draw(
helpers.dtype_and_values(
dtype=x_dtype,
shape=shape2,
min_value=0,
max_value=999,
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
)
)
_, offset = draw(
st.one_of(
helpers.dtype_and_values(
dtype=x_dtype,
shape=shape3,
min_value=-1001,
max_value=999,
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
),
st.just(([None], [None])),
)
)
_, scale = draw(
st.one_of(
helpers.dtype_and_values(
dtype=x_dtype,
shape=shape4,
min_value=-1001,
max_value=999,
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
),
st.just(([None], [None])),
)
)
eps = draw(
helpers.floats(min_value=1e-5, max_value=0.1, mixed_fn_compos=mixed_fn_compos)
)
momentum = draw(
helpers.floats(min_value=0.0, max_value=1.0, mixed_fn_compos=mixed_fn_compos)
)
return (
x_dtype,
x[0],
mean[0],
variance[0],
offset[0],
scale[0],
eps,
momentum,
data_format,
)
# --- Main --- #
# ------------ #
# batch_norm
@handle_test(
fn_tree="functional.ivy.experimental.batch_norm",
data=_instance_and_batch_norm_helper(min_dims=2, test_function="batch_norm"),
training=st.booleans(),
test_instance_method=st.just(False),
container_flags=st.just([False]),
)
def test_batch_norm(*, data, training, test_flags, backend_fw, fn_name, on_device):
x_dtype, x, mean, variance, offset, scale, eps, momentum, data_format = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
xs_grad_idxs=[[0, 0]],
rtol_=1e-2,
atol_=1e-2,
input_dtypes=x_dtype,
x=x,
mean=mean,
variance=variance,
scale=scale,
offset=offset,
eps=eps,
training=training,
momentum=momentum,
data_format=data_format,
)
# group_norm
@handle_test(
fn_tree="functional.ivy.experimental.group_norm",
data=_group_norm_helper(),
)
def test_group_norm(
*,
data,
test_flags,
backend_fw,
fn_name,
on_device,
):
x_dtype, x, num_groups, data_format, scale, offset, eps = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
xs_grad_idxs=[[0, 0]],
rtol_=1e-1,
atol_=1e-1,
input_dtypes=x_dtype,
x=x,
num_groups=num_groups,
scale=scale,
offset=offset,
eps=eps,
data_format=data_format,
)
@handle_test(
fn_tree="functional.ivy.experimental.instance_norm",
data=_instance_and_batch_norm_helper(min_dims=3),
training=st.booleans(),
)
def test_instance_norm(*, data, training, test_flags, backend_fw, fn_name, on_device):
x_dtype, x, mean, variance, offset, scale, eps, momentum, data_format = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
xs_grad_idxs=[[0, 0]],
rtol_=1e-1,
atol_=1e-1,
input_dtypes=x_dtype,
x=x,
mean=mean,
variance=variance,
scale=scale,
offset=offset,
eps=eps,
training=training,
momentum=momentum,
data_format=data_format,
)
@handle_test(
fn_tree="functional.ivy.experimental.l1_normalize",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"), valid_axis=True
),
)
def test_l1_normalize(*, dtype_values_axis, test_flags, backend_fw, fn_name, on_device):
x_dtype, x, axis = dtype_values_axis
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
input_dtypes=x_dtype,
x=x,
axis=axis,
)
# local_response_norm
@handle_test(
fn_tree="functional.ivy.experimental.local_response_norm",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=4,
max_num_dims=4,
min_dim_size=1,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
),
size=st.integers(min_value=1, max_value=10),
bias=st.floats(min_value=0.1, max_value=1.5),
alpha=st.floats(min_value=1e-4, max_value=1.2),
beta=st.floats(min_value=0.1, max_value=1.5),
average=st.booleans(),
data_format=st.sampled_from(["NHWC", "NCHW"]),
test_with_out=st.just(False),
test_instance_method=st.just(False),
container_flags=st.just([False]),
test_gradients=st.just(False),
)
def test_local_response_norm(
*,
dtype_and_x,
size,
bias,
alpha,
beta,
average,
data_format,
test_flags,
fn_name,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
input=x[0],
size=size,
bias=bias,
alpha=alpha,
beta=beta,
average=average,
data_format=data_format,
)
| ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_norms.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_norms.py",
"repo_id": "ivy",
"token_count": 4702
} | 66 |
import ivy
import numpy as np
import pytest
@pytest.mark.parametrize(
("weights", "factors", "projections", "true_res"),
[
(
(2, 3),
[[[1, 1], [1, 0]], [[2, 1], [1, 2]], [[1, 1], [1, 0], [1, 0]]],
[[[1, 0], [0, 1]], [[1, 0], [0, 0], [0, -1]]],
[[[7, 4, 4], [8, 2, 2]], [[4, 4, 4], [0, 0, 0], [-2, -2, -2]]],
)
],
)
def test_apply_parafac2_projections(weights, factors, projections, true_res):
weights = ivy.array(weights)
factors = [ivy.array(f) for f in factors]
projections = [ivy.array(p) for p in projections]
true_res = [ivy.array(t) for t in true_res]
new_weights, projected_factors = ivy.Parafac2Tensor.apply_parafac2_projections(
(weights, factors, projections)
)
np.allclose(new_weights, weights)
for i, Bi in enumerate(projected_factors[1]):
np.allclose(ivy.dot(projections[i], factors[1]), Bi)
@pytest.mark.parametrize(
("shape", "rank"),
[
(
[(4, 5)] * 3,
2,
)
],
)
def test_parafac2_normalise(shape, rank):
parafac2_tensor = ivy.random_parafac2(shape, rank)
normalised_parafac2_tensor = ivy.Parafac2Tensor.parafac2_normalise(
parafac2_tensor
) # , copy=copy)
expected_norm = ivy.ones((rank,))
for f in normalised_parafac2_tensor[1]:
norm = ivy.sqrt(ivy.sum(ivy.square(f), axis=0))
assert np.allclose(norm, expected_norm)
assert np.allclose(
ivy.Parafac2Tensor.parafac2_to_tensor(parafac2_tensor),
ivy.Parafac2Tensor.parafac2_to_tensor(normalised_parafac2_tensor),
)
@pytest.mark.parametrize(
("weights", "factors", "projections", "true_res"),
[
(
(2, 3),
[[[1, 1], [1, 0]], [[2, 1], [1, 2]], [[1, 1], [1, 0], [1, 0]]],
[[[1, 0], [0, 1]], [[1, 0], [0, 0], [0, -1]]],
[[[7, 4, 4], [8, 2, 2]], [[4, 4, 4], [0, 0, 0], [-2, -2, -2]]],
)
],
)
def test_parafac2_to_slices(weights, factors, projections, true_res):
weights = ivy.array(weights)
factors = [ivy.array(f) for f in factors]
projections = [ivy.array(p) for p in projections]
true_res = [ivy.array(t) for t in true_res]
for i, true_slice in enumerate(true_res):
assert np.allclose(
ivy.Parafac2Tensor.parafac2_to_slice((weights, factors, projections), i),
true_slice,
)
for true_slice, est_slice in zip(
true_res, ivy.Parafac2Tensor.parafac2_to_slices((weights, factors, projections))
):
np.allclose(true_slice, est_slice)
@pytest.mark.parametrize(
("weights", "factors", "projections", "true_res"),
[
(
(2, 3),
[[[1, 1], [1, 0]], [[2, 1], [1, 2]], [[1, 1], [1, 0], [1, 0]]],
[[[0, 0], [1, 0], [0, 1]], [[1, 0], [0, 0], [0, -1]]],
[[[0, 0, 0], [7, 4, 4], [8, 2, 2]], [[4, 4, 4], [0, 0, 0], [-2, -2, -2]]],
)
],
)
def test_parafac2_to_tensor(weights, factors, projections, true_res):
weights = ivy.array(weights)
factors = [ivy.array(f) for f in factors]
projections = [ivy.array(p) for p in projections]
true_res = ivy.array(true_res)
res = ivy.Parafac2Tensor.parafac2_to_tensor((weights, factors, projections))
assert np.allclose(res, true_res)
@pytest.mark.parametrize(
("shape", "rank"),
[
(
[(4, 5)] * 3,
2,
)
],
)
def test_parafac2_to_unfolded(shape, rank):
pf2_tensor = ivy.random_parafac2(shape, rank)
full_tensor = ivy.Parafac2Tensor.parafac2_to_tensor(pf2_tensor)
for mode in range(ivy.get_num_dims(full_tensor)):
assert np.allclose(
ivy.Parafac2Tensor.parafac2_to_unfolded(pf2_tensor, mode),
ivy.unfold(full_tensor, mode),
)
@pytest.mark.parametrize(
("shape", "rank"),
[
(
[(4, 5)] * 3,
2,
)
],
)
def test_parafac2_to_vec(shape, rank):
pf2_tensor = ivy.random_parafac2(shape, rank)
full_tensor = ivy.Parafac2Tensor.parafac2_to_tensor(pf2_tensor)
np.allclose(
ivy.Parafac2Tensor.parafac2_to_vec(pf2_tensor),
ivy.reshape(full_tensor, (-1)),
)
@pytest.mark.parametrize(
("true_shape", "true_rank"),
[
(
[(4, 5)] * 3,
2,
)
],
)
def test_validate_parafac2_tensor(true_shape, true_rank):
weights, factors, projections = ivy.random_parafac2(true_shape, true_rank)
# Check shape and rank returned
shape, rank = ivy.Parafac2Tensor.validate_parafac2_tensor(
(weights, factors, projections)
)
np.testing.assert_equal(
true_shape,
shape,
err_msg=f"Returned incorrect shape (got {shape}, expected {true_shape})",
)
np.testing.assert_equal(
true_rank,
rank,
err_msg=f"Returned incorrect rank (got {rank}, expected {true_rank})",
)
# One of the factors has the wrong rank
for mode in range(3):
false_shape = (ivy.shape(factors[mode])[0], true_rank + 1)
factors[mode], copy = ivy.random_uniform(shape=false_shape), factors[mode]
with np.testing.assert_raises(ValueError):
ivy.Parafac2Tensor.validate_parafac2_tensor((weights, factors, projections))
factors[mode] = copy
# Not three factor matrices
with np.testing.assert_raises(ValueError):
ivy.Parafac2Tensor.validate_parafac2_tensor((weights, factors[1:], projections))
# Not enough projections
with np.testing.assert_raises(ValueError):
ivy.Parafac2Tensor.validate_parafac2_tensor((weights, factors, projections[1:]))
# Wrong number of weights
with np.testing.assert_raises(ValueError):
ivy.Parafac2Tensor.validate_parafac2_tensor((weights[1:], factors, projections))
# The projections aren't orthogonal
false_projections = [ivy.random_uniform(shape=ivy.shape(P)) for P in projections]
with np.testing.assert_raises(ValueError):
ivy.Parafac2Tensor.validate_parafac2_tensor(
(weights, factors, false_projections)
)
| ivy/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_parafac2_tensor.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_parafac2_tensor.py",
"repo_id": "ivy",
"token_count": 3028
} | 67 |
"""Collection of tests for losses."""
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_method
# Binary Cross Entropy Loss
@handle_method(
method_tree="stateful.losses.BinaryCrossEntropyLoss.__call__",
dtype_and_true=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_value=1e-04,
max_value=1,
allow_inf=False,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
shape=(5,),
),
dtype_and_pred=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=1e-04,
max_value=1,
allow_inf=False,
exclude_min=True,
exclude_max=True,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
shape=(5,),
),
dtype_and_pos=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=1e-04,
max_value=1,
allow_inf=False,
exclude_min=True,
exclude_max=True,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
shape=(5,),
),
reduction=st.sampled_from(["none", "sum", "mean"]),
axis=helpers.ints(min_value=-1, max_value=0),
epsilon=helpers.floats(min_value=0, max_value=1.0),
from_logits=st.booleans(),
method_num_positional_args=helpers.num_positional_args(
fn_name="BinaryCrossEntropyLoss._forward"
),
)
def test_binary_cross_entropy_loss(
*,
dtype_and_true,
dtype_and_pred,
dtype_and_pos,
backend_fw,
from_logits,
reduction,
axis,
epsilon,
class_name,
method_name,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
dtype_true, true = dtype_and_true
dtype_pred, pred = dtype_and_pred
dtype_pos_weight, pos_weight = dtype_and_pos
if from_logits:
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
method_input_dtypes=dtype_true + dtype_pred + dtype_pos_weight,
init_all_as_kwargs_np={
"from_logits": from_logits,
"epsilon": epsilon,
"reduction": reduction,
},
method_all_as_kwargs_np={
"true": true[0],
"pred": pred[0],
"pos_weight": pos_weight[0],
"axis": axis,
},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
on_device=on_device,
)
else:
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
method_input_dtypes=dtype_true + dtype_pred,
init_all_as_kwargs_np={
"from_logits": from_logits,
"epsilon": epsilon,
"reduction": reduction,
},
method_all_as_kwargs_np={
"true": true[0],
"pred": pred[0],
"axis": axis,
},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
on_device=on_device,
)
# Cross Entropy Loss
@handle_method(
method_tree="stateful.losses.CrossEntropyLoss.__call__",
dtype_and_targets=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=3,
allow_inf=False,
min_num_dims=1,
max_num_dims=3,
min_dim_size=3,
),
dtype_and_log_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
small_abs_safety_factor=4,
safety_factor_scale="log",
min_value=0,
max_value=3,
allow_inf=False,
exclude_min=True,
exclude_max=True,
min_num_dims=1,
max_num_dims=3,
min_dim_size=3,
),
axis=st.integers(min_value=-1, max_value=1),
method_num_positional_args=helpers.num_positional_args(
fn_name="CrossEntropyLoss._forward"
),
reduction=st.sampled_from(["none", "mean", "sum"]),
)
def test_cross_entropy_loss(
*,
dtype_and_targets,
dtype_and_log_input,
axis,
reduction,
class_name,
backend_fw,
method_name,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
targets_dtype, targets = dtype_and_targets
log_input_dtype, log_input = dtype_and_log_input
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
method_input_dtypes=targets_dtype + log_input_dtype,
init_all_as_kwargs_np={
"axis": axis,
"reduction": reduction,
},
method_all_as_kwargs_np={"true": targets[0], "pred": log_input[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
on_device=on_device,
)
# Log Poisson Loss
@handle_method(
method_tree="stateful.losses.LogPoissonLoss.__call__",
dtype_and_targets=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=3,
allow_inf=False,
min_num_dims=1,
max_num_dims=3,
min_dim_size=3,
),
dtype_and_log_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
small_abs_safety_factor=4,
safety_factor_scale="log",
min_value=0,
max_value=3,
allow_inf=False,
exclude_min=True,
exclude_max=True,
min_num_dims=1,
max_num_dims=3,
min_dim_size=3,
),
axis=st.integers(min_value=-1, max_value=1),
compute_full_loss=st.sampled_from([True, False]),
method_num_positional_args=helpers.num_positional_args(
fn_name="LogPoissonLoss._forward"
),
reduction=st.sampled_from(["none", "mean", "sum"]),
)
def test_log_poisson_loss(
*,
dtype_and_targets,
dtype_and_log_input,
compute_full_loss,
axis,
reduction,
class_name,
backend_fw,
method_name,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
targets_dtype, targets = dtype_and_targets
log_input_dtype, log_input = dtype_and_log_input
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
method_input_dtypes=targets_dtype + log_input_dtype,
init_all_as_kwargs_np={
"compute_full_loss": compute_full_loss,
"axis": axis,
"reduction": reduction,
},
method_all_as_kwargs_np={"true": targets[0], "pred": log_input[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
on_device=on_device,
)
| ivy/ivy_tests/test_ivy/test_stateful/test_losses.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_stateful/test_losses.py",
"repo_id": "ivy",
"token_count": 3860
} | 68 |
import astunparse
import ast
import json
import sys
import subprocess
import os
import logging
from shared import BackendNativeObject
_backend_reference = "tensorflow"
_backend_import_alias = "tf"
_target_backend = ""
_config = None
_not_imlpemented_exc_name = "NotImplementedError"
_decorator_black_list = [
"with_unsupported_dtypes",
"with_supported_dtypes",
"with_unsupported_devices",
"with_supported_devices",
"with_unsupported_device_and_dtypes",
"with_supported_device_and_dtypes",
]
type_mapping = {}
class ReferenceDataGetter(ast.NodeVisitor):
def __init__(self):
self.natives = {}
self.framework_imports = []
def visit_ImportFrom(self, node: ast.ImportFrom):
if node.module is not None and node.module.startswith(_backend_reference):
for name in node.names:
self.framework_imports.append(
BackendNativeObject(name=name.name, namespace=node.module)
)
def visit_Assign(self, node: ast.Assign):
name = node.targets[0].id.lower()
if name.startswith("native"):
# [:-1] to ignore \n from unparser
unparsed_value = astunparse.unparse(node.value)[:-1]
if unparsed_value in ["int", "float", "bool", "str"]:
return
if unparsed_value in self.natives.keys():
self.natives[node.targets[0].id] = self.natives[unparsed_value]
else:
self.natives[node.targets[0].id] = unparsed_value
class SourceTransformer(ast.NodeTransformer):
def __init__(self, type_map, keep_private=False):
self.keep_private = keep_private
self.type_map = type_map
self.registered_imports = set()
def _get_full_name(self, node):
return astunparse.unparse(node)
def visit_Import(self, node: ast.Import):
# Remove reference backend imports
if node.names[0].name == _backend_reference:
self.generic_visit(node)
return
self.generic_visit(node)
return node
def visit_Name(self, node: ast.Name):
try:
old_id = node.id
node.id = self.type_map[node.id].full_name()
except KeyError:
pass
else:
namespace = self.type_map[old_id].namespace
if namespace != "":
self.registered_imports.add(namespace)
self.generic_visit(node)
return node
def visit_Attribute(self, node: ast.Attribute):
str_repr = self._get_full_name(node).strip()
str_repr_without_package = str_repr.partition(".")[-1]
if str_repr in self.type_map.keys():
new_node = ast.parse(self.type_map[str_repr].full_name())
node = new_node.body[0].value
namespace = self.type_map[str_repr].namespace
if namespace != "":
self.registered_imports.add(namespace)
elif str_repr_without_package in self.type_map.keys():
new_node = ast.parse(self.type_map[str_repr_without_package].full_name())
node = new_node.body[0].value
namespace = self.type_map[str_repr_without_package].namespace
if namespace != "":
self.registered_imports.add(namespace)
self.generic_visit(node)
return node
def visit_Assign(self, node: ast.Assign):
if not self.keep_private:
for name in node.targets:
if name.id.startswith("_") and not name.id.endswith("__"):
return None
self.generic_visit(node)
return node
def visit_FunctionDef(self, node: ast.FunctionDef):
# Remove private functions
if (
not self.keep_private
and node.name.startswith("_")
and not node.name.endswith("__")
):
self.generic_visit(node)
return None
# Replace function body with Pass
node.body = [
ast.Raise(
exc=ast.Call(
func=ast.Name(id=_not_imlpemented_exc_name, ctx=ast.Load()),
args=[
ast.Constant(
value=f"{_target_backend}.{node.name} Not Implemented",
kind=None,
)
],
keywords=[],
),
cause=None,
)
]
# Update decorators not to include ones in the blacklist
# Add Not Implemented decorator
new_list = []
for entry in node.decorator_list:
if isinstance(entry, ast.Call):
name_of_decorator = entry.func.id
else:
name_of_decorator = entry.id
if name_of_decorator in _decorator_black_list:
continue
new_list.append(entry)
node.decorator_list = new_list
self.generic_visit(node)
return node
class InitFileTransformer(ast.NodeTransformer):
def __init__(self, variables_to_update: dict):
self.variables = variables_to_update
def visit_Assign(self, node: ast.Assign):
target_str = astunparse.unparse(node.targets[0])[:-1]
if target_str in self.variables:
node.value = ast.parse(self.variables[target_str]).body[0].value
self.generic_visit(node)
return node
# Modify the AST tree
def _parse_module(tree: ast.Module, keep_private=False) -> ast.Module:
transformer = SourceTransformer(type_mapping, keep_private=keep_private)
transformer.visit(tree)
for obj in transformer.registered_imports:
import_node = ast.Import(names=[ast.alias(name=obj, asname=None)])
tree.body.insert(0, import_node)
# Add target backend import, add type hints classes imports
ast.fix_missing_locations(tree)
return tree
def _copy_tree(backend_reference_path: str, backend_generation_path: str):
for root, _, files in os.walk(backend_reference_path):
# Skip pycache dirs
if root.endswith("__pycache__"):
continue
relative_path = os.path.relpath(root, backend_reference_path)
# Make backend dirs
os.makedirs(os.path.join(backend_generation_path, relative_path), exist_ok=True)
for name in files:
# Skip pycache modules
if name.endswith("pyc"):
continue
with open(os.path.join(root, name)) as ref_file:
# Read source file from reference backend
ref_str = ref_file.read()
ref_tree = ast.parse(ref_str)
try:
tree_to_write = _parse_module(ref_tree)
except Exception as e:
print(f"Failed to parse {os.path.join(root, name)}, {e}")
# Create target backend
with open(
os.path.join(backend_generation_path, relative_path, name), "w"
) as generated_file:
generated_file.write(astunparse.unparse(tree_to_write))
def _create_type_mapping(config: dict, reference_backend_init_path: str):
# print pwd for debugging
print(os.getcwd())
print
with open(reference_backend_init_path, "r") as file:
file_src = file.read()
init_tree = ast.parse(file_src)
ast_visitor = ReferenceDataGetter()
ast_visitor.visit(init_tree)
del ast_visitor.natives["native_inplace_support"]
mapping = {}
for key, value in ast_visitor.natives.items():
if key not in config.keys():
logging.warning(f"type {key} found in reference backend but not in config.")
continue
obj = config[key]
mapping[value] = BackendNativeObject(
name=obj["name"], namespace=obj["namespace"]
)
global type_mapping
type_mapping = mapping
def generate(config_file):
global _config
with open(config_file, "r") as file:
_config = json.load(file)
global _target_backend
_target_backend = _config["name"]
backends_root = "ivy/functional/backends/"
backend_reference_path = backends_root + _backend_reference
backend_generation_path = backends_root + _target_backend
_create_type_mapping(_config, f"{backend_reference_path}/__init__.py")
# Copy and generate backend tree
_copy_tree(backend_reference_path, backend_generation_path)
with open(os.path.join(backend_reference_path, "__init__.py")) as ref_file:
# Read source file from reference backend
ref_str = ref_file.read()
ref_tree = ast.parse(ref_str)
try:
tree_to_write = _parse_module(ref_tree, keep_private=True)
params = {
"valid_devices": f"({_config['valid_devices']},)",
"invalid_devices": f"({_config['invalid_devices']},)",
"backend": f'"{_config["name"]}"',
"supports_gradients": _config["supports_gradients"].__str__(),
"native_inplace_support": _config["native_inplace_support"].__str__(),
}
valids = [
"valid_dtypes",
"valid_numeric_dtypes",
"valid_float_dtypes",
"valid_complex_dtypes",
"valid_int_dtypes",
"valid_uint_dtypes",
]
for key in valids:
params[f"{key}_dict"] = {
"None": tuple(f"ivy.{x}" for x in _config[key])
}.__str__()
params[f"in{key}_dict"] = {
"None": tuple(f"ivy.{x}" for x in _config[f"in{key}"])
}.__str__()
InitFileTransformer(params).visit(tree_to_write)
except Exception as e:
print(
"Failed to parse "
f"{os.path.join(backend_generation_path, '__init__.py')}, {e}"
)
# Create target backend
with open(
os.path.join(backend_generation_path, "__init__.py"), "w"
) as generated_file:
generated_file.write(astunparse.unparse(tree_to_write))
subprocess.run(["black", "-q", backend_generation_path])
subprocess.run(
[
"autoflake",
"-i",
"--remove-all-unused-imports",
"--ignore-init-module-imports",
"--quiet",
"-r",
backend_generation_path,
]
)
if __name__ == "__main__":
# Allow to call directly using config path
generate(sys.argv[1])
| ivy/scripts/backend_generation/tree_generation.py/0 | {
"file_path": "ivy/scripts/backend_generation/tree_generation.py",
"repo_id": "ivy",
"token_count": 4940
} | 69 |
# Run Tests
import os
import sys
from pymongo import MongoClient
from pymongo.errors import WriteError
import json
import old_run_test_helpers as old_helpers
from helpers import (
get_latest_package_version,
get_submodule_and_function_name,
)
from get_all_tests import BACKENDS
if __name__ == "__main__":
redis_url = sys.argv[1]
redis_pass = sys.argv[2]
mongo_key = sys.argv[3]
version_flag = sys.argv[4]
gpu_flag = sys.argv[5]
workflow_id = sys.argv[6]
priority_flag = sys.argv[7]
tracer_flag = sys.argv[8]
if len(sys.argv) > 9 and sys.argv[9] != "null":
run_id = sys.argv[9]
else:
run_id = f"https://github.com/unifyai/ivy/actions/runs/{workflow_id}"
device = "cpu"
if gpu_flag == "true":
device = "gpu"
tracer_str = ""
if tracer_flag == "true":
tracer_flag = "tracer_"
tracer_str = " --with-trace-testing"
else:
tracer_flag = ""
cluster = MongoClient(
f"mongodb+srv://deep-ivy:{mongo_key}@cluster0.qdvf8q3.mongodb.net/?retryWrites=true&w=majority" # noqa
)
db = cluster["ci_dashboard"]
# old
if priority_flag == "true":
priority_flag = True
else:
priority_flag = False
failed = False
old_db = cluster["Ivy_tests_multi_gpu"]
old_db_priority = cluster["Ivy_tests_priority"]
# pull gpu image for gpu testing
if device == "gpu":
os.system("docker pull unifyai/ivy:latest-gpu")
# read the tests to be run
with open("tests_to_run", "r") as f:
for line in f:
print(f"\n{'*' * 100}")
print(f"{line[:-1]}")
print(f"{'*' * 100}\n")
# get the test, submodule, backend and version
test_path, backend = line.strip().split(",")
is_frontend_test = "test_frontends" in test_path
collection = db["frontend_tests"] if is_frontend_test else db["ivy_tests"]
submodule, function_name = get_submodule_and_function_name(
test_path, is_frontend_test
)
version = get_latest_package_version(backend).replace(".", "_")
# old
coll, submod, test_fn = old_helpers.get_submodule(test_path)
backend_version = "latest-stable"
# multi-version tests
if version_flag == "true":
backends = [backend.strip()]
backend_name, backend_version = backend.split("/")
other_backends = [
fw for fw in BACKENDS if (fw not in (backend_name, "paddle"))
]
for other_backend in other_backends:
backends.append(
other_backend + "/" + get_latest_package_version(other_backend)
)
print("Backends:", backends)
os.system(
'docker run --name test-container -v "$(pwd)":/ivy/ivy '
f"-e REDIS_URL={redis_url} -e REDIS_PASSWD={redis_pass} "
"-itd unifyai/multiversion:latest /bin/bash -c"
f'python multiversion_framework_directory.py {" ".join(backends)};'
)
os.system(
"docker exec test-container cd ivy; python3 -m pytest --tb=short "
f"{test_path} --backend={backend.strip()}"
)
backend = backend.split("/")[0] + "\n"
backend_version = backend_version.strip()
else:
device_str = ""
device_access_str = ""
image = "unifyai/ivy:latest"
# gpu tests
if device == "gpu":
image = "unifyai/ivy:latest-gpu"
device_str = " --device=gpu:0"
device_access_str = " --gpus all"
os.system("docker pull unifyai/ivy:latest-gpu")
os.system(
f"docker run{device_access_str} --name test-container -v "
'"$(pwd)":/ivy -v "$(pwd)"/.hypothesis:/.hypothesis -e '
f"REDIS_URL={redis_url} -e REDIS_PASSWD={redis_pass} -itd {image}"
)
command = (
"docker exec test-container python3 -m pytest --tb=short"
f" {test_path}{device_str} --backend {backend}{tracer_str}"
)
os.system(command)
# run the test
sys.stdout.flush()
failed = bool(os.system(command))
# old (populate the old database with results)
if not failed:
res = old_helpers.make_clickable(
run_id, old_helpers.result_config["success"]
)
else:
res = old_helpers.make_clickable(
run_id, old_helpers.result_config["failure"]
)
failed = True
frontend_version = None
if coll[0] in ["numpy", "jax", "tensorflow", "torch", "paddle"]:
frontend_version = "latest-stable"
try:
if priority_flag:
print("Updating Priority DB")
old_helpers.update_individual_test_results(
old_db_priority[coll[0]],
coll[1],
submod,
backend,
test_fn,
res,
"latest-stable",
frontend_version,
device,
)
else:
print(backend_version)
old_helpers.update_individual_test_results(
old_db[coll[0]],
coll[1],
submod,
backend,
test_fn,
res,
backend_version,
frontend_version,
device,
)
except WriteError:
print("Old DB Write Error")
# skip updating db for instance methods as of now
# run transpilation tests if the test passed
if not failed and function_name:
print(f"\n{'*' * 100}")
print(f"{line[:-1]} --> transpilation tests")
print(f"{'*' * 100}\n")
command = f"{command} --num-examples 5 --with-transpile"
sys.stdout.flush()
os.system(command)
os.system(
"docker cp test-container:/ivy/report.json"
f" {__file__[: __file__.rfind(os.sep)]}/report.json"
)
# load data from report if generated
report_path = os.path.join(
__file__[: __file__.rfind(os.sep)], "report.json"
)
report_content = {}
if os.path.exists(report_path):
report_content = json.load(open(report_path))
# create a prefix str for the update query for frontend tests
# (with frontend version)
test_info = {}
prefix_str = ""
if is_frontend_test:
frontend = test_path[test_path.find("test_frontends") :].split(os.sep)[
1
][5:]
frontend_version = get_latest_package_version(frontend).replace(
".", "_"
)
test_info["frontend"] = frontend
prefix_str = f"{frontend_version}."
# initialize test information for ci_dashboard db
# format of the last 2 keys
# <frontend_version>.<backend_name>.<backend_version>.<status>.<device>
# <backend_name>.<backend_version>.<status>.<device>
# for frontend tests and ivy tests respectively
test_info = {
"_id": function_name,
"test_path": test_path,
"submodule": submodule,
f"{prefix_str}{backend}.{version}.{tracer_flag}status.{device}": (
not failed
),
f"{prefix_str}{backend}.{version}.{tracer_flag}workflow.{device}": (
run_id
),
}
# add transpilation metrics if report generated
if not failed and report_content and not tracer_flag:
if is_frontend_test:
test_info = {
**test_info,
"fw_time": report_content["fw_time"],
"ivy_nodes": report_content["ivy_nodes"],
}
transpilation_metrics = {
"nodes": report_content["nodes"][backend],
"time": report_content["time"][backend],
"args": report_content["args"][backend],
"kwargs": report_content["kwargs"][backend],
}
for metric, value in transpilation_metrics.items():
test_info[f"{prefix_str}{backend}.{version}.{metric}"] = value
# populate the ci_dashboard db, skip instance methods
if function_name:
id = test_info.pop("_id")
print(
collection.update_one({"_id": id}, {"$set": test_info}, upsert=True)
)
# delete the container
os.system("docker rm -f test-container")
# if any tests fail, the workflow fails
if failed:
sys.exit(1)
| ivy/scripts/run_tests/run_tests.py/0 | {
"file_path": "ivy/scripts/run_tests/run_tests.py",
"repo_id": "ivy",
"token_count": 5402
} | 70 |
#!/bin/bash -e
git checkout "$1"
git remote add upstream https://github.com/unifyai/ivy.git || true
git fetch upstream
git merge upstream/main --no-edit
git push
| ivy/scripts/shell/merge_with_upstream.sh/0 | {
"file_path": "ivy/scripts/shell/merge_with_upstream.sh",
"repo_id": "ivy",
"token_count": 52
} | 71 |
{
"ivy": {
"functional": ["negative.so",
"bitwise_xor.so",
"vander.so",
"std.so",
"atanh.so",
"argmin.so",
"asinh.so",
"squeeze.so",
"square.so",
"matrix_norm.so",
"not_equal.so",
"log.so",
"expand_dims.so",
"divide.so",
"min.so",
"unique_counts.so",
"vector_norm.so",
"matrix_rank.so",
"equal.so",
"expm1.so",
"sigmoid.so",
"adam_update.so",
"cumsum.so",
"lars_update.so",
"isinf.so",
"pinv.so",
"deg2rad.so",
"var.so",
"pow.so",
"random_uniform.so",
"trapz.so",
"adam_step.so",
"tile.so",
"tan.so",
"sparse_cross_entropy.so",
"det.so",
"round.so",
"acos.so",
"matrix_power.so",
"while_loop.so",
"cross.so",
"trunc.so",
"jac.so",
"sqrt.so",
"bitwise_left_shift.so",
"atan.so",
"clip.so",
"conv2d_transpose.so",
"exp2.so",
"less.so",
"conv2d.so",
"einsum.so",
"searchsorted.so",
"floor.so",
"cross_entropy.so",
"seed.so",
"scaled_dot_product_attention.so",
"bitwise_and.so",
"logaddexp2.so",
"optimizer_update.so",
"mish.so",
"mean.so",
"argsort.so",
"eigh.so",
"svd.so",
"cumprod.so",
"eigvalsh.so",
"asin.so",
"random_normal.so",
"try_except.so",
"split.so",
"log_softmax.so",
"nan_to_num.so",
"cmp_isnot.so",
"matrix_transpose.so",
"diag.so",
"remainder.so",
"sinh.so",
"bitwise_or.so",
"softplus.so",
"flip.so",
"conv_general_transpose.so",
"shuffle.so",
"roi_align.so",
"log1p.so",
"tensordot.so",
"zero_pad.so",
"logical_xor.so",
"inv.so",
"softmax.so",
"greater.so",
"logical_not.so",
"conv1d.so",
"vecdot.so",
"multi_head_attention.so",
"diagonal.so",
"isnan.so",
"inner.so",
"bitwise_invert.so",
"slogdet.so",
"tensorsolve.so",
"value_and_grad.so",
"depthwise_conv2d.so",
"trunc_divide.so",
"erf.so",
"svdvals.so",
"reshape.so",
"constant_pad.so",
"unique_all.so",
"qr.so",
"isfinite.so",
"logical_and.so",
"if_else.so",
"nonzero.so",
"tanh.so",
"conv.so",
"add.so",
"subtract.so",
"argmax.so",
"maximum.so",
"real.so",
"msort.so",
"fmin.so",
"abs.so",
"lstm_update.so",
"permute_dims.so",
"lamb_update.so",
"swapaxes.so",
"cosh.so",
"log10.so",
"bitwise_right_shift.so",
"for_loop.so",
"imag.so",
"dropout.so",
"where.so",
"roll.so",
"leaky_relu.so",
"fmod.so",
"randint.so",
"logical_or.so",
"relu.so",
"binary_cross_entropy.so",
"unique_values.so",
"linear.so",
"sin.so",
"vector_to_skew_symmetric_matrix.so",
"closest_valid_dtype.so",
"atan2.so",
"stack.so",
"max.so",
"sign.so",
"exp.so",
"cholesky.so",
"ceil.so",
"cmp_is.so",
"repeat.so",
"gelu.so",
"reciprocal.so",
"unstack.so",
"conv1d_transpose.so",
"less_equal.so",
"stop_gradient.so",
"angle.so",
"matmul.so",
"cos.so",
"execute_with_gradients.so",
"gradient_descent_update.so",
"softsign.so",
"unique_inverse.so",
"solve.so",
"sum.so",
"argwhere.so",
"greater_equal.so",
"outer.so",
"rad2deg.so",
"floor_divide.so",
"conv_general_dilated.so",
"logaddexp.so",
"concat.so",
"positive.so",
"minimum.so",
"log2.so",
"lcm.so",
"acosh.so",
"conv3d_transpose.so",
"multinomial.so",
"lu_factor.so",
"layer_norm.so",
"eig.so",
"conv3d.so",
"sort.so",
"isreal.so",
"multiply.so",
"gcd.so",
"grad.so",
"prod.so"]
}
}
| ivy/wrappers.json/0 | {
"file_path": "ivy/wrappers.json",
"repo_id": "ivy",
"token_count": 2925
} | 72 |
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="jdk" jdkName="Remote Python 3.10.0 Docker (unifyai/ivy:latest)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="PyDocumentationSettings">
<option name="format" value="NUMPY" />
<option name="myDocStringFormat" value="NumPy" />
</component>
<component name="TestRunnerService">
<option name="PROJECT_TEST_RUNNER" value="py.test" />
</component>
</module>
| ivy/.idea/ivy.iml/0 | {
"file_path": "ivy/.idea/ivy.iml",
"repo_id": "ivy",
"token_count": 224
} | 0 |
## Frontend Task Checklist
#### IMPORTANT NOTICE 🚨:
The [Ivy Docs](https://unify.ai/docs/ivy/) represent the ground truth for the task descriptions and this checklist should only be used as a supplementary item to aid with the review process.
Please note that the contributor is not expected to understand everything in the checklist. It's mainly here for the reviewer to make sure everything has been done correctly 🙂
#### LEGEND 🗺:
- ❌ : Check item is not completed.
- ✅ : Check item is ready for review.
- 🆘 : Stuck/Doubting implementation (PR author should add comments explaining why).
- ⏩ : Check is not applicable to function (skip).
- 🆗 : Check item is implemented and does not require any edits.
#### CHECKS 📑:
1. - [ ] ❌: The function/method definition is not missing any of the original arguments.
2. - [ ] ❌: In case the function/method to be implemented is an alias of an existing function/method:
1. - [ ] ❌: It is being declared as such by setting `fun1 = fun2`, rather than being re-implemented from scratch.
2. - [ ] ❌: The alias is added to the existing function/method's test in the `aliases` parameter of `handle_frontend_test`/`handle_frontend_method`.
3. - [ ] ❌: The naming of the function/method and its arguments exactly matches the original.
4. - [ ] ❌: No defined argument is being ignored in the function/method's implementation.
5. - [ ] ❌: In special cases where an argument's implementation should be pending due to an incomplete superset of an ivy function:
1. - [ ] ❌: A descriptive comment has been left under the `Implement superset behavior` ToDo list in https://github.com/unifyai/ivy/issues/6406.
2. - [ ] ❌: A ToDo comment has been added prompting to pass the frontend argument to the ivy function whose behavior is to be extended.
6. - [ ] ❌: In case a frontend function is being added:
1. - [ ] ❌: It is a composition of ivy functions.
2. - [ ] ❌: In case the needed composition is long (using numerous ivy functions), a `Missing Function Suggestion` issue has been opened to suggest a new ivy function should be added to shorten the frontend implementation.
3. - [ ] ❌: `@to_ivy_arrays_and_back` has been added to the function.
7. - [ ] ❌: In case a frontend method is being added:
1. - [ ] ❌: It is composed of existing frontend functions or methods.
2. - [ ] ❌: If a required frontend function has not yet been added, the method may be implemented as a composition of ivy functions, making sure that:
- [ ] ❌: `@to_ivy_arrays_and_back` has been added to the method.
- [ ] ❌: A ToDo comment has been made prompting to remove the decorator and update the implementation as soon as the missing function has been added.
8. - [ ] ❌: The function/method's test has been added (except in the alias case mentioned in <2>):
1. - [ ] ❌: All supported arguments are being generated in `handle_frontend_test`/`handle_frontend_method` and passed to `test_frontend_function`/`test_frontend_method`.
2. - [ ] ❌: The argument generation covers all possible supported values. Array sizes, dimensions, and axes adhere to the full supported set of the original function/method.
3. - [ ] ❌: The `available_dtypes` parameter passed to the helper generating the function/method's input array is set to `helpers.get_dtypes("valid")`. If there are unsupported dtypes that cause the test to fail, they should be handled by adding `@with_supported_dtypes`/`@with_unsupported_dtype` to the function/method.
9. - [ ] ❌: The PR is not introducing any test failures.
1. - [ ] ❌: The lint checks are passing.
2. - [ ] ❌: The implemented test is passing for all backends.
10. - [ ] ❌: The PR `closes` a `Sub Task` issue linked to one of the open frontend ToDo lists.
11. - [ ] ❌: The function/method and its test have been added to the correct `.py` files corresponding to the addressed ToDo list.
12. - [ ] ❌: The PR only contains changes relevant to the addressed subtask.
| ivy/automation_tools/checklists/frontend_checklist.md/0 | {
"file_path": "ivy/automation_tools/checklists/frontend_checklist.md",
"repo_id": "ivy",
"token_count": 1209
} | 1 |
#!/bin/bash
docker build -t unifyai/ivy:latest --no-cache -f Dockerfile ..
docker build -t unifyai/ivy:latest-gpu --no-cache -f DockerfileGPU ..
| ivy/docker/rebuild_all_dockerfiles.sh/0 | {
"file_path": "ivy/docker/rebuild_all_dockerfiles.sh",
"repo_id": "ivy",
"token_count": 54
} | 2 |
Contributor Rewards
===================
We award a range of badges, each designed to formally recognize the specific achievements of our contributors in various key areas of ivy's development.
Badges
~~~~~~~
**Debugging Dynamos** - These badges are earned by creating useful issues. If you find a problem that isn't listed as an open task, report it by creating a new issue. Make sure to describe the problem thoroughly. If your issue is confirmed as useful and marked as a "Useful Issue" by our team, you'll receive the badge as recognition for your valuable contribution to improving our codebase.
**Merging Master** - These badges are formal recognitions for contributors whose pull requests consistently meet our standards of quality and are successfully merged into the main codebase.
**Merging Wizard** - These exclusive badges are awarded to contributors who successfully get *priority* pull requests merged. This recognition is for handling complex, high-priority tasks that have a substantial impact on the project's progress and success. Priority pull requests are those that close `"Priority Open" issues <https://github.com/unifyai/ivy/labels/Failing%20Test>`_ found within each framework.
**Ivy Inspectors** - These badges are given in acknowledgment of the essential role played by those who review our pull requests. It honours the commitment to quality and diligence in ensuring that each code merge aligns with our standards.
Each badge comes in four distinct tiers – Initial, Bronze, Silver, and Gold, mirroring your growing expertise and commitment in these areas.
Badge Tiers
~~~~~~~~~~~~
.. list-table::
:widths: 50 25 30 30 30
:header-rows: 1
* - GitHub Badge
- Initial (1 Task)
- Bronze (5 Tasks)
- Silver (15 Tasks)
- Gold (30 Tasks)
* - Debugging Dynamos
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_01-00.png
:width: 110
:alt: Alternative text
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_01-02.png
:width: 110
:alt: Alternative text
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_01-03.png
:width: 110
:alt: Alternative text
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_01-04.png
:width: 110
:alt: Alternative text
* - Merging Master
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_04-00.png
:width: 110
:alt: Alternative text
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_04-02.png
:width: 110
:alt: Alternative text
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_04-03.png
:width: 110
:alt: Alternative text
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_04-04.png
:width: 110
:alt: Alternative text
* - Merging Wizard
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_05-00.png
:width: 110
:alt: Alternative text
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_05-02.png
:width: 110
:alt: Alternative text
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_05-03.png
:width: 110
:alt: Alternative text
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_05-04.png
:width: 110
:alt: Alternative text
* - Ivy Inspectors
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_06-00.png
:width: 110
:alt: Alternative text
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_06-02.png
:width: 110
:alt: Alternative text
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_06-03.png
:width: 110
:alt: Alternative text
- .. image:: https://raw.githubusercontent.com/unifyai/ivy/main/.vaunt/badges/badge_06-04.png
:width: 110
:alt: Alternative text
| ivy/docs/overview/contributing/contributor_rewards.rst/0 | {
"file_path": "ivy/docs/overview/contributing/contributor_rewards.rst",
"repo_id": "ivy",
"token_count": 1581
} | 3 |
Docstring Examples
==================
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`docstring examples thread`: https://discord.com/channels/799879767196958751/1189906990307233822
After writing the general docstrings, the final step is to add helpful examples to the docstrings.
There are eight types of examples, which each need to be added:
**Functional** examples show the function being called like so :code:`ivy.func_name(...)`, and these should be added to the docstring of the function in the Ivy API :func:`ivy.func_name`.
**Array instance method** examples show the method being called like so :code:`x.func_name(...)` on an :class:`ivy.Array` instance, and these should be added to the docstring of the :class:`ivy.Array` instance method :meth:`ivy.Array.func_name`.
**Container instance method** examples show the method being called like so :code:`x.func_name(...)` on an :class:`ivy.Container` instance, and these should be added to the docstring of the :class:`ivy.Container` instance method :meth:`ivy.Container.func_name`.
**Array operator** examples show an operation being performed like so :code:`x + y` with :code:`x` being an :class:`ivy.Array` instance, and these should be added to the docstring of the :class:`ivy.Array` special method :meth:`ivy.Array.__<op_name>__`.
**Array reverse operator** examples show an operation being performed like so :code:`x + y` with :code:`x` being a :code:`Number` and :code:`y` being an :class:`ivy.Array` instance. These should be added to the docstring of the :class:`ivy.Array` reverse special method :meth:`ivy.Array.__r<op_name>__`.
**Container operator** examples show an operation being performed like so :code:`x + y` with :code:`x` being an :class:`ivy.Container` instance, and these should be added to the docstring of the :class:`ivy.Container` special method :meth:`ivy.Container.__<op_name>__`.
**Container reverse operator** examples show an operation being performed like so :code:`x + y` with :code:`x` being a :code:`Number` and :code:`y` being an :class:`ivy.Container` instance. These should be added to the docstring of the :class:`ivy.Container` reverse special method :meth:`ivy.Container.__r<op_name>__`.
The first three example types are very common, while the last four, unsurprisingly, are only relevant for *operator* functions such as :func:`ivy.add`, :func:`ivy.subtract`, :func:`ivy.multiply` and :func:`ivy.divide`.
For example, calling any of (:code:`+`, :code:`-`, :code:`*`, :code:`/` etc.) on the array will result in (:meth:`__add__`, :meth:`__sub__`, :meth:`__mul__`, :meth:`__truediv__` etc.) being called on the array class.
**Operator** examples are only relevant for *operator* functions. These are functions which are called when a corresponding operator is applied to an array.
For example, the functions :func:`ivy.add`, :func:`ivy.subtract`, :func:`ivy.multiply` and :func:`ivy.divide` are called when the operators :code:`+`, :code:`-`, :code:`*` and :code:`/` are used respectively.
Under the hood, these operators first call the special methods :meth:`__add__`, :meth:`__sub__`, :meth:`__mul__` and :meth:`__truediv__` respectively, on either the :class:`ivy.Array` or :class:`ivy.Container` instance upon which the operator is being applied.
These special methods in turn call the functions in the Ivy API mentioned above.
**Functional Examples**
To recap, *functional* examples show the function being called like so :code:`ivy.func_name(...)`, and these should be added to the docstring of the function in the Ivy API :func:`ivy.func_name`.
Firstly, we should include *functional* examples with :class:`ivy.Array` instances in the input.
These should:
1. cover all possible variants (explained below) for each of the arguments independently, not combinatorially. This means the number of examples should be equal to the maximum number of variations for a single argument, and not the entire grid of variations across all arguments (further explained in the examples below)
2. vary the values and input shapes considerably between examples
3. start with the simplest examples first. For example, this means using the default values for all optional arguments in the first example, and using small arrays, with a small number of dimensions, and with *simple* values for the function in question
4. show an example with: (a) :code:`out` unused, (b) :code:`out` used to update a new array :code:`y`, and (c) :code:`out` used to inplace update the input array :code:`x` (provided that it shares the same :code:`dtype` and :code:`shape` as the return)
5. If broadcasting is relevant for the function, then show examples which highlight this.
For example, passing in different shapes for two array arguments
For all remaining examples, we can repeat input values from these :class:`ivy.Array` *functional* examples covered by points 1-5.
The purpose of the extra examples with different input types in points 6-18 is to highlight the different contexts in which the function can be called (as an instance method etc.).
The purpose is not to provide an excessive number of variations of possible function inputs.
Next, for *nestable* functions there should be an example that:
6. passes in an :class:`ivy.Container` instance in place of one of the arguments
For *nestable* functions which accept more than one argument, there should also be an example that:
7. passes in :class:`ivy.Container` instances for multiple arguments
In all cases, the containers should have at least two leaves.
For example, the following container is okay to use for example purposes:
.. code-block:: python
x = ivy.Container(a=ivy.array([0.]), b=ivy.array([1.]))
Whereas the following container is not okay to use for example purposes:
.. code-block:: python
x = ivy.Container(a=ivy.array([0.]))
**Array Instance Method Example**
To recap, *array instance method* examples show the method being called like so :code:`x.func_name(...)` on an :class:`ivy.Array` instance, and these should be added to the docstring of the :class:`ivy.Array` instance method :meth:`ivy.Array.func_name`.
These examples are of course only relevant if an instance method for the function exists. If so, this example should simply:
8. call this instance method of the :class:`ivy.Array` class
**Container Instance Method Example**
To recap, *container instance method* examples show the method being called like so :code:`x.func_name(...)` on an :class:`ivy.Container` instance, and these should be added to the docstring of the :class:`ivy.Container` instance method :meth:`ivy.Container.func_name`.
These examples are of course only relevant if an instance method for the function exists.
If so, this example should simply:
9. call this instance method of the :class:`ivy.Container` class
**Array Operator Examples**
To recap, *array operator* examples show an operation being performed like so :code:`x + y` with :code:`x` being an :class:`ivy.Array` instance, and these should be added to the docstring of the :class:`ivy.Array` special method :meth:`ivy.Array.__<op_name>__`.
If the function is an *operator* function, then the *array operator* examples should:
10. call the operator on two :class:`ivy.Array` instances
11. call the operator with an :class:`ivy.Array` instance on the left and :class:`ivy.Container` on the right
**Array Reverse Operator Example**
To recap, *array reverse operator* examples show an operation being performed like so :code:`x + y` with :code:`x` being a :code:`Number` and :code:`y` being an :class:`ivy.Array` instance. These should be added to the docstring of the :class:`ivy.Array` reverse special method :meth:`ivy.Array.__r<op_name>__`.
If the function is an *operator* function, then the *array reverse operator* example should:
12. call the operator with a :code:`Number` on the left and an :class:`ivy.Array` instance on the right
**Container Operator Examples**
To recap, *container operator* examples show an operation being performed like so :code:`x + y` with :code:`x` being an :class:`ivy.Container` instance, and these should be added to the docstring of the :class:`ivy.Container` special method :meth:`ivy.Container.__<op_name>__`.
If the function is an *operator* function, then the *container operator* examples should:
13. call the operator on two :class:`ivy.Container` instances containing :code:`Number` instances at the leaves
14. call the operator on two :class:`ivy.Container` instances containing :class:`ivy.Array` instances at the leaves
15. call the operator with an :class:`ivy.Container` instance on the left and :class:`ivy.Array` on the right
**Container Reverse Operator Example**
To recap, *container reverse operator* examples show an operation being performed like so :code:`x + y` with :code:`x` being a :code:`Number` and :code:`y` being an :class:`ivy.Container` instance.
These should be added to the docstring of the :class:`ivy.Container` reverse special method :meth:`ivy.Container.__r<op_name>__`.
If the function is an *operator* function, then the *array reverse operator* example should:
16. call the operator with a :code:`Number` on the left and an :class:`ivy.Container` instance on the right
**Note**
All docstrings must run without error for all backend frameworks.
If some backends do not support some :code:`dtype` for a function, then we should not include this :code:`dtype` for any of the examples for that particular function in the docstring.
**All Possible Variants**
Point 1 mentions that the examples should cover *all possible variations*.
Let’s look at an example to make it more clear what is meant by *all possible variants* of each argument independently.
Let’s take an imaginary function with the following argument spec:
.. code-block:: python
def my_func(x: array,
mode: Union[std, prod, var],
some_flag: bool,
another_flag: bool = False,
axes: Optional[Union[int, List[int]]]=-1):
In this case, our examples would need to include
* :code:`x` being an :code:`array`
* :code:`mode` being all of: :code:`std`, :code:`prod`, :code:`var`
* :code:`some_flag` being both of: :code:`True`, :code:`False`
* :code:`another_flag` being all of: :code:`default`, :code:`True`, :code:`False`
* :code:`axis` being all of: :code:`default`, :code:`list`, :code:`int`.
Please note, this does not need to be done with a grid search.
There are 1 x 3 x 2 x 3 x 3 = 54 possible variations here, and we do not need an example for each one!
Instead, we only need as many examples as there are variations for the argument with the maximum number of variations, in this case jointly being the :code:`mode`, :code:`another_flag` and :code:`axis` arguments, each with 3 variations.
For example, we could have three examples using the following arguments:
.. code-block:: python
my_func(x0, std, True)
my_func(x1, prod, False, True, [0, 1, 2])
my_func(x2, var, True, False, 1)
It doesn’t matter how the variations are combined for the examples, as long as every variation for every argument is included in the examples.
These three examples procedurally go through the variations from left to right for each argument, but this doesn’t need to be the case if you think other combinations make more sense for the examples.
You can also add more examples if you think some important use cases are missed, this is just a lower limit on the examples that should be included in the docstring!
We'll next go through some examples to make these 18 points more clear.
ivy.tan
-------
**Functional Examples**
The signature for :func:`ivy.tan` is as follows:
.. code-block:: python
def tan(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None
) -> ivy.Array:
Let's start with the functional examples, with :class:`ivy.Array` instances in the input:
.. parsed-literal::
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0, 1, 2])
>>> y = ivy.tan(x)
>>> print(y)
ivy.array([0., 1.5574077, -2.1850398])
>>> x = ivy.array([0.5, -0.7, 2.4])
>>> y = ivy.zeros(3)
>>> ivy.tan(x, out=y)
>>> print(y)
ivy.array([0.5463025, -0.8422884, -0.91601413])
>>> x = ivy.array([[1.1, 2.2, 3.3],
... [-4.4, -5.5, -6.6]])
>>> ivy.tan(x, out=x)
>>> print(x)
ivy.array([[ 1.9647598, -1.3738229, 0.1597457],
[-3.0963247, 0.9955841, -0.3278579]])
These examples cover points 1, 2, 3, 4 and 5.
Please note that in the above case of `x` having multi-line input, it is necessary for each line of the input to be separated by a '...\' so that they can be parsed by the script that tests the examples in the docstrings.
Point 1 is simple to satisfy.
Ignoring the union over :class:`ivy.Array` and :class:`ivy.NativeArray` which is covered by points 6 and 7, and ignoring the *nestable* nature of the function which is covered by points 8 and 9, then as far as point 1 is concerned, the input :code:`x` only has one possible variation.
It must be an array.
Point 2 is satisfied, as the shape and values of the inputs are varied between each of the three examples.
Point 3 is satisfied, there are no optional inputs (aside from :code:`out`) and so this point is irrelevant, and the values and shapes do become increasingly *complex*.
Point 4 is clearly satisfied, as each of the three examples shown above use the :code:`out` argument exactly as explained in point 4.
The return has the same :code:`shape` and :code:`dtype` as the input, making all three examples possible.
Point 5 is not relevant, as there is only one array input, and so broadcasting rules do not apply.
We then also add an example with an :class:`ivy.Container` input, in order to satisfy point 6.
Point 7 is not relevant as there is only one input argument (excluding :code:`out` which does not count, as it essentially acts as an output)
.. parsed-literal::
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = ivy.tan(x)
>>> print(y)
{
a: ivy.array([0., 1.5574077, -2.1850398]),
b: ivy.array([-0.14254655, 1.1578213, -3.380515])
}
**Array Instance Method Example**
We then add an instance method example to :meth:`ivy.Array.tan` in order to satisfy
point 8.
.. code-block:: python
Examples
--------
>>> x = ivy.array([0., 1., 2.])
>>> y = x.tan()
>>> print(y)
ivy.array([0., 1.56, -2.19])
**Container Instance Method Example**
We then add an instance method example to :meth:`ivy.Container.tan` in order to satisfy point 9.
.. code-block:: python
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = x.tan()
>>> print(y)
{
a:ivy.array([0., 1.56, -2.19]),
b:ivy.array([-0.143, 1.16, -3.38])
}
**Array Operator Examples**
Points 10 and 11 are not relevant as :func:`ivy.tan` is not an *operator* function.
**Array Reverse Operator Example**
Point 12 is not relevant as :func:`ivy.tan` is not an *operator* function.
**Container Operator Examples**
Points 13, 14, and 15 are not relevant as :func:`ivy.tan` is not an *operator* function.
**Container Reverse Operator Example**
Point 16 is not relevant as :func:`ivy.tan` is not an *operator* function.
ivy.roll
--------
**Functional Examples**
The signature for :func:`ivy.roll` is as follows:
.. code-block:: python
def roll(
x: Union[ivy.Array, ivy.NativeArray],
/,
shift: Union[int, Sequence[int]],
*,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
Let's start with the functional examples, with :class:`ivy.Array` instances in the input:
.. parsed-literal::
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0., 1., 2.])
>>> y = ivy.roll(x, 1)
>>> print(y)
ivy.array([2., 0., 1.])
>>> x = ivy.array([[0., 1., 2.],
... [3., 4., 5.]])
>>> y = ivy.zeros((2, 3))
>>> ivy.roll(x, 2, -1, out=y)
>>> print(y)
ivy.array([[1., 2., 0.],
[4., 5., 3.]])
>>> x = ivy.array([[[0., 0.], [1., 3.], [2., 6.]],
... [[3., 9.], [4., 12.], [5., 15.]]])
>>> ivy.roll(x, (1, -1), (0, 2), out=x)
>>> print(x)
ivy.array([[[ 9., 3.],
[12., 4.],
[15., 5.]],
[[ 0., 0.],
[ 3., 1.],
[ 6., 2.]]])
These examples cover points 1, 2, 3, 4 and 5.
Again, please note that in the above case of `x` having multi-line input, it is necessary for each line of the input to be separated by a '...\' so that they can be parsed by the script that tests the examples in the docstrings.
Point 1 is a bit less trivial to satisfy than it was for :func:`ivy.tan` above.
While :code:`x` again only has one variation (for the same reason as explained in the :func:`ivy.tan` example above), :code:`shift` has two variations (:code:`int` or sequence of :code:`int`), and :code:`axis` has three variations (:code:`int`, :sequence of :code:`int`, or :code:`None`).
Therefore, we need at least three examples (equal to the maximum number of variations, in this case :code:`axis`), in order to show all variations for each argument.
By going through each of the three examples above, it can be seen that each variation for each argument is demonstrated in at least one of the examples.
Therefore, point 1 is satisfied.
Point 2 is satisfied, as the shape and values of the inputs are varied between each of the three examples.
Point 3 is satisfied, as the first example uses the default values for optional arguments, and the subsequent examples the non-default values in increasingly *complex* examples.
Point 4 is clearly satisfied, as each of the three examples shown above use the :code:`out` argument exactly as explained in point 4.
The return has the same :code:`shape` and :code:`dtype` as the input, making all three examples possible.
Point 5 is not relevant, as there is only one array input, and so broadcasting rules do not apply.
We then also add an example with an :class:`ivy.Container` for one of the inputs, in order to satisfy point 6.
.. parsed-literal::
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> y = ivy.roll(x, 1)
>>> print(y)
{
a: ivy.array([2., 0., 1.]),
b: ivy.array([5., 3., 4.])
}
Unlike :func:`ivy.tan`, point 7 is relevant in this case, as there are three function inputs in total (excluding :code:`out`).
We can therefore add an example with multiple :class:`ivy.Container` inputs, in order to satisfy point 7.
.. parsed-literal::
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> shift = ivy.Container(a=1, b=-1)
>>> y = ivy.roll(x, shift)
>>> print(y)
{
a: ivy.array([2., 0., 1.]),
b: ivy.array([4., 5., 3.])
}
**Array Instance Method Example**
We then add an instance method example to :meth:`ivy.Array.roll` in order to satisfy point 8.
.. code-block:: python
Examples
--------
>>> x = ivy.array([0., 1., 2.])
>>> y = x.roll(1)
>>> print(y)
ivy.array([2., 0., 1.])
**Container Instance Method Example**
We then add an instance method example to :meth:`ivy.Container.roll` in order to satisfy point 9.
.. code-block:: python
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = x.roll(1)
>>> print(y)
{
a: ivy.array([2., 0., 1.], dtype=float32),
b: ivy.array([5., 3., 4.], dtype=float32)
}
**Array Operator Examples**
Points 10 and 11 are not relevant as :func:`ivy.roll` is not an *operator* function.
**Array Reverse Operator Example**
Point 12 is not relevant as :func:`ivy.roll` is not an *operator* function.
**Container Operator Examples**
Points 13, 14, and 15 are not relevant as :func:`ivy.roll` is not an *operator* function.
**Container Reverse Operator Example**
Point 16 is not relevant as :code:`func.roll` is not an *operator* function.
ivy.add
-------
**Functional Examples**
The signature for :func:`ivy.add` is as follows:
.. code-block:: python
def add(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
Let's start with the functional examples, with :class:`ivy.Array` instances in the input:
.. parsed-literal::
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([4, 5, 6])
>>> z = ivy.add(x, y)
>>> print(z)
ivy.array([5, 7, 9])
>>> x = ivy.array([[1.1, 2.3, -3.6]])
>>> y = ivy.array([[4.8], [5.2], [6.1]])
>>> z = ivy.zeros((3, 3))
>>> ivy.add(x, y, out=z)
>>> print(z)
ivy.array([[5.9, 7.1, 1.2],
[6.3, 7.5, 1.6],
[7.2, 8.4, 2.5]])
>>> x = ivy.array([[[1.1], [3.2], [-6.3]]])
>>> y = ivy.array([[8.4], [2.5], [1.6]])
>>> ivy.add(x, y, out=x)
>>> print(x)
ivy.array([[[9.5],
[5.7],
[-4.7]]])
These examples cover points 1, 2, 3, 4 and 5.
Again, please note that in the above case of `x` having multi-line input, it is necessary for each line of the input to be separated by a '...\' so that they can be parsed by the script that tests the examples in the docstrings.
Point 1 is again trivial to satisfy, as was the case for :func:`ivy.tan`.
Ignoring the union over :class:`ivy.Array` and :class:`ivy.NativeArray` which is covered by points 6 and 7, and also ignoring the *nestable* nature of the function which is covered by points 8 and 9, then as far as point 1 is concerned, inputs :code:`x1` and :code:`x2` both only have one possible variation.
They must both be arrays.
Point 2 is satisfied, as the shape and values of the inputs are varied between each of the three examples.
Point 3 is satisfied, there are no optional inputs (aside from :code:`out`) and so this point is irrelevant, and the values and shapes do become increasingly *complex*.
Point 4 is clearly satisfied, as each of the three examples shown above use the :code:`out` argument exactly as explained in point 4.
The return has the same :code:`shape` and :code:`dtype` as the input, making all three examples possible.
Point 5 is satisfied, as the second example uses different shapes for the inputs :code:`x1` and :code:`x2`.
This causes the broadcasting rules to apply, which dictates how the operation is performed and the resultant shape of the output.
We then also add an example with an :class:`ivy.Container` for one of the inputs, in order to satisfy point 6.
.. parsed-literal::
With one :class:`ivy.Container` input:
>>> x = ivy.array([[1.1, 2.3, -3.6]])
>>> y = ivy.Container(a=ivy.array([[4.], [5.], [6.]]),
... b=ivy.array([[5.], [6.], [7.]]))
>>> z = ivy.add(x, y)
>>> print(z)
{
a: ivy.array([[5.1, 6.3, 0.4],
[6.1, 7.3, 1.4],
[7.1, 8.3, 2.4]]),
b: ivy.array([[6.1, 7.3, 1.4],
[7.1, 8.3, 2.4],
[8.1, 9.3, 3.4]])
}
Again, unlike :func:`ivy.tan`, point 7 is relevant in this case, as there are two function inputs in total (excluding :code:`out`).
We can therefore add an example with multiple :class:`ivy.Container` inputs, in order to satisfy point 7.
.. parsed-literal::
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([5, 6, 7]))
>>> z = ivy.add(x, y)
>>> print(z)
{
a: ivy.array([5, 7, 9]),
b: ivy.array([7, 9, 11])
}
**Array Instance Method Example**
We then add an instance method example to :meth:`ivy.Array.add` in order to satisfy point 8.
.. code-block:: python
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([4, 5, 6])
>>> z = x.add(y)
>>> print(z)
ivy.array([5, 7, 9])
**Container Instance Method Example**
We then add an instance method example to :meth:`ivy.Container.add` in order to satisfy point 9.
.. code-block:: python
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([5, 6, 7]))
>>> z = x.add(y)
>>> print(z)
{
a: ivy.array([5, 7, 9]),
b: ivy.array([7, 9, 11])
}
**Array Operator Examples**
Point 10 is satisfied by the following example in the :meth:`ivy.Array.__add__` docstring, with the operator called on two :class:`ivy.Array` instances.
.. parsed-literal::
Examples
--------
With :class:`ivy.Array` instances only:
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([4, 5, 6])
>>> z = x + y
>>> print(z)
ivy.array([5, 7, 9])
Point 11 is satisfied by the following example in the :meth:`ivy.Array.__add__` docstring, with the operator called with an :class:`ivy.Array` instance on the left and :class:`ivy.Container` on the right.
.. parsed-literal::
With mix of :class:`ivy.Array` and :class:`ivy.Container` instances:
>>> x = ivy.array([[1.1, 2.3, -3.6]])
>>> y = ivy.Container(a=ivy.array([[4.], [5.], [6.]]),
... b=ivy.array([[5.], [6.], [7.]]))
>>> z = x + y
>>> print(z)
{
a: ivy.array([[5.1, 6.3, 0.4],
[6.1, 7.3, 1.4],
[7.1, 8.3, 2.4]]),
b: ivy.array([[6.1, 7.3, 1.4],
[7.1, 8.3, 2.4],
[8.1, 9.3, 3.4]])
}
**Array Reverse Operator Examples**
Point 12 is satisfied by the following example in the :meth:`ivy.Array.__radd__` docstring, with the operator called with a :code:`Number` on the left and an :class:`ivy.Array` instance on the right.
.. code-block:: python
Examples
--------
>>> x = 1
>>> y = ivy.array([4, 5, 6])
>>> z = x + y
>>> print(z)
ivy.array([5, 6, 7])
**Container Operator Examples**
Point 13 is satisfied by the following example in the :meth:`ivy.Container.__add__` docstring, with the operator called on two :class:`ivy.Container` instances containing :code:`Number` instances at the leaves.
.. parsed-literal::
Examples
--------
With :code:`Number` instances at the leaves:
>>> x = ivy.Container(a=1, b=2)
>>> y = ivy.Container(a=3, b=4)
>>> z = x + y
>>> print(z)
{
a: 4,
b: 6
}
Point 14 is satisfied by the following example in the :meth:`ivy.Container.__add__` docstring, with the operator called on two :class:`ivy.Container` instances containing :class:`ivy.Array` instances at the leaves.
.. parsed-literal::
With :class:`ivy.Array` instances at the leaves:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([5, 6, 7]))
>>> z = x + y
>>> print(z)
{
a: ivy.array([5, 7, 9]),
b: ivy.array([7, 9, 11])
}
Point 15 is satisfied by the following example in the :meth:`ivy.Container.__add__` docstring, with the operator called with an :class:`ivy.Container` instance on the left and :class:`ivy.Array` on the right.
.. parsed-literal::
With a mix of :class:`ivy.Container` and :class:`ivy.Array` instances:
>>> x = ivy.Container(a=ivy.array([[4.], [5.], [6.]]),
... b=ivy.array([[5.], [6.], [7.]]))
>>> y = ivy.array([[1.1, 2.3, -3.6]])
>>> z = x + y
>>> print(z)
{
a: ivy.array([[5.1, 6.3, 0.4],
[6.1, 7.3, 1.4],
[7.1, 8.3, 2.4]]),
b: ivy.array([[6.1, 7.3, 1.4],
[7.1, 8.3, 2.4],
[8.1, 9.3, 3.4]])
}
**Container Reverse Operator Example**
Point 16 is satisfied by the following example in the :meth:`ivy.Container.__radd__` docstring, with the operator called with a :code:`Number` on the left and an :class:`ivy.Container` instance on the right.
.. code-block:: python
Examples
--------
>>> x = 1
>>> y = ivy.Container(a=3, b=4)
>>> z = x + y
>>> print(z)
{
a: 4,
b: 5
}
**Docstring Tests**
After making a Pull Request, each time you make a commit, then a number of checks are run on it to ensure everything's working fine.
One of these checks is the docstring tests named as :code:`test-docstrings / run-docstring-tests` in the GitHub actions.
The docstring tests check whether the docstring examples for a given function are valid or not.
It basically checks if the output upon execution of the examples that are documented match exactly with the ones shown in the docstrings.
Therefore each time you make a commit, you must ensure that the :code:`test-docstrings / run-docstring-tests` are working correctly at least for the function you are making changes to.
To check whether the docstring tests are passing you need to check the logs for :code:`test-docstrings / run-docstring-tests`:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/deep_dive/docstring_examples/docstring_failing_test_logs.png?raw=true
:width: 420
You will need to go through the logs and see if the list of functions for which the docstring tests are failing also has the function you are working with.
If the docstring tests are failing the logs show a list of functions having issues along with a diff message:
:code:`output for failing_fn_name on run: ......`
:code:`output in docs: ........`
as shown below:
.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/deep_dive/docstring_examples/docstring_log.png
:width: 420
It can be quite tedious to go through the output diffs and spot the exact error, so you can take the help of online tools like `text compare <https://text-compare.com/>`_ to spot the minutest of differences.
Once you make the necessary changes and the function you are working on doesn't cause the docstring tests to fail, you should be good to go.
However, one of the reviewers might ask you to make additional changes involving examples.
Passing docstring tests is a necessary but not sufficient condition for the completion of docstring formatting.
.. note::
Docstring examples should not have code that imports ivy or sets a backend, otherwise it leads to segmentation faults.
**Round Up**
These three examples should give you a good understanding of what is required when adding docstring examples.
If you have any questions, please feel free to reach out on `discord`_ in the `docstring examples thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/rtce8XthiKA" class="video">
</iframe>
| ivy/docs/overview/deep_dive/docstring_examples.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/docstring_examples.rst",
"repo_id": "ivy",
"token_count": 11360
} | 4 |
Superset Behaviour
==================
.. _`Array API Standard`: https://data-apis.org/array-api/latest/
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`superset behavior thread`: https://discord.com/channels/799879767196958751/1189905520686014514
.. _`partial_mixed_handler`: https://github.com/unifyai/ivy/blob/a07919ebf64181852a3564c4d994bc1c25bd9a6f/ivy/functional/backends/tensorflow/experimental/layers.py#L817
.. _`handle_partial_mixed_function`: https://github.com/unifyai/ivy/blob/a07919ebf64181852a3564c4d994bc1c25bd9a6f/ivy/func_wrapper.py#L981
When implementing functions in Ivy, whether they are primary, compositional, or mixed, we are constantly faced with the question: which backend implementation should Ivy most closely follow?
Extending the Standard
----------------------
It might seem as though this question is already answered.
Ivy fully adheres to the `Array API Standard`_, which helpfully limits our design space for the functions, but in its current form this only covers a relatively small number of functions, which together make up less than half of the functions in Ivy.
Even for Ivy functions which adhere to the standard, the standard permits the addition of extra arguments and function features, provided that they do not contradict the requirements of the standard.
Therefore, we are still faced with the same kind of design decisions for all Ivy functions, even those appearing in the `Array API Standard`_.
What is the Superset?
---------------------
We explain through examples how Ivy always goes for the superset of functionality among the backend frameworks.
This means that even if only one framework supports a certain feature, then we still strive to include this feature in the Ivy function.
The Ivy function then entails the *superset* of all backend features.
However, this is not always totally possible, and in some cases certain framework-specific features must be sacrificed, but usually it's possible to implement a very generalized function which covers most of the unique features among the corresponding functions in each framework.
We strive to implement the superset for primary, compositional, and mixed functions.
In many cases compositional functions do not actually have corresponding backend-specific functions, but this is not always the case.
For example, :func:`ivy.linear` is a fully compositional function, but :func:`torch.nn.functional.linear` also exists.
We should therefore make sure the compositional :func:`ivy.linear` function includes all behaviours supported by :func:`torch.nn.functional.linear`.
A Non-Duplicate Superset
------------------------
It would be easy to assume that implementing the superset simply means adding all arguments from all related functions into the Ivy function.
However, this is **not** the case for a few reasons.
Firstly, different functions might have different argument names for the same behaviour.
Looking at the functions `numpy.concatenate <https://numpy.org/doc/stable/reference/generated/numpy.concatenate.html>`_ and `torch.cat <https://pytorch.org/docs/stable/generated/torch.cat.html>`_, we of course do not want to add both of the arguments :code:`axis` and :code:`dim` to :func:`ivy.concat`, as these both represent exactly the same thing: the dimension/axis along which to concatenate.
In this case, the argument is `covered <https://data-apis.org/array-api/latest/API_specification/generated/array_api.concat.html#array_api.concat>`_ in the `Array API Standard`_ and so we opt for :code:`axis`.
In cases where there are differences between the backend argument names, and the function or argument is not in the standard, then it is up to us to determine which argument name to use.
What is not the Superset?
-------------------------
We've already explained that we should not duplicate arguments in the Ivy function when striving for the superset.
Does this mean, provided that the proposed argument is not a duplicate, that we should always add this backend-specific argument to the Ivy function?
The answer is **no**.
When determining the superset, we are only concerned with the pure **mathematics** of the function, and nothing else.
For example, the :code:`name` argument is common to many TensorFlow functions, such as `tf.concat <https://www.tensorflow.org/api_docs/python/tf/concat>`_, and is used for uniquely identifying parts of the traced computation graph during logging and debugging.
This has nothing to do with the mathematics of the function, and so is *not* included in the superset considerations when implementing Ivy functions.
Similarly, in NumPy the argument :code:`subok` controls whether subclasses of the :class:`numpy.ndarray` class should be permitted, which is included in many functions, such as `numpy.ndarray.astype <https://numpy.org/doc/stable/reference/generated/numpy.ndarray.astype.html>`_.
Finally, in JAX the argument :code:`precision` is quite common, which controls the precision of the return values, as used in `jax.lax.conv <https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.conv.html>`_ for example.
Similarly, the functions :func:`jacfwd` and :func:`jacrev` in JAX are actually mathematically identical, and these functions differ *only* in their underlying algorithm, either forward mode or reverse mode.
None of the above arguments or function variants are included in our superset considerations, as again they are not relating to the pure mathematics, and instead relate to framework, hardware, or algorithmic specifics.
Given the abstraction layer that Ivy operates at, Ivy is fundamentally unable to control under-the-hood specifics such as those mentioned above.
However, this is by design, and the central benefit of Ivy is the ability to abstract many different runtimes and algorithms under the same banner, unified by their shared fundamental mathematics.
A special case is the NumPy :code:`order` argument which controls the low-level memory layout of the array.
Although it normally has no effect on the mathematics of a function, in certain manipulation routines like :code:`reshape`, :code:`flatten` and :code:`ravel`, order determines the way the elements are read and placed into the reshaped array.
Therefore, ivy supports :code:`order` for these functions and any remaining logic surrounding order is handled in the NumPy frontend.
Regarding the **only mathematics** rule regarding the superset considerations, there are two exceptions to this, which are the handling of data type and device arguments.
Neither of these relate to the pure mathematics of the function.
However, as is discussed below, we always strive to implement Ivy functions such that they support as many data types and devices as possible.
Balancing Generalization with Efficiency
----------------------------------------
Sometimes, the simplest way to implement superset behaviour comes at the direct expense of runtime efficiency.
We explore this through the examples of :func:`softplus`.
**ivy.softplus**
When looking at the :func:`softplus` (or closest equivalent) implementations for `Ivy <../../docs/functional/ivy/activations/ivy.functional.ivy.activations.softplus.rst>`_, `JAX <https://jax.readthedocs.io/en/latest/_autosummary/jax.nn.softplus.html>`_, `TensorFlow <https://www.tensorflow.org/api_docs/python/tf/math/softplus>`_, and `PyTorch <https://pytorch.org/docs/stable/generated/torch.nn.functional.softplus.html>`_, we can see that torch is the only framework which supports the inclusion of the :code:`beta` and :code:`threshold` arguments, which are added for improved numerical stability.
We can also see that numpy does not support a :func:`softplus` function at all.
Ivy should also support the :code:`beta` and :code:`threshold` arguments, in order to provide the generalized superset implementation among the backend frameworks.
Let's take the tensorflow backend implementation as an example when assessing the necessary changes.
Without superset behaviour, the implementation is incredibly simple, with only a single tensorflow function called under the hood.
.. code-block:: python
def softplus(x: Tensor,
/,
*,
out: Optional[Tensor] = None) -> Tensor:
return tf.nn.softplus(x)
The simplest approach would be to implement :func:`softplus` in each Ivy backend as a simple composition.
For example, a simple composition in the tensorflow backend would look like the following:
.. code-block:: python
def softplus(x: Tensor,
/,
*,
beta: Optional[Union[int, float]] = 1,
threshold: Optional[Union[int, float]] = 20,
out: Optional[Tensor] = None) -> Tensor:
res = (tf.nn.softplus(x * beta)) / beta
return tf.where(x * beta > threshold, x, res)
This approach uses the default argument values used by PyTorch, and it does indeed extend the behaviour correctly.
However, the implementation now uses **six** tensorflow function calls instead of one, being: :func:`__mul__`, :func:`tf.nn.softplus`, :func:`__div__`, :func:`__mul__`, :func:`__gt__`, :func:`tf.where` in order of execution.
If a user doesn't care about the extra :code:`threshold` and :code:`beta` arguments, then a 6× increase in backend functions is a heavy price to pay efficiency-wise.
Therefore, we should in general adopt a different approach when implementing superset behaviour.
We should still implement the superset, but keep this extended behaviour as optional as possible, with maximal efficiency and minimal intrusion in the case that this extended behaviour is not required.
The following would be a much better solution:
.. code-block:: python
def softplus(x: Tensor,
/,
*,
beta: Optional[Union[int, float]] = None,
threshold: Optional[Union[int, float]] = None,
out: Optional[Tensor] = None) -> Tensor:
if beta is not None and beta != 1:
x_beta = x * beta
res = (tf.nn.softplus(x_beta)) / beta
else:
x_beta = x
res = tf.nn.softplus(x)
if threshold is not None:
return tf.where(x_beta > threshold, x, res)
return res
You will notice that this implementation involves more lines of code, but this should not be confused with added complexity.
All Ivy code should be traced for efficiency, and in this case all the :code:`if` and :code:`else` statements are removed, and all that remains is the backend functions which were executed.
This new implementation will be traced to a graph of either one, three, four, or six functions depending on the values of :code:`beta` and :code:`threshold`, while the previous implementation would *always* traces to six functions.
This does mean we do not adopt the default values used by PyTorch, but that's okay.
Implementing the superset does not mean adopting the same default values for arguments, it simply means equipping the Ivy function with the capabilities to execute the superset of behaviours.
More Examples
-------------
We now take a look at some examples, and explain our rationale for deciding upon the function signature that we should use in Ivy.
The first three examples are more-or-less superset examples, while the last example involves a deliberate decision to not implement the full superset, for some of the reasons explained above.
**ivy.linspace**
When looking at the :func:`linspace` (or closest equivalent) implementations for `Ivy <../../docs/functional/ivy/creation/ivy.functional.ivy.creation.linspace.rst>`_, `JAX <https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.linspace.html>`_, `NumPy <https://numpy.org/doc/stable/reference/generated/numpy.linspace.html>`_, `TensorFlow <https://www.tensorflow.org/api_docs/python/tf/linspace>`_, and `PyTorch <https://pytorch.org/docs/stable/generated/torch.linspace.html>`_, we can see that torch does not support arrays for the :code:`start` and :code:`end` arguments, while JAX, numpy, and tensorflow all do.
Likewise, Ivy also supports arrays for the :code:`start` and :code:`stop` arguments, and in doing so provides the generalized superset implementation among the backend frameworks.
**ivy.eye**
When looking at the :func:`eye` (or closest equivalent) implementations for `Ivy <../../docs/functional/ivy/creation/ivy.functional.ivy.creation.eye.rst>`_, `JAX <https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.eye.html>`_, `NumPy <https://numpy.org/devdocs/reference/generated/numpy.eye.html>`_, `TensorFlow <https://www.tensorflow.org/api_docs/python/tf/eye>`_, and `PyTorch <https://pytorch.org/docs/stable/generated/torch.eye.html>`_, we can see that tensorflow is the only framework which supports a :code:`batch_shape` argument.
Likewise, Ivy also supports a :code:`batch_shape` argument, and in doing so provides the generalized superset implementation among the backend frameworks.
**ivy.scatter_nd**
When looking at the :func:`scatter_nd` (or closest equivalent) implementations for `Ivy <../../docs/functional/ivy/general/ivy.functional.ivy.general.scatter_nd.rst>`_, `JAX <https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndarray.at.html#jax.numpy.ndarray.at>`_, `NumPy <https://numpy.org/doc/stable/reference/generated/numpy.ufunc.at.html>`_, `TensorFlow <https://www.tensorflow.org/api_docs/python/tf/scatter_nd>`_, and `PyTorch <https://pytorch.org/docs/stable/generated/torch.scatter.html>`_, we can see that torch only supports scattering along a single dimension, while all other frameworks support scattering across multiple dimensions at once.
Likewise, Ivy also supports scattering across multiple dimensions at once, and in doing so provides the generalized superset implementation among the backend frameworks.
**ivy.logical_and**
When looking at the :func:`logical_and` (or closest equivalent) implementations for `Ivy <../../docs/functional/ivy/elementwise/ivy.functional.ivy.elementwise.logical_and.rst>`_, `JAX <https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.logical_and.html>`_, `NumPy <https://numpy.org/doc/stable/reference/generated/numpy.logical_and.html>`_, `TensorFlow <https://www.tensorflow.org/api_docs/python/tf/math/logical_and>`_, and `PyTorch <https://pytorch.org/docs/stable/generated/torch.logical_and.html>`_, we can see that numpy and torch support the :code:`out` argument for performing inplace updates, while JAX and tensorflow do not.
With regards to the supported data types, JAX, numpy, and torch support numeric arrays, while tensorflow supports only boolean arrays.
With regards to both of these points, Ivy provides the generalized superset implementation among the backend frameworks, with support for the :code:`out` argument and also support for both numeric and boolean arrays in the input.
However, as discussed above, :func:`np.logical_and` also supports the :code:`where` argument, which we opt to **not** support in Ivy.
This is because the behaviour can easily be created as a composition like so :code:`ivy.where(mask, ivy.logical_and(x, y), ivy.zeros_like(mask))`, and we prioritize the simplicity, clarity, and function uniqueness in Ivy's API in this case, which comes at the cost of reduced runtime efficiency for some functions when using a NumPy backend.
However, in future releases our automatic graph tracing and graph simplification processes will alleviate these minor inefficiencies entirely from the final computation graph, by fusing multiple operations into one at the API level where possible.
Maximizing Usage of Native Functionality
----------------------------------------
While achieving the objective of having superset behaviour across the backends, the native functionality of frameworks should be made use of as much as possible.
Even if a framework-specific function doesn't provide complete superset behaviour, we should still make use of the partial behaviour that it provides and then add more logic for the remaining part.
This is for efficiency reasons and is more explained under the :ref:`Mixed Function <overview/deep_dive/function_types:Mixed Functions>` section.
In cases when a framework-specific function exists for one or two backends but not the others, we implement a :ref:`Mixed Function <overview/deep_dive/function_types:Mixed Functions>`.
But when the framework-specific functions do not cover all superset functionality, Ivy also allows for a mixed-compositional hybrid approach.
Consider the example of :func:`interpolate`.
Most frameworks contain some kind of interpolation function, usually limited to 2D and/or 3D, but :func:`ivy.interpolate` should be much more general, including interpolations across a larger number of dimensions.
On top of this, different framework-specific functions support different sets of modes for interpolation.
For example, if we look at the framework-specific functions available that serve the purpose of interpolation
1. :func:`torch.nn.functional.interpolate` supports a larger number of dimensions in the input but doesn't support the :code:`gaussian` or :code:`mitchellcubic` modes which are supported by :func:`tf.image.resize`.
2. :func:`tf.image.resize` supports the :code:`gaussian` or :code:`mitchellcubic` modes but doesn't support some other modes in :func:`torch.nn.functional.interpolate` and it also doesn't support larger than a 4-dimensional input.
3. :func:`jax.image.resize` also has missing modes and doesn't support a larger number of dimensions.
4. :code:`numpy` doesn't have an equivalent function for interpolation (:func:`numpy.interp` is very different from the functionality required).
So the ideal superset implementation for :func:`ivy.interpolate` would be supporting the union of all modes supported by different implementations and support a larger number of dimensions in the input.
But there are a few considerations to be made,
1. Implementing all the modes for all the backend-specific implementations would be tedious and repetitive as some modes may not be supported by more than one framework.
2. We would need a completely compositional implementation for the :code:`numpy` backend which doesn't have an equivalent framework-specific function.
3. But also having a single compositional implementation for all backends would be considerably inefficient as compared to the framework-specific functions with overlapping functionality.
As a workaround, we can simply make use of the backend-specific implementations for a certain number of dimensions and modes for each backend, and then have a general compositional implementation which covers all the remaining cases.
This will make sure that we don't introduce any inefficiencies and also avoid re-implementation for all the backends.
Ivy allows this using the `partial_mixed_handler`_ attribute on the backend-specific implementation. So the :code:`torch` backend implementation of :func:`interpolate` would look like the following,
.. code-block:: python
def interpolate(
x: torch.Tensor,
size: Union[Sequence[int], int],
/,
*,
mode: Literal[
"linear",
"bilinear",
"trilinear",
"nearest",
"area",
"nearest_exact",
"tf_area",
"bicubic",
"mitchellcubic",
"lanczos3",
"lanczos5",
"gaussian",
] = "linear",
scale_factor: Optional[Union[Sequence[int], int]] = None,
recompute_scale_factor: Optional[bool] = None,
align_corners: Optional[bool] = None,
antialias: bool = False,
out: Optional[torch.Tensor] = None,
):
return torch.nn.functional.interpolate(
x,
size=size,
mode=mode,
align_corners=align_corners,
antialias=antialias,
scale_factor=scale_factor,
recompute_scale_factor=recompute_scale_factor,
)
interpolate.partial_mixed_handler = lambda *args, mode="linear", **kwargs: mode not in [
"tf_area",
"tf_bicubic",
"mitchellcubic",
"lanczos3",
"lanczos5",
"gaussian",
]
When the backend is set, we use this attribute to apply the `handle_partial_mixed_function`_ decorator to the function.
The :code:`@handle_partial_mixed_function` accepts a function as an input that receives the arguments and keyword arguments passed to the backend-specific implementation.
The input function is expected to be a boolean function where we'd use the backend-specific implementation if :code:`True` and the compositional implementation if :code:`False`.
This provides the flexibility to add any custom logic based on the use-case for maximal use of framework-specific implementations while achieving superset generalization.
**Note**
Even though we are always striving to adhere to the superset, there might be cases where a feature has slipped under the radar.
In case you stumble upon an Ivy function that you think has not included all native framework functionalities in the optimal way, you are invited to let us know in the comment section of `this <https://github.com/unifyai/ivy/issues/6406>`_ dedicated issue.
**Round Up**
This should have hopefully given you a good feel of what should and should not be included when deciding how to design a new Ivy function.
In many cases, there is not a clear right and wrong answer, and we arrive at the final decision via open discussion.
If you find yourself proposing the addition of a new function in Ivy, then we will most likely have this discussion on your Pull Request!
If you have any questions, please feel free to reach out on `discord`_ in the `superset behavior thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/_D6xER3H4NU" class="video">
</iframe>
| ivy/docs/overview/deep_dive/superset_behaviour.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/superset_behaviour.rst",
"repo_id": "ivy",
"token_count": 6358
} | 5 |
One liners
----------
.. grid:: 1 1 3 3
:gutter: 4
.. grid-item-card:: ``ivy.trace_graph()``
:link: one_liners/trace.rst
Traces a ``Callable`` or set of them into an Ivy graph.
.. grid-item-card:: ``ivy.transpile()``
:link: one_liners/transpile.rst
Transpiles a ``Callable`` or set of them from a ``source`` framework to another
framework.
.. grid-item-card:: ``ivy.unify()``
:link: one_liners/unify.rst
Transpiles an object into Ivy code. It's an alias to
``ivy.transpile(..., to="ivy", ...)``
.. toctree::
:hidden:
:maxdepth: -1
one_liners/trace.rst
one_liners/transpile.rst
one_liners/unify.rst
| ivy/docs/overview/one_liners.rst/0 | {
"file_path": "ivy/docs/overview/one_liners.rst",
"repo_id": "ivy",
"token_count": 316
} | 6 |
Contributor Leaderboard
=======================
This page lists all of our amazing Contributors who have contributed to the project! We are grateful for your contributions and we hope to see you grow with the project! The ranks listed here are based on the `level of contribution <contributing/volunteer_program.rst>`_\.
Top Contributors
----------------
.. list-table::
:widths: 50 50 50
:header-rows: 1
* - Name
- Github ID
- Badges
* - samunder singh
- `samthakur587 <https://github.com/samthakur587>`_
- Merging Master Gold, Merging Wizard, Ivy Inspector Bronze
* - V\. Sai Suraj
- `Sai-Suraj-27 <https://github.com/Sai-Suraj-27>`_
- Merging Master Gold, Ivy Inspector Bronze
Core Contributors
-----------------
.. list-table::
:widths: 50 50 50
:header-rows: 1
* - Name
- Github ID
- Badges
* - Sanjay Suthar
- `Sanjay8602 <https://github.com/Sanjay8602>`_
- Merging Master Bronze, Ivy Inspector Bronze
* - Muhammad ishaque
- `MuhammadNizamani <https://github.com/MuhammadNizamani>`_
- Merging Master Bronze, Merging Wizard
* - nitesh kesharwani
- `NiteshK84 <https://github.com/NiteshK84>`_
- Ivy Inspector Bronze
* - sarvesh kesharwani
- `Sarvesh-Kesharwani <https://github.com/Sarvesh-Kesharwani>`_
- Ivy Inspector Bronze
Contributors
------------
.. list-table::
:widths: 50 50 50
:header-rows: 1
* - Name
- Github ID
- Badges
* - Suyash Gupta
- `sgalpha01 <https://github.com/sgalpha01>`_
- Debugging Dynamo, Merging Master, Merging Wizard
* - Garima Saroj
- `AndroGari <https://github.com/AndroGari>`_
- Merging Master, Ivy Inspector
* - Jackson McClintock
- `jacksondm33 <https://github.com/jacksondm33>`_
- Merging Master, Ivy Inspector
* - Mostafa Gamal
- `Mr-Array22 <https://github.com/Mr-Array22>`_
- Merging Master, Ivy Inspector
* - Rahul Prem
- `rp097 <https://github.com/rp097>`_
- Merging Master, Ivy Inspector
* - Rohit Kumar Salla
- `rohitsalla <https://github.com/rohitsalla>`_
- Merging Master, Ivy Inspector
* - Waqar Ahmed
- `waqaarahmed <https://github.com/waqaarahmed>`_
- Merging Master, Ivy Inspector
* - David Adlai Nettey
- `Adlai-1 <https://github.com/Adlai-1>`_
- Merging Master
* - Kacper Kożdoń
- `Kacper-W-Kozdon <https://github.com/Kacper-W-Kozdon>`_
- Merging Master
* - R E Zera Marveen Lyngkhoi
- `fleventy-5 <https://github.com/fleventy-5>`_
- Merging Master
* - Sheroz Khan
- `ksheroz <https://github.com/ksheroz>`_
- Merging Master
| ivy/docs/overview/volunteer_ranks.rst/0 | {
"file_path": "ivy/docs/overview/volunteer_ranks.rst",
"repo_id": "ivy",
"token_count": 1079
} | 7 |
# global
import abc
from typing import Union, Optional, Any
import ivy
# ToDo: implement all methods here as public instance methods
class _ArrayWithDevice(abc.ABC):
def dev(
self: ivy.Array, *, as_native: bool = False
) -> Union[ivy.Device, ivy.NativeDevice]:
"""ivy.Array instance method variant of ivy.dev. This method simply
wraps the function, and so the docstring for ivy.dev also applies to
this method with minimal changes.
Parameters
----------
self
array for which to get the device handle.
as_native
Whether or not to return the dev in native format. Default is ``False``.
Examples
--------
>>> x = ivy.array([[2, 5, 4, 1], [3, 1, 5, 2]])
>>> y = x.dev(as_native=True)
>>> print(y)
cpu
"""
return ivy.dev(self, as_native=as_native)
def to_device(
self: ivy.Array,
device: Union[ivy.Device, ivy.NativeDevice],
*,
stream: Optional[Union[int, Any]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.to_device. This method
simply wraps the function, and so the docstring for ivy.to_device also
applies to this method with minimal changes.
Parameters
----------
self
input array to be moved to the desired device
device
device to move the input array `x` to
stream
stream object to use during copy. In addition to the types
supported in array.__dlpack__(), implementations may choose to
support any library-specific stream object with the caveat that
any code using such an object would not be portable.
out
optional output array, for writing the result to. It must have
a shape that the inputs broadcast to.
Examples
--------
>>> x = ivy.array([2, 5, 4, 1])
>>> y = x.to_device('cpu')
>>> print(y.device)
cpu
"""
return ivy.to_device(self._data, device, stream=stream, out=out)
| ivy/ivy/data_classes/array/device.py/0 | {
"file_path": "ivy/ivy/data_classes/array/device.py",
"repo_id": "ivy",
"token_count": 918
} | 8 |
# global
import abc
from typing import Optional, Union, Tuple
# local
import ivy
class _ArrayWithNormsExperimental(abc.ABC):
def l1_normalize(
self: ivy.Array,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Normalize the array to have unit L1 norm.
Parameters
----------
self
Input array.
axis
Axis or axes along which to normalize. If ``None``,
the whole array is normalized.
out
Optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
The normalized array.
Examples
--------
>>> x = ivy.array([[1., 2.], [3., 4.]])
>>> y = x.l1_normalize(axis=1)
>>> print(y)
ivy.array([[0.33333334, 1.33333337],
[1.28571439, 2.28571439]])
"""
return ivy.l1_normalize(self, axis=axis, out=out)
def l2_normalize(
self: ivy.Array,
axis: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Normalize the array to have unit L2 norm.
Parameters
----------
self
Input array.
axis
Axis along which to normalize. If ``None``, the whole array
is normalized.
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
The normalized array.
Examples
--------
>>> x = ivy.array([[1., 2.], [3., 4.]])
>>> y = x.l2_normalize(axis=1)
>>> print(y)
ivy.array([[0.44721359, 0.89442718],
[0.60000002, 0.80000001]])
"""
return ivy.l2_normalize(self, axis=axis, out=out)
def batch_norm(
self: Union[ivy.NativeArray, ivy.Array],
mean: Union[ivy.NativeArray, ivy.Array],
variance: Union[ivy.NativeArray, ivy.Array],
/,
*,
offset: Optional[Union[ivy.NativeArray, ivy.Array]] = None,
scale: Optional[Union[ivy.NativeArray, ivy.Array]] = None,
training: bool = False,
eps: float = 1e-5,
momentum: float = 1e-1,
data_format: str = "NSC",
out: Optional[Tuple[ivy.Array, ivy.Array, ivy.Array]] = None,
) -> Tuple[ivy.Array, ivy.Array, ivy.Array]:
"""ivy.Array instance method variant of ivy.batch_norm. This method
simply wraps the function, and so the docstring for ivy.batch_norm also
applies to this method with minimal changes.
Parameters
----------
self
Input array of default shape (N, *S, C), where N is the batch dimension,
*S corresponds to any number of spatial dimensions and
C corresponds to the channel dimension.
training
If true, calculate and use the mean and variance of `x`. Otherwise, use the
provided `mean` and `variance`.
mean
Mean array used for input's normalization. It can be of any shape
braodcastable to (N,*S,C).
variance
Variance array used for input's normalization. It can be of any shape
braodcastable to (N,*S,C).
offset
An offset array. If present, will be added to the normalized input.
It can be of any shape broadcastable to (N,*S,C).
scale
A scale array. If present, the scale is applied to the normalized input.
It can be of any shape broadcastable to (N,*S,C).
eps
A small float number to avoid dividing by 0.
momentum
the value used for the running_mean and running_var computation.
Default value is 0.1.
data_format
The ordering of the dimensions in the input, one of "NSC" or "NCS",
where N is the batch dimension, S represents any number of spatial
dimensions and C is the channel dimension. Default is "NSC".
out
optional output arrays, for writing the result to.
Returns
-------
ret
Tuple of arrays containing the
normalized input, running mean, and running variance.
"""
return ivy.batch_norm(
self._data,
mean,
variance,
scale=scale,
offset=offset,
training=training,
eps=eps,
momentum=momentum,
data_format=data_format,
out=out,
)
def instance_norm(
self: Union[ivy.NativeArray, ivy.Array],
mean: Union[ivy.NativeArray, ivy.Array],
variance: Union[ivy.NativeArray, ivy.Array],
/,
*,
offset: Optional[Union[ivy.NativeArray, ivy.Array]] = None,
scale: Optional[Union[ivy.NativeArray, ivy.Array]] = None,
training: bool = False,
eps: float = 1e-5,
momentum: float = 1e-1,
data_format: str = "NSC",
out: Optional[Tuple[ivy.Array, ivy.Array, ivy.Array]] = None,
) -> Tuple[ivy.Array, ivy.Array, ivy.Array]:
"""ivy.Array instance method variant of ivy.instance_norm. This method
simply wraps the function, and so the docstring for ivy.instance_norm
also applies to this method with minimal changes.
Parameters
----------
self
Input array of shape default (N, *S, C), where N is the batch dimension,
*S corresponds to any number of spatial dimensions and
C corresponds to the channel dimension.
mean
Mean array of size C used for input's normalization.
variance
Variance array of size C used for input's normalization.
offset
An offset array of size C. If present, will be added
to the normalized input.
scale
A scale array of size C. If present, the scale is
applied to the normalized input.
training
If true, calculate and use the mean and variance of `x`. Otherwise, use the
provided `mean` and `variance`.
eps
A small float number to avoid dividing by 0.
momentum
the value used for the running_mean and running_var computation.
Default value is 0.1.
data_format
The ordering of the dimensions in the input, one of "NSC" or "NCS",
where N is the batch dimension, S represents any number of spatial
dimensions and C is the channel dimension. Default is "NSC".
out
optional output array, for writing the result to.
Returns
-------
ret
Tuple of array containing
the normalized input, running mean, and running variance.
"""
return ivy.instance_norm(
self._data,
mean,
variance,
scale=scale,
offset=offset,
training=training,
eps=eps,
momentum=momentum,
out=out,
data_format=data_format,
)
def group_norm(
self: Union[ivy.NativeArray, ivy.Array],
num_groups: int = 1,
/,
*,
offset: Optional[Union[ivy.NativeArray, ivy.Array]] = None,
scale: Optional[Union[ivy.NativeArray, ivy.Array]] = None,
eps: Optional[float] = 1e-5,
data_format: Optional[str] = "NSC",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.group_norm. This method
simply wraps the function, and so the docstring for ivy.group_norm also
applies to this method with minimal changes.
Parameters
----------
x
Input array of default shape (N, *S, C), where N is the batch dimension,
*S corresponds to any number of spatial dimensions and
C corresponds to the channel dimension.
num_groups
number of groups to separate the channels into
offset
An offset array of size C. If present, will be added
to the normalized input.
scale
A scale array of size C. If present, the scale is
applied to the normalized input.
eps
A small float number to avoid dividing by 0.
data_format
The ordering of the dimensions in the input, one of "NSC" or "NCS",
where N is the batch dimension, S represents any number of spatial
dimensions and C is the channel dimension. Default is "NSC".
out
optional output arrays, for writing the result to.
Returns
-------
ret
The normalized array.
"""
return ivy.group_norm(
self._data,
num_groups,
scale=scale,
offset=offset,
eps=eps,
out=out,
data_format=data_format,
)
def lp_normalize(
self: ivy.Array,
/,
*,
p: float = 2,
axis: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Normalize the array to have Lp norm.
Parameters
----------
self
Input array.
p
p-norm to use for normalization.
axis
Axis along which to normalize. If ``None``, the whole array
is normalized.
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
The normalized array.
Examples
--------
>>> x = ivy.array([[1., 2.], [3., 4.]])
>>> y = x.lp_normalize(p=2, axis=1)
>>> print(y)
ivy.array([[0.44721359, 0.89442718],
[0.60000002, 0.80000001]])
"""
return ivy.lp_normalize(self, p=p, axis=axis, out=out)
| ivy/ivy/data_classes/array/experimental/norms.py/0 | {
"file_path": "ivy/ivy/data_classes/array/experimental/norms.py",
"repo_id": "ivy",
"token_count": 4754
} | 9 |
# global
import abc
from numbers import Number
from typing import Optional, Union, Tuple
# local
import ivy
class _ArrayWithSearching(abc.ABC):
def argmax(
self: ivy.Array,
/,
*,
axis: Optional[int] = None,
keepdims: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
select_last_index: bool = False,
out: Optional[ivy.Array] = None,
) -> Union[ivy.Array, int]:
"""ivy.Array instance method variant of ivy.argmax. This method simply
wraps the function, and so the docstring for ivy.argmax also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a numeric data type.
axis
axis along which to search. If None, the function must return the index of
the maximum value of the flattened array. Default: ``None``.
keepdims
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast
correctly against the array.
dtype
Optional data type of the output array.
select_last_index
If this is set to True, the index corresponding to the
last occurrence of the maximum value will be returned.
out
If provided, the result will be inserted into this array. It should be of
the appropriate shape and dtype.
Returns
-------
ret
if axis is None, a zero-dimensional array containing the index of the first
occurrence of the maximum value; otherwise, a non-zero-dimensional array
containing the indices of the maximum values. The returned array must have
the default array index data type.
Examples
--------
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([0., 1., 2.])
>>> y = x.argmax()
>>> print(y)
ivy.array(2)
>>> x = ivy.array([[1., -0., -1.], [-2., 3., 2.]])
>>> y = x.argmax(axis=1)
>>> print(y)
ivy.array([0, 1])
>>> x = ivy.array([[4., 0., -1.], [2., -3., 6]])
>>> y = x.argmax(axis=1, keepdims=True)
>>> print(y)
ivy.array([[0], [2]])
>>> x = ivy.array([[4., 0., -1.], [2., -3., 6]])
>>> y = x.argmax(axis=1, dtype=ivy.int64)
>>> print(y, y.dtype)
ivy.array([0, 2]) int64
"""
return ivy.argmax(
self._data,
axis=axis,
keepdims=keepdims,
dtype=dtype,
select_last_index=select_last_index,
out=out,
)
def argmin(
self: ivy.Array,
/,
*,
axis: Optional[int] = None,
keepdims: bool = False,
dtype: Optional[Union[ivy.int32, ivy.int64]] = None,
select_last_index: bool = False,
out: Optional[ivy.Array] = None,
) -> Union[ivy.Array, int]:
"""ivy.Array instance method variant of ivy.argmin. This method simply
wraps the function, and so the docstring for ivy.argmin also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a numeric data type.
axis
axis along which to search. If None, the function must return the index of
the minimum value of the flattened array. Default = None.
keepdims
if True, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible with
the input array (see Broadcasting). Otherwise, if False, the reduced axes
(dimensions) must not be included in the result. Default = False.
dtype
An optional output_dtype from: int32, int64. Defaults to int64.
select_last_index
If this is set to True, the index corresponding to the
last occurrence of the minimum value will be returned.
out
if axis is None, a zero-dimensional array containing the index of the first
occurrence of the minimum value; otherwise, a non-zero-dimensional array
containing the indices of the minimum values. The returned array must have
the default array index data type.
Returns
-------
ret
Array containing the indices of the minimum values across the specified
axis.
Examples
--------
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([0., 1., -1.])
>>> y = x.argmin()
>>> print(y)
ivy.array(2)
>>> x = ivy.array([[0., 1., -1.],[-2., 1., 2.],[1., -2., 0.]])
>>> y= ivy.zeros((3,1), dtype=ivy.int64)
>>> x.argmin(axis=1, keepdims=True, out=y)
>>> print(y)
ivy.array([[2],
[0],
[1]])
"""
return ivy.argmin(
self._data,
axis=axis,
keepdims=keepdims,
dtype=dtype,
select_last_index=select_last_index,
out=out,
)
def nonzero(
self: ivy.Array,
/,
*,
as_tuple: bool = True,
size: Optional[int] = None,
fill_value: Number = 0,
) -> Union[Tuple[ivy.Array], ivy.Array]:
"""ivy.Array instance method variant of ivy.nonzero. This method simply
wraps the function, and so the docstring for ivy.nonzero also applies
to this method with minimal changes.
Parameters
----------
self
input array. Should have a numeric data type.
as_tuple
if True, the output is returned as a tuple of indices, one for each
dimension of the input, containing the indices of the true elements in that
dimension. If False, the coordinates are returned in a (N, ndim) array,
where N is the number of true elements. Default = True.
size
if specified, the function will return an array of shape (size, ndim).
If the number of non-zero elements is fewer than size, the remaining
elements will be filled with fill_value. Default = None.
fill_value
when size is specified and there are fewer than size number of elements,
the remaining elements in the output array will be filled with fill_value.
Default = 0.
Returns
-------
ret
Array containing the indices of the non-zero values.
"""
return ivy.nonzero(
self._data, as_tuple=as_tuple, size=size, fill_value=fill_value
)
def where(
self: ivy.Array,
x1: ivy.Array,
x2: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.where. This method simply
wraps the function, and so the docstring for ivy.where also applies to
this method with minimal changes.
Parameters
----------
self
Where True, yield x1, otherwise yield x2.
x1
input array. Should have a numeric data type.
x2
values from which to choose when condition is False.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
An array with elements from self where condition is True, and elements from
x2 otherwise.
Examples
--------
>>> condition = ivy.array([[True, False], [True, True]])
>>> x1 = ivy.array([[1, 2], [3, 4]])
>>> x2 = ivy.array([[5, 6], [7, 8]])
>>> res = x1.where(condition,x2)
>>> print(res)
ivy.array([[1, 0],
[1, 1]])
"""
return ivy.where(self._data, x1._data, x2._data, out=out)
def argwhere(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.argwhere. This method
simply wraps the function, and so the docstring for ivy.argwhere also
applies to this method with minimal changes.
Parameters
----------
self
input array for which indices are desired
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Indices for where the boolean array is True.
Examples
--------
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([[1, 2], [3, 4]])
>>> res = x.argwhere()
>>> print(res)
ivy.array([[0, 0], [0, 1], [1, 0], [1, 1]])
>>> x = ivy.array([[0, 2], [3, 4]])
>>> res = x.argwhere()
>>> print(res)
ivy.array([[0, 1], [1, 0], [1, 1]])
"""
return ivy.argwhere(self._data, out=out)
| ivy/ivy/data_classes/array/searching.py/0 | {
"file_path": "ivy/ivy/data_classes/array/searching.py",
"repo_id": "ivy",
"token_count": 4169
} | 10 |
# global
from typing import Union, Optional, List, Dict, Literal
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithActivationExperimental(ContainerBase):
@staticmethod
def static_logit(
x: Union[float, int, ivy.Container],
/,
*,
eps: Optional[Union[float, ivy.Container]] = None,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.logit. This method simply
wraps the function, and so the docstring for ivy.logit also applies to
this method with minimal changes.
Parameters
----------
x
Input container.
eps
When eps is None the function outputs NaN where x < 0 or x > 1.
and inf or -inf where x = 1 or x = 0, respectively.
Otherwise if eps is defined, x is clamped to [eps, 1 - eps]
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
Optional output Container.
Returns
-------
ret
Container with logits of the leaves.
Examples
--------
>>> a = ivy.array([1, 0, 0.9])
>>> b = ivy.array([0.1, 2, -0.9])
>>> x = ivy.Container(a=a, b=b)
>>> z = ivy.Container.static_logit(x)
>>> print(z)
{
a: ivy.array([inf, -inf, 2.19722438]),
b: ivy.array([-2.19722462, nan, nan])
}
>>> a = ivy.array([0.3, 2, 0.9])
>>> b = ivy.array([0.1, 1.2, -0.9])
>>> x = ivy.Container(a=a, b=b)
>>> z = ivy.Container.static_logit(x, eps=0.2)
>>> print(z)
{
a: ivy.array([-0.84729779, 1.38629448, 1.38629448]),
b: ivy.array([-1.38629436, 1.38629448, -1.38629436])
}
"""
return ContainerBase.cont_multi_map_in_function(
"logit",
x,
eps=eps,
complex_mode=complex_mode,
out=out,
)
def logit(
self: Union[float, int, ivy.Container],
/,
*,
eps: Optional[Union[float, ivy.Container]] = None,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.logit. This method
simply wraps the function, and so the docstring for ivy.logit also
applies to this method with minimal changes.
Parameters
----------
self
Input container.
eps
When eps is None the function outputs NaN where x < 0 or x > 1.
and inf or -inf where x = 1 or x = 0, respectively.
Otherwise if eps is defined, x is clamped to [eps, 1 - eps]
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
Optional output Container.
Returns
-------
ret
Container with logits of the leaves.
Examples
--------
>>> a = ivy.array([1, 0, 0.9])
>>> b = ivy.array([0.1, 2, -0.9])
>>> x = ivy.Container(a=a, b=b)
>>> z = x.logit()
>>> print(z)
{
a: ivy.array([inf, -inf, 2.19722438]),
b: ivy.array([-2.19722462, nan, nan])
}
>>> a = ivy.array([0.3, 2, 0.9])
>>> b = ivy.array([0.1, 1.2, -0.9])
>>> x = ivy.Container(a=a, b=b)
>>> z = x.logit(eps=0.2)
>>> print(z)
{
a: ivy.array([-0.84729779, 1.38629448, 1.38629448]),
b: ivy.array([-1.38629436, 1.38629448, -1.38629436])
}
"""
return self.static_logit(self, eps=eps, complex_mode=complex_mode, out=out)
@staticmethod
def static_thresholded_relu(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
threshold: Union[int, float, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.thresholded_relu. This
method simply wraps the function, and so the docstring for
ivy.thresholded_relu also applies to this method with minimal changes.
Parameters
----------
x
input container.
threshold
threshold value above which the activation is linear. Default: ``0``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the rectified linear activation unit function
applied element-wise with custom threshold.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = ivy.Container.static_thresholded_relu(x, threshold=0.5)
>>> print(y)
{
a: ivy.array([1., 0.]),
b: ivy.array([0., 0.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"thresholded_relu",
x,
threshold=threshold,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def thresholded_relu(
self: ivy.Container,
/,
*,
threshold: Union[int, float, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.thresholded_relu. This
method simply wraps the function, and so the docstring for
ivy.thresholded_relu also applies to this method with minimal changes.
Parameters
----------
self
input container.
threshold
threshold value above which the activation is linear. Default: ``0``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the rectified linear activation unit function
applied element-wise with custom threshold.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = x.thresholded_relu(threshold=0.5)
>>> print(y)
{
a: ivy.array([1., 0.]),
b: ivy.array([0., 0.])
}
"""
return self.static_thresholded_relu(
self,
threshold=threshold,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_prelu(
x: Union[ivy.NativeArray, ivy.Array, ivy.Container],
slope: Union[float, ivy.NativeArray, ivy.Array, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""
Parameters
----------
x
slope
key_chains
to_apply
prune_unapplied
map_sequences
out
"""
return ContainerBase.cont_multi_map_in_function(
"prelu",
x,
slope,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def prelu(
self: ivy.Container,
slope: Union[float, ivy.NativeArray, ivy.Array, ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""
Parameters
----------
slope
key_chains
to_apply
prune_unapplied
map_sequences
out
"""
return self.static_prelu(
self,
slope,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_relu6(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.relu6. This method simply
wraps the function, and so the docstring for ivy.relu6 also applies to
this method with minimal changes.
Parameters
----------
x
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the rectified linear 6 activation unit function
applied element-wise.
Examples
--------
>>> x = ivy.Container(a = ivy.array([-3., -2., -1., 0., 1., 2., 3., 4., 5.]),
... b = ivy.array([1., 2., 3., 4., 5., 6., 7., 8., 9.]))
>>> y = ivy.Container.static_relu6(x)
>>> print(y)
{
a: ivy.array([0., 0., 0., 0., 1., 2., 3., 4., 5.]),
b: ivy.array([1., 2., 3., 4., 5., 6., 6., 6., 6.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"relu6",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
def relu6(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.relu6. This method
simply wraps the function, and so the docstring for ivy.relu6 also
applies to this method with minimal changes.
Parameters
----------
self
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the rectified linear 6 activation unit function
applied element-wise.
Examples
--------
>>> x = ivy.Container(a = ivy.array([-3., -2., -1., 0., 1., 2., 3., 4., 5.]),
... b= ivy.array([1., 2., 3., 4., 5., 6., 7., 8., 9.]))
>>> y = x.relu()
>>> print(y)
{
a: ivy.array([0., 0., 0., 0., 1., 2., 3., 4., 5.]),
b: ivy.array([1., 2., 3., 4., 5., 6., 7., 8., 9.])
}
"""
return self.static_relu6(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
@staticmethod
def static_logsigmoid(
input: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
) -> ivy.Container:
"""ivy.Container static method variant of ivy.logsigmoid. This method
simply wraps the function, and so the docstring for ivy.logsigmoid also
applies to this method with minimal changes.
Parameters
----------
input
Input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
Returns
-------
Container with Log-sigmoid applied to the leaves.
Examples
--------
>>> a = ivy.array([1, 0, 0.9])
>>> b = ivy.array([0.1, 2, -0.9])
>>> x = ivy.Container(a=a, b=b)
>>> z = ivy.Container.static_logsigmoid(x)
>>> print(z)
{
a: ivy.array([-0.31326169, -0.69314718, -0.34115386]),
b: ivy.array([-0.64439666, -0.126928, -1.24115384])
}
>>> a = ivy.array([0.3, 2.5, 4.9])
>>> b = ivy.array([0.1, 1.2, -9.])
>>> x = ivy.Container(a=a, b=b)
>>> z = ivy.Container.static_logsigmoid(x)
>>> print(z)
{
a: ivy.array([-0.55435526, -0.07888974, -0.00741899]),
b: ivy.array([-0.64439666, -0.26328245, -9.00012302])
}
"""
return ContainerBase.cont_multi_map_in_function(
"logsigmoid",
input,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
)
def logsigmoid(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
) -> ivy.Container:
"""Apply element-wise Log-sigmoid of x i.e. log(1 / (1 + exp(-x)).
Parameters
----------
self
Input container.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
Returns
-------
ret
Container with Log-sigmoid applied to the leaves.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = x.logsigmoid()
>>> print(y)
{
a: ivy.array([-0.31326163, -1.46328258]),
b: ivy.array([-0.51301527, -0.79813886])
}
"""
return self.static_logsigmoid(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
)
@staticmethod
def static_selu(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.selu. This method simply
wraps the function, and so the docstring for ivy.selu also applies to
this method with minimal changes.
Parameters
----------
x
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the scaled exponential linear unit activation function
applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = ivy.Container.static_selu(x)
>>> print(y)
{
a: ivy.array([1.05070102, -1.22856998]),
b: ivy.array([0.42028043, -0.31868932])
}
"""
return ContainerBase.cont_multi_map_in_function(
"selu",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def selu(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.selu. This method
simply wraps the function, and so the docstring for ivy.selu also
applies to this method with minimal changes.
Parameters
----------
self
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the scaled exponential linear unit activation function
applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = x.selu()
>>> print(y)
{
a: ivy.array([1.05070102, -1.22856998]),
b: ivy.array([0.42028043, -0.31868932])
}
"""
return self.static_selu(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_silu(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.silu. This method simply
wraps the function, and so the docstring for ivy.silu also applies to
this method with minimal changes.
Parameters
----------
x
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the rectified linear activation unit function
applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = ivy.Container.static_silu(x)
>>> print(y)
{
a: ivy.array([0.73105854, -0.27777028]),
b: ivy.array([0.23947507, -0.0900332])
}
"""
return ContainerBase.cont_multi_map_in_function(
"silu",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def silu(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.silu. This method
simply wraps the function, and so the docstring for ivy.silu also
applies to this method with minimal changes.
Parameters
----------
self
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the rectified linear activation unit function
applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = x.silu()
>>> print(y)
{
a: ivy.array([0.73105854, -0.27777028]),
b: ivy.array([0.23947507, -0.0900332])
}
"""
return self._static_silu(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_elu(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
alpha: ivy.Container = 1.0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.elu. This method simply
wraps the function, and so the docstring for ivy.elu also applies to
this method with minimal changes.
Parameters
----------
x
input container.
alpha
scaler for controlling the slope of the function for x <= 0 Default: 1.0
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the elu unit function applied element-wise.
Examples
--------
>>> x = x = ivy.Container(a=ivy.array([0.39, -0.85]), b=ivy.array([1., -0.2]))
>>> y = ivy.Container.static_elu(x)
>>> print(y)
{
a: ivy.array([0.38999999, -0.57]),
b: ivy.array([1., -0.18])
}
"""
return ContainerBase.cont_multi_map_in_function(
"elu",
x,
alpha=alpha,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def elu(
self: ivy.Container,
/,
*,
alpha: ivy.Container = 1.0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.elu. This method simply
wraps the function, and so the docstring for ivy.elu also applies to
this method with minimal changes.
Parameters
----------
self
input container.
alpha
scaler for controlling the slope of the function for x <= 0 Default: 1.0
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the elu unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0.39, -0.85]), b=ivy.array([1., -0.2]))
>>> y = x.elu()
>>> print(y)
{
a: ivy.array([0.38999999, -0.57]),
b: ivy.array([1., -0.18])
}
"""
return self._static_elu(
self,
alpha=alpha,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_hardtanh(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
min_val: ivy.Container = -1.0,
max_val: ivy.Container = 1.0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.hardtanh.This method
simply wrap the function,the docstring for ivy.hardtanh also applies to
this method with minimal changes.
Parameters
----------
x
input container.
min_val
minimum value of the linear region range. Default: -1.
max_val
maximum value of the linear region range. Default: 1.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the hardtanh unit function applied element-wise.
Examples
--------
>>> x = x = ivy.Container(a=ivy.array([0.39, -2.0]), b=ivy.array([2., -0.2]))
>>> y = ivy.Container._static_hardtanh(x)
>>> print(y)
{
a: ivy.array([0.3899, -1.]),
b: ivy.array([1., -0.2])
}
"""
return ContainerBase.cont_multi_map_in_function(
"hardtanh",
x,
min_val=min_val,
max_val=max_val,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def hardtanh(
self: ivy.Container,
/,
*,
min_val: ivy.Container = -1.0,
max_val: ivy.Container = 1.0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.hardtanh.This method
simply wraps the function, so the docstring for ivy.elu also applies to
this method with minimal changes.
Parameters
----------
self
input container.
min_val
minimum value of the linear region range. Default: -1.
max_val
maximum value of the linear region range. Default: 1.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the hardtanh unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0.39, -2.0]), b=ivy.array([2., -0.2]))
>>> y = ivy.Container.hardtanh(x)
>>> print(y)
{
a: ivy.array([0.389999, -1.]),
b: ivy.array([1., -0.2])
}
"""
return self._static_hardtanh(
self,
max_val=max_val,
min_val=min_val,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_tanhshrink(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.tanhshrink. This method
simply wraps the function, and so the docstring for ivy.tanhshrink also
applies to this method with minimal changes.
Parameters
----------
x
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the tanhshrink activation unit function
applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = ivy.Container._static_tanhshrink(x)
>>> print(y)
{
a: ivy.array([0.23840582, -0.36634541]),
b: ivy.array([0.02005103, -0.00262468])
}
"""
return ContainerBase.cont_multi_map_in_function(
"tanhshrink",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def tanhshrink(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.tanhshrink. This method
simply wraps the function, and so the docstring for ivy.tanhshrink also
applies to this method with minimal changes.
Parameters
----------
self
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the tanhshrink activation unit function
applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = x.tanhshrink()
>>> print(y)
{
a: ivy.array([0.23840582, -0.36634541]),
b: ivy.array([0.02005103, -0.00262468])
}
"""
return self._static_tanhshrink(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_threshold(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
threshold: ivy.Container,
value: ivy.Container,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.threshold. This method
simply wraps the function, and so the docstring for ivy.threshold also
applies to this method with minimal changes.
Parameters
----------
x
input container.
threshold
threshold value for thresholding operation.
value
value to replace with if thresholding condition is not met.
key_chains
The key-chains to apply or not apply the method to.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
map_sequences
Whether to also map method to sequences (lists, tuples).
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the threshold activation unit function
applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = x._static_threshold(threshold=0.5, value=0.0)
>>> print(y)
{
a: ivy.array([1., 0.]),
b: ivy.array([0., 0.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"threshold",
x,
threshold=threshold,
value=value,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def threshold(
self: ivy.Container,
/,
*,
threshold: ivy.Container,
value: ivy.Container,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.threshold. This method
simply wraps the function, and so the docstring for ivy.threshold also
applies to this method with minimal changes.
Parameters
----------
self
input container.
threshold
threshold value for thresholding operation.
value
value to replace with if thresholding condition is not met.
key_chains
The key-chains to apply or not apply the method to.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
map_sequences
Whether to also map method to sequences (lists, tuples).
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the threshold activation unit function
applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = x.threshold(threshold=0.5, value=0.0)
>>> print(y)
{
a: ivy.array([1., 0.]),
b: ivy.array([0., 0.])
}
"""
return self._static_threshold(
self,
threshold=threshold,
value=value,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_softshrink(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
lambd: ivy.Container = 0.5,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = False,
prune_unapplied: Union[bool, ivy.Container] = True,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.softshrink. This method
simply wraps the function, and so the docstring for ivy.softshrink also
applies to this method with minimal changes.
Parameters
----------
x
input container.
lambd
Lambda value for soft shrinkage calculation.
key_chains
The key-chains to apply or not apply the method to.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
map_sequences
Whether to also map method to sequences (lists, tuples).
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Container with soft shrinkage applied to the leaves.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., -2.]), b=ivy.array([0.4, -0.2]))
>>> y = ivy.Container._static_softshrink(x)
>>> print(y)
{
a: ivy.array([0.5, -1.5]),
b: ivy.array([0., 0.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"softshrink",
x,
lambd=lambd,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def softshrink(
self: ivy.Container,
/,
*,
lambd: ivy.Container = 0.5,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = False,
prune_unapplied: Union[bool, ivy.Container] = True,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""Apply the soft shrinkage function element-wise.
Parameters
----------
self
Input container.
lambd
Lambda value for soft shrinkage calculation.
key_chains
The key-chains to apply or not apply the method to.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
map_sequences
Whether to also map method to sequences (lists, tuples).
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Container with soft shrinkage applied to the leaves.
Examples
--------
>>> import ivy.numpy as np
>>> x = ivy.Container(a=np.array([1., -2.]), b=np.array([0.4, -0.2]))
>>> y = ivy.Container.softshrink(x)
>>> print(y)
{
a: ivy.array([0.5, -1.5]),
b: ivy.array([0., 0.])
}
"""
return self._static_softshrink(
self,
lambd=lambd,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_celu(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
alpha: ivy.Container = 1.0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.celu. This method simply
wraps the function, and so the docstring for ivy.celu also applies to
this method with minimal changes.
Parameters
----------
x
input container.
alpha
array or scalar specifying the alpha value for CELU formlation.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the celu unit function applied element-wise.
Examples
--------
>>> x = x = ivy.Container(a=ivy.array([0.39, -0.85]), b=ivy.array([1., -0.2]))
>>> y = ivy.Container.static_celu(x)
>>> print(y)
{
a: ivy.array([0.38999999, -0.17]),
b: ivy.array([1., -0.04])
}
"""
return ContainerBase.cont_multi_map_in_function(
"celu",
x,
alpha=alpha,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
def celu(
self: ivy.Container,
/,
*,
alpha: ivy.Container = 1.0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.leaky_relu. This method
simply wraps the function, and so the docstring for ivy.leaky_relu also
applies to this method with minimal changes.
Parameters
----------
self
input container.
alpha
array or scalar specifying alpha (negative slope) value for CELU
formulation.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the celu unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0.39, -0.85]), b=ivy.array([1., -0.2]))
>>> y = x.celu()
>>> print(y)
{
a: ivy.array([0.38999999, -0.57]),
b: ivy.array([1., -0.18])
}
"""
return self._static_celu(
self,
alpha=alpha,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
@staticmethod
def _static_scaled_tanh(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
alpha: Union[float, ivy.Container] = 1.7159,
beta: Union[float, ivy.Container] = 0.67,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.scaled_tanh. This method
simply wraps the function, and so the docstring for ivy.scaled_tanh
also applies to this method with minimal changes.
Parameters
----------
x
input container.
alpha
The scaling parameter for the output.
Determines the amplitude of the tanh function.
Default: 1.7159
beta
The scaling parameter for the input.
Determines the slope of the tanh function.
Default: 0.67
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the scaled_tanh function applied.
Examples
--------
>>> x = ivy.Container(a=ivy.array([8.931, -0.85]), b=ivy.array([1., -0.2])))
>>> y = ivy.Container._static_scaled_tanh(x)
>>> y
{
a: ivy.array([1.71587813, -0.88367474]),
b: ivy.array([1.00376701, -0.2285642])
}
>>> x = ivy.Container(a=ivy.array([8.9, -8.9]), b=ivy.array([3., 33.2]))
>>> y = ivy.Container._static_scaled_tanh(x, alpha=2, beta=2.5)
>>> y
{
a: ivy.array([2., -2.]),
b: ivy.array([1.99999881, 2.])
}
>>> x = ivy.Container(a=ivy.array([0.3, -0.3]), b=ivy.array([33.0, -33.0]))
>>> y = ivy.Container._static_scaled_tanh(x, alpha=1.5, beta=25)
>>> y
{
a: ivy.array([1.49999905, -1.49999905]),
b: ivy.array([1.5, -1.5])
}
"""
return ContainerBase.cont_multi_map_in_function(
"scaled_tanh",
x,
alpha=alpha,
beta=beta,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def scaled_tanh(
self: ivy.Container,
/,
*,
alpha: Union[float, ivy.Container] = 1.7159,
beta: Union[float, ivy.Container] = 0.67,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.scaled_tanh. This
method simplywraps the function, and so the docstring for
ivy.scaled_tanh also applies to this method with minimal changes.
Parameters
----------
x
input container.
alpha
The scaling parameter for the output.
Determines the amplitude of the tanh function.
Default: 1.7159
beta
The scaling parameter for the input.
Determines the slope of the tanh function.
Default: 0.67
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the scaled_tanh function applied.
Examples
--------
>>> x = ivy.Container(a=ivy.array([2., 3.]), b=ivy.array([1., 2.]))
>>> x.scaled_tanh()
{
a: ivy.array([1.49570239, 1.65537548]),
b: ivy.array([1.00376701, 1.49570239])
}
>>> x = ivy.Container(a=ivy.array([1., 1.]), b=ivy.array([1., 1.]))
>>> x.scaled_tanh(alpha=30)
{
a: ivy.array([17.54939651, 17.54939651]),
b: ivy.array([17.54939651, 17.54939651])
}
>>> x = ivy.Container(a=ivy.array([20., 21.]), b=ivy.array([3., 1.]))
>>> x.scaled_tanh(alpha=0.1, beta=-0.4)
{
a: ivy.array([-0.09999998, -0.09999999]),
b: ivy.array([-0.08336546, -0.0379949])
}
"""
return self._static_scaled_tanh(
self,
alpha=alpha,
beta=beta,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_hardshrink(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
lambd: ivy.Container = 0.5,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = False,
prune_unapplied: Union[bool, ivy.Container] = True,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.hardshrink. This method
simply wraps the function, and so the docstring for ivy.hardshrink also
applies to this method with minimal changes.
Parameters
----------
x
input container.
lambd
Lambda value for hard shrinkage calculation.
key_chains
The key-chains to apply or not apply the method to.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
map_sequences
Whether to also map method to sequences (lists, tuples).
Returns
-------
ret
Container with hard shrinkage applied to the leaves.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., -2.]), b=ivy.array([0.4, -0.2]))
>>> y = ivy.Container._static_hardshrink(x)
>>> print(y)
{
a: ivy.array([1., -2.]),
b: ivy.array([0., 0.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"hardshrink",
x,
lambd=lambd,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def hardshrink(
self: ivy.Container,
/,
*,
lambd: ivy.Container = 0.5,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = False,
prune_unapplied: Union[bool, ivy.Container] = True,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""Apply the hard shrinkage function element-wise.
Parameters
----------
self
Input container.
lambd
Lambda value for hard shrinkage calculation.
key_chains
The key-chains to apply or not apply the method to.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
map_sequences
Whether to also map method to sequences (lists, tuples).
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Container with hard shrinkage applied to the leaves.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., -2.]), b=ivy.array([0.4, -0.2]))
>>> y = ivy.Container.hardshrink(x)
>>> print(y)
{
a: ivy.array([1., -2.]),
b: ivy.array([0., 0.])
}
"""
return self._static_hardshrink(
self,
lambd=lambd,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_hardsilu(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method which acts as a wrapper for
ivy.hardsilu.
Parameters
----------
x
input container
key_chains
The keychains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
a container containing the output of the hardsilu/hardswish function applied
to each element in ``x``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-0.5, -1, 0]), b=ivy.array([0.5, 1., 2]))
>>> y = ivy.Container._static_hardsilu(x)
>>> print(y)
{
a: ivy.array([-0.20833333, -0.33333334, 0.]),
b: ivy.array([0.29166666, 0.66666669, 1.66666663])
}
"""
return ContainerBase.cont_multi_map_in_function(
"hardsilu",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def hardsilu(
self,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method which acts as a wrapper for
ivy.hardsilu.
Parameters
----------
self
input container
key_chains
The keychains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
a container containing the output of the hardsilu/hardswish function applied
to each element in the input container.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-0.5, -1, 0]), b=ivy.array([0.5, 1., 2]))
>>> y = x.hardsilu()
>>> print(y)
{
a: ivy.array([-0.20833333, -0.33333334, 0.]),
b: ivy.array([0.29166666, 0.66666669, 1.66666663])
}
"""
return self._static_hardsilu(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
| ivy/ivy/data_classes/container/experimental/activations.py/0 | {
"file_path": "ivy/ivy/data_classes/container/experimental/activations.py",
"repo_id": "ivy",
"token_count": 33000
} | 11 |
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithSetExperimental(ContainerBase):
pass
| ivy/ivy/data_classes/container/experimental/set.py/0 | {
"file_path": "ivy/ivy/data_classes/container/experimental/set.py",
"repo_id": "ivy",
"token_count": 33
} | 12 |
# global
from typing import Optional, Union, List, Dict, Sequence
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
# ToDo: implement all methods here as public instance methods
class _ContainerWithStatistical(ContainerBase):
@staticmethod
def _static_min(
x: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
initial: Optional[Union[int, float, complex, ivy.Container]] = None,
where: Optional[Union[ivy.Array, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
):
"""ivy.Container static method variant of ivy.min. This method simply
wraps the function, and so the docstring for ivy.min also applies to
this method with minimal changes.
Parameters
----------
self
Input container. Should have a real-valued data type.
axis
axis or axes along which minimum values must be computed.
By default, the minimum value must be computed over the
entire array. If a tuple of integers, minimum values must
be computed over multiple axes. Default: ``None``.
keepdims
optional boolean, if ``True``, the reduced axes
(dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result
must be compatible with the input array
(see :ref:`broadcasting`). Otherwise, if ``False``, the
reduced axes (dimensions) must not be included in the
result. Default: ``False``.
initial
The maximum value of an output element.
Must be present to allow computation on empty slice.
where
Elements to compare for minimum
out
optional output array, for writing the result to.
Returns
-------
ret
if the minimum value was computed over the entire array,
a zero-dimensional array containing the minimum value;
otherwise, a non-zero-dimensional array containing the
minimum values. The returned array must have the same data type
as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>> > x = ivy.Container(a=ivy.array([1, 2, 3]), \
b=ivy.array([2, 3, 4]))
>> > z = x.min()
>> > print(z)
{
a: ivy.array(1),
b: ivy.array(2)
}
>>> x = ivy.Container(a=ivy.array([[1, 2, 3],[-1,0,2]]),
... b=ivy.array([[2, 3, 4], [0, 1, 2]]))
>>> z = x.min(axis=1)
>>> print(z)
{
a:ivy.array([1,-1]),
b:ivy.array([2,0])
}
"""
return ContainerBase.cont_multi_map_in_function(
"min",
x,
axis=axis,
keepdims=keepdims,
initial=initial,
where=where,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def min(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
initial: Optional[Union[int, float, complex, ivy.Container]] = None,
where: Optional[Union[ivy.Array, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.min. This method simply
wraps the function, and so the docstring for ivy.min also applies to
this method with minimal changes.
Parameters
----------
self
Input container. Should have a real-valued data type.
axis
axis or axes along which minimum values must be computed.
By default, the minimum value must be computed over the
entire array. If a tuple of integers, minimum values must
be computed over multiple axes. Default: ``None``.
keepdims
optional boolean, if ``True``, the reduced axes
(dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result
must be compatible with the input array
(see :ref:`broadcasting`). Otherwise, if ``False``, the
reduced axes (dimensions) must not be included in the
result. Default: ``False``.
initial
The maximum value of an output element.
Must be present to allow computation on empty slice.
where
Elements to compare for minimum
out
optional output array, for writing the result to.
Returns
-------
ret
if the minimum value was computed over the entire array,
a zero-dimensional array containing the minimum value;
otherwise, a non-zero-dimensional array containing the
minimum values. The returned array must have the same data type
as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>> > x = ivy.Container(a=ivy.array([1, 2, 3]), \
b=ivy.array([2, 3, 4]))
>> > z = x.min()
>> > print(z)
{
a: ivy.array(1),
b: ivy.array(2)
}
>>> x = ivy.Container(a=ivy.array([[1, 2, 3],[-1,0,2]]),
... b=ivy.array([[2, 3, 4], [0, 1, 2]]))
>>> z = x.min(axis=1)
>>> print(z)
{
a:ivy.array([1,-1]),
b:ivy.array([2,0])
}
"""
return self._static_min(
self,
axis=axis,
keepdims=keepdims,
initial=initial,
where=where,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def max(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.max. This method simply
wraps the function, and so the docstring for ivy.max also applies to
this method with minimal changes.
Parameters
----------
self
Input container. Should have a real-valued data type.
axis
axis or axes along which max values must be computed.
By default, the maximum value must be computed over
the entire array. If a tuple of integers, maximum values
must be computed over multiple axes. Default: ``None``.
keepdims
optional boolean, if ``True``, the reduced axes (dimensions)
must be included in the result as singleton dimensions,
and, accordingly, the result must be compatible with the
input array (see :ref:`broadcasting`). Otherwise, if ``False``,
the reduced axes (dimensions) must not be included in the
result. Default: ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
if the maximum value was computed over the entire array, a zero-dimensional
array containing the maximum value; otherwise, a non-zero-dimensional array
containing the maximum values. The returned array must have the same
data type as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>> > x = ivy.Container(a=ivy.array([1, 2, 3]), \
b=ivy.array([2, 3, 4]))
>> > z = x.max()
>> > print(z)
{
a: ivy.array(3),
b: ivy.array(4)
}
>>> x = ivy.Container(a=ivy.array([[1, 2, 3],[-1,0,2]]),
... b=ivy.array([[2, 3, 4], [0, 1, 2]]))
>>> z = x.max(axis=1)
>>> print(z)
{
a: ivy.array([3, 2]),
b: ivy.array([4, 2])
}
"""
return self.cont_handle_inplace(
self.cont_map(
lambda x_, _: (
ivy.max(x_, axis=axis, keepdims=keepdims)
if ivy.is_array(x_)
else x_
),
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
),
out=out,
)
def mean(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.mean. This method
simply wraps the function, and so the docstring for ivy.mean also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a floating-point data type.
axis
axis or axes along which arithmetic means must be computed. By default,
the mean must be computed over the entire array. If a Sequence of
integers, arithmetic means must be computed over multiple axes.
Default: ``None``.
keepdims
bool, if ``True``, the reduced axes (dimensions) must be included in the
result as singleton dimensions, and, accordingly, the result must be
compatible with the input array (see :ref:`broadcasting`). Otherwise,
if ``False``, the reduced axes (dimensions) must not be included in
the result. Default: ``False``.
key_chains
The key-chains to apply or not apply the method to.
Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was
not applied. Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
container, if the arithmetic mean was computed over the entire array,
a zero-dimensional array containing the arithmetic mean;
otherwise, a non-zero-dimensional array containing the arithmetic
means. The returned array must have the same data type as ``self``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = x.mean()
>>> print(y)
{
a: ivy.array(1.),
b: ivy.array(4.)
}
>>> x = ivy.Container(a=ivy.array([0.1, 1.1]), b=ivy.array([0.1, 1.1, 2.1]))
>>> y = x.mean(keepdims=True)
>>> print(y)
{
a: ivy.array([0.60000002]),
b: ivy.array([1.10000002])
}
>>> x = ivy.Container(a=ivy.array([[0.1, 1.1]]), b=ivy.array([[2., 4.]]))
>>> y = x.mean(axis=1, keepdims=True)
>>> print(y)
{
a: ivy.array([[0.60000002]]),
b: ivy.array([[3.]])
}
>>> x = ivy.Container(a=ivy.array([-1., 0., 1.]), b=ivy.array([1.1, 0.2, 1.4]))
>>> x.mean(out=x)
>>> print(x)
{
a: ivy.array(0.),
b: ivy.array(0.9)
}
>>> x = ivy.Container(a=ivy.array([0., -1., 1.]), b=ivy.array([1., 1., 1.]))
>>> y = ivy.Container(a=ivy.array(0.), b=ivy.array(0.))
>>> x.mean(out=y)
>>> print(y)
{
a: ivy.array(0.),
b: ivy.array(1.)
}
>>> x = ivy.Container(a=ivy.array([[0., 1., 2.], [3., 4., 5.]]),
... b=ivy.array([[3., 4., 5.], [6., 7., 8.]]))
>>> x.mean(axis=0, out=x)
>>> print(x)
{
a: ivy.array([1.5, 2.5, 3.5]),
b: ivy.array([4.5, 5.5, 6.5])
}
>>> x = ivy.Container(a=ivy.array([[1., 1., 1.], [2., 2., 2.]]),
... b=ivy.array([[3., 3., 3.], [4., 4., 4.]]))
>>> y = ivy.mean(x, axis=1)
>>> print(y)
{
a: ivy.array([1., 2.]),
b: ivy.array([3., 4.])
}
"""
return self.cont_handle_inplace(
self.cont_map(
lambda x_, _: (
ivy.mean(x_, axis=axis, keepdims=keepdims)
if ivy.is_array(x_)
else x_
),
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
),
out=out,
)
def var(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
correction: Union[int, float, ivy.Container] = 0.0,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.var. This method simply
wraps the function, and so the docstring for ivy.var also applies to
this method with minimal changes.
Parameters
----------
self
input container. Should have a floating-point data type.
axis
axis or axes along which variances must be computed. By default, the
variance must be computed over the entire array for each array in the input
container. If a tuple of integers, variances must be computed over
multiple axes. Default: ``None``.
correction
degrees of freedom adjustment. Setting this parameter to a value other than
0 has the effect of adjusting the divisor during the calculation of the
variance according to N-c where N corresponds to the total number of
elements over which the variance is computed and c corresponds to the
provided degrees of freedom adjustment. When computing the variance of a
population, setting this parameter to 0 is the standard choice (i.e.,
the provided array contains data constituting an entire population).
When computing the unbiased sample variance, setting this parameter to 1
is the standard choice (i.e., the provided array contains data sampled from
a larger population; this is commonly referred to as Bessel's correction).
Default: ``0``.
keepdims
if True, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible
with the input array (see Broadcasting). Otherwise, if False, the
reduced axes (dimensions) must not be included in the result.
Default: ``False``.
input array. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to.
Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not
applied. Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
a container containing different arrays depends on parameters. see below
for the types of arrays in the returned container if the variance was
computed over the entire array, a zero-dimensional array containing the
variance; otherwise, a non-zero-dimensional array containing the variances.
The returned container must have the same data type as self.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0.0, 1.0, 2.0]),
... b=ivy.array([3.0, 4.0, 5.0]))
>>> y = x.var()
>>> print(y)
{
a: ivy.array(0.6666667),
b: ivy.array(0.6666667)
}
>>> x = ivy.Container(a=ivy.array([0.0, 1.0, 2.0]),
... b=ivy.array([3.0, 4.0, 5.0]))
>>> y = ivy.Container(a=ivy.array(0.), b=ivy.array(0.))
>>> x.var(out=y)
>>> print(y)
{
a: ivy.array(0.6666667),
b: ivy.array(0.6666667)
}
>>> x = ivy.Container(a=ivy.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]),
... b=ivy.array([[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]]))
>>> y = ivy.Container(a=ivy.array([0., 0., 0.]), b=ivy.array([0., 0., 0.]))
>>> x.var(axis=0, out=y)
>>> print(y)
{
a: ivy.array([2.25, 2.25, 2.25]),
b: ivy.array([2.25, 2.25, 2.25])
}
"""
return self.cont_handle_inplace(
self.cont_map(
lambda x_, _: (
ivy.var(x_, axis=axis, correction=correction, keepdims=keepdims)
if ivy.is_array(x_)
else x_
),
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
),
out=out,
)
@staticmethod
def _static_var(
x: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
correction: Union[int, float, ivy.Container] = 0.0,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.var. This method simply
wraps the function, and so the docstring for ivy.var also applies to
this method with minimal changes.
Parameters
----------
x
input array. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to.
Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was
not applied. Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
if the variance was computed over the entire array,
a zero-dimensional array containing the variance;
otherwise, a non-zero-dimensional array containing the
variances. The returned array must have the same data
type as x.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0.1, 0.2, 0.9]),
... b=ivy.array([0.7, 0.1, 0.9]))
>>> y = ivy.Container.static_var(x)
>>> print(y)
{
a:ivy.array(0.12666667),
b:ivy.array(0.11555555)
}
"""
return ContainerBase.cont_multi_map_in_function(
"var",
x,
key_chains=key_chains,
axis=axis,
correction=correction,
keepdims=keepdims,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_prod(
x: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
):
"""ivy.Container static method variant of ivy.prod. This method simply
wraps the function, and so the docstring for ivy.prod also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a floating-point data type.
axis
axis or axes along which products must be computed. By
default, the product must be computed over the entire
array. If a tuple of integers, products must be
computed over multiple axes. Default: ``None``.
keepdims
bool, if True, the reduced axes (dimensions) must be
included in the result as singleton dimensions, and,
accordingly, the result must be compatible with the
input array (see Broadcasting). Otherwise, if False,
the reduced axes (dimensions) must not be included
in the result. Default: ``False``.
dtype
data type of the returned array.
out
optional output array, for writing the result to.
key_chains
The key-chains to apply or not apply the method to.
Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was
not applied. Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
container, if the product was computed over the entire
array, a zero-dimensional array containing the product;
otherwise, a non-zero-dimensional array containing the
products. The returned array must have the same data type
as ``self``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = ivy.Container.static_prod(x)
>>> print(y)
{
a: ivy.array(0.),
b: ivy.array(60.)
}
>>> x = ivy.Container(a=ivy.array([0.1, 1.1]), b=ivy.array([0.1, 1.1, 2.1]))
>>> y = ivy.Container.static_prod(x, keepdims=True)
>>> print(y)
{
a: ivy.array([0.11000001]),
b: ivy.array([0.23100001])
}
>>> x = ivy.Container(a=ivy.array([[2, 1]]), b=ivy.array([[2, 3]]))
>>> y = ivy.Container.static_prod(x, axis=1, keepdims=True)
>>> print(y)
{
a: ivy.array([[2]]),
b: ivy.array([[6]])
}
>>> x = ivy.Container(a=ivy.array([-1, 0, 1]), b=ivy.array([1.1, 0.2, 1.4]))
>>> ivy.Container.static_prod(x, out=x)
>>> print(x)
{
a: ivy.array(0),
b: ivy.array(0.30800003)
}
>>> x = ivy.Container(a=ivy.array([0., -1., 1.]), b=ivy.array([1., 1., 1.]))
>>> y = ivy.Container(a=ivy.array(0.), b=ivy.array(0.))
>>> ivy.Container.static_prod(x, out=y)
>>> print(y)
{
a: ivy.array(-0.),
b: ivy.array(1.)
}
>>> x = ivy.Container(a=ivy.array([[0., 1., 2.], [3., 4., 5.]]),
... b=ivy.array([[3., 4., 5.], [6., 7., 8.]]))
>>> ivy.Container.static_prod(x, axis=0, out=x)
>>> print(x)
{
a: ivy.array([0., 4., 10.]),
b: ivy.array([18., 28., 40.])
}
>>> x = ivy.Container(a=ivy.array([[1., 1., 1.], [2., 2., 2.]]),
... b=ivy.array([[3., 3., 3.], [4., 4., 4.]]))
>>> y = ivy.Container.static_prod(x, axis=1)
>>> print(y)
{
a: ivy.array([1., 8.]),
b: ivy.array([27., 64.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"prod",
x,
axis=axis,
dtype=dtype,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def prod(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.prod. This method
simply wraps the function, and so the docstring for ivy.prod also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a floating-point data type.
axis
axis or axes along which products must be computed. By
default, the product must be computed over the entire
array. If a tuple of integers, products must be
computed over multiple axes. Default: ``None``.
keepdims
bool, if True, the reduced axes (dimensions) must be
included in the result as singleton dimensions, and,
accordingly, the result must be compatible with the
input array (see Broadcasting). Otherwise, if False,
the reduced axes (dimensions) must not be included
in the result. Default: ``False``.
dtype
data type of the returned array.
out
optional output array, for writing the result to.
key_chains
The key-chains to apply or not apply the method to.
Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was
not applied. Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
container, if the product was computed over the entire
array, a zero-dimensional array containing the product;
otherwise, a non-zero-dimensional array containing the
products. The returned array must have the same data type
as ``self``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = x.prod()
>>> print(y)
{
a: ivy.array(0.),
b: ivy.array(60.)
}
>>> x = ivy.Container(a=ivy.array([0.1, 1.1]), b=ivy.array([0.1, 1.1, 2.1]))
>>> y = x.prod(keepdims=True)
>>> print(y)
{
a: ivy.array([0.11000001]),
b: ivy.array([0.23100001])
}
>>> x = ivy.Container(a=ivy.array([[2, 1]]), b=ivy.array([[2, 3]]))
>>> y = x.prod(axis=1, keepdims=True)
>>> print(y)
{
a: ivy.array([[2]]),
b: ivy.array([[6]])
}
>>> x = ivy.Container(a=ivy.array([-1, 0, 1]), b=ivy.array([1.1, 0.2, 1.4]))
>>> y = ivy.Container(a=ivy.array(0.), b=ivy.array(0.))
>>> x.prod(out=y)
>>> print(y)
{
a: ivy.array(0),
b: ivy.array(0.30800003)
}
>>> x = ivy.Container(a=ivy.array([0., -1., 1.]), b=ivy.array([1., 1., 1.]))
>>> y = ivy.Container(a=ivy.array(0.), b=ivy.array(0.))
>>> x.prod(out=y)
>>> print(y)
{
a: ivy.array(-0.),
b: ivy.array(1.)
}
>>> x = ivy.Container(a=ivy.array([[0., 1., 2.], [3., 4., 5.]]),
... b=ivy.array([[3., 4., 5.], [6., 7., 8.]]))
>>> y = ivy.Container(a=ivy.zeros(3), b=ivy.zeros(3))
>>> x.prod(axis=0, out=y)
>>> print(y)
{
a: ivy.array([0., 4., 10.]),
b: ivy.array([18., 28., 40.])
}
>>> x = ivy.Container(a=ivy.array([[1., 1., 1.], [2., 2., 2.]]),
... b=ivy.array([[3., 3., 3.], [4., 4., 4.]]))
>>> y = x.prod(axis=1)
>>> print(y)
{
a: ivy.array([1., 8.]),
b: ivy.array([27., 64.])
}
"""
return self._static_prod(
self,
axis=axis,
dtype=dtype,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_sum(
x: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"sum",
x,
axis=axis,
dtype=dtype,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def sum(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return self._static_sum(
self,
axis=axis,
dtype=dtype,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def std(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
correction: Union[int, float, ivy.Container] = 0.0,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.std. This method simply
wraps the function, and so the docstring for ivy.std also applies to
this method with minimal changes.
Parameters
----------
self
input container.
axis
axis or axes along which standard deviation must be computed.
By default, the product must be computed over the entire
array. If a tuple of integers, products must be
computed over multiple axes. Default: ``None``.
correction
degrees of freedom adjustment. Setting this parameter to a
value other than ``0`` has the effect of adjusting the
divisor during the calculation of the standard deviation
according to ``N-c`` where ``N`` corresponds to the total
number of elements over which the standard deviation is
computed and ``c`` corresponds to the provided degrees of
freedom adjustment. When computing the standard deviation
of a population, setting this parameter to ``0`` is the
standard choice (i.e., the provided array contains data
constituting an entire population). When computing
the corrected sample standard deviation, setting this
parameter to ``1`` is the standard choice (i.e., the
provided array contains data sampled from a larger
population; this is commonly referred to as Bessel's
correction). Default: ``0``.
keepdims
bool, if True, the reduced axes (dimensions) must be
included in the result as singleton dimensions, and,
accordingly, the result must be compatible with the
input array (see Broadcasting). Otherwise, if False,
the reduced axes (dimensions) must not be included
in the result. Default: ``False``.
out
optional output array, for writing the result to.
key_chains
The key-chains to apply or not apply the method to.
Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was
not applied. Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
container, if the standard deviation was computed over the
entire array, a zero-dimensional array containing the
standard deviation; otherwise, a non-zero-dimensional array
containing the respectve standard deviations. The returned
array must have the same data type as ``self``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 2.]), b=ivy.array([-4., 5.]))
>>> y = x.std()
>>> print(y)
{
a: ivy.array(1.),
b: ivy.array(4.5)
}
>>> x = ivy.Container(a=ivy.array([0.1, 1.1]), b=ivy.array([0.1, 1.1, 2.1]))
>>> y = x.std(keepdims=True)
>>> print(y)
{
a: ivy.array([0.5]),
b: ivy.array([0.81649649])
}
>>> x = ivy.Container(a=ivy.array([[2., 1.]]), b=ivy.array([[2., -2.]]))
>>> y = x.std(axis=1, keepdims=True)
>>> print(y)
{
a: ivy.array([[0.5]]),
b: ivy.array([[2.]])
}
>>> x = ivy.Container(a=ivy.array([-1., 1., 1.]), b=ivy.array([1.1, 0.2, 1.4]))
>>> x.std(out=x)
>>> print(x)
{
a: ivy.array(0.94280904),
b: ivy.array(0.509902)
}
>>> x = ivy.Container(a=ivy.array([0., -2., 1.]), b=ivy.array([1., 1., 1.]))
>>> y = ivy.Container(a=ivy.array(0.), b=ivy.array(0.))
>>> x.std(out=y)
>>> print(y)
{
a: ivy.array(1.2472192),
b: ivy.array(0.)
}
>>> x = ivy.Container(a=ivy.array([[-1., 1., 2.], [2., 2., 2.]]),
... b=ivy.array([[3., 0., -3.], [4., 1., 4.]]))
>>> y = x.std(axis=1)
>>> print(y)
{
a: ivy.array([1.2472192, 0.]),
b: ivy.array([2.44948983, 1.41421354])
}
"""
return self.cont_handle_inplace(
self.cont_map(
lambda x_, _: (
ivy.std(x_, axis=axis, correction=correction, keepdims=keepdims)
if ivy.is_array(x_)
else x_
),
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
),
out=out,
)
# Extra #
# ----- #
@staticmethod
def _static_cumsum(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
axis: Union[int, ivy.Container] = 0,
exclusive: Union[bool, ivy.Container] = False,
reverse: Union[bool, ivy.Container] = False,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.cumsum. This method
simply wraps the function, and so the docstring for ivy.cumsum also
applies to this method with minimal changes.
Parameters
----------
x
Input array or container to apply cumsum.
axis
Axis along which the cumulative sum is computed. Default is ``0``.
exclusive
Whether to perform cumsum exclusively. Default is ``False``.
reverse
Whether to perform the cumsum from last to first element in the selected
axis. Default is ``False`` (from first to last element)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
dtype
Data type of the returned array. Default is ``None``.
out
Optional output container. Default is ``None``.
Returns
-------
ret
Container whose leaves hold the result of applying cumsum
at each original leaf arrays along the specified axis.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[1, 2, 3], [2, 4, 5]]),
... b=ivy.array([[4, 5, 6], [2, 3, 1 ]]))
>>> y = ivy.Container.static_cumsum(x, axis=0)
>>> print(y)
{
a: ivy.array([[1, 2, 3],
[3, 6, 8]]),
b: ivy.array([[4, 5, 6],
[6, 8, 7]])
}
>>> x = ivy.Container(a=ivy.array([[1, 3, 5]]),
... b=ivy.array([[3, 5, 7]]))
>>> y = ivy.Container.static_cumsum(x, axis=0,
... exclusive=False, reverse=True, dtype='float32')
>>> print(y)
{
a: ivy.array([[1., 3., 5.]]),
b: ivy.array([[3., 5., 7.]])
}
>>> x = ivy.Container(a=ivy.array([[1, 3, 4]]),
... b=ivy.array([[3, 5, 8],
... [5, 6, 5]]),
... c=ivy.array([[2, 4, 1],
... [3, 6, 9],
... [0, 2, 3]]))
>>> y = ivy.Container(a = ivy.zeros((1, 3)),
... b = ivy.zeros((2, 3)),
... c = ivy.zeros((3,3)))
>>> ivy.cumsum(x,axis=1,exclusive=True, reverse=False, out=y)
>>> print(y)
{
a: ivy.array([[0, 1, 4]]),
b: ivy.array([[0, 3, 8],
[0, 5, 11]]),
c: ivy.array([[0, 2, 6],
[0, 3, 9],
[0, 0, 2]])
}
>>> x = ivy.Container(a=ivy.array([[1, 3, 4], [5, 7, 8], [9, 10, 11]]),
... b=ivy.array([[3, 4, 5], [4, 5, 6], [5, 6, 7]]))
>>> y = ivy.Container(a= ivy.zeros((3, 3)), b= ivy.zeros((3, 3)))
>>> ivy.Container.static_cumsum(x, axis=1, exclusive=True, reverse=True, out=y)
>>> print(y)
{
a: ivy.array([[7, 4, 0],
[15, 8, 0],
[21, 11, 0]]),
b: ivy.array([[9, 5, 0],
[11, 6, 0],
[13, 7, 0]])
}
>>> x = ivy.Container(a=ivy.array([[1],
... [1]]),
... b=ivy.array([[6, 8, 7],
... [2, 0, 1]]),
... c=ivy.array([[1, 2],
... [3, 4],
... [6, 4]]))
>>> ivy.Container.static_cumsum(x, axis=0, out=x)
>>> print(x)
{
a: ivy.array([[1],
[2]]),
b: ivy.array([[6, 8, 7],
[8, 8, 8]]),
c: ivy.array([[1, 2],
[4, 6],
[10, 10]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"cumsum",
x,
axis=axis,
exclusive=exclusive,
reverse=reverse,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
def cumsum(
self: ivy.Container,
axis: Union[int, ivy.Container] = 0,
exclusive: Union[bool, ivy.Container] = False,
reverse: Union[bool, ivy.Container] = False,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.cumsum. This method
simply wraps the function, and so the docstring for ivy.cumsum also
applies to this method with minimal changes.
Parameters
----------
self
Input container to apply cumsum at leaves.
axis
Axis along which the cumulative sum is computed. Default is ``0``.
exclusive
Whether to perform cumsum exclusively. Default is ``False``.
reverse
Whether to perform the cumsum from last to first element in the selected
axis. Default is ``False`` (from first to last element)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
dtype
Data type of the returned array. Default is ``None``.
out
Optional output container. Default is ``None``.
Returns
-------
ret
Container whose leaves hold the result of applying cumsum
at each original leaf arrays along the specified axis.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[1, 2, 3],
... [2, 4, 5]]),
... b=ivy.array([[4, 5, 6],
... [2, 3, 1 ]]))
>>> y = x.cumsum(axis=0, dtype='float64')
>>> print(y)
{
a: ivy.array([[1., 2., 3.],
[3., 6., 8.]]),
b: ivy.array([[4., 5., 6.],
[6., 8., 7.]])
}
>>> x = ivy.Container(a=ivy.array([[1, 3, 4],
... [5, 7, 8],
... [9, 10, 11]]),
... b=ivy.array([[3, 4, 5],
... [4, 5, 6],
... [5, 6, 7]]))
>>> y = ivy.Container(a= ivy.zeros((3, 3)), b= ivy.zeros((3, 3)))
>>> x.cumsum(axis=1, exclusive=False, reverse=True, out=y)
>>> print(y)
{
a: ivy.array([[8, 7, 4],
[20, 15, 8],
[30, 21, 11]]),
b: ivy.array([[12, 9, 5],
[15, 11, 6],
[18, 13, 7]])
}
>>> x = ivy.Container(a=ivy.array([[1, 3, 4]]),
... b=ivy.array([[3, 5, 8],
... [5, 6, 5]]),
... c=ivy.array([[2, 4, 1],
... [3, 6, 9],
... [0, 2, 3]]))
>>> y = ivy.Container(a = ivy.zeros((1, 3)),
... b = ivy.zeros((2, 3)),
... c = ivy.zeros((3,3)))
>>> x.cumsum(axis=1,exclusive=True, reverse=False, out=y)
>>> print(y)
{
a: ivy.array([[0, 1, 4]]),
b: ivy.array([[0, 3, 8],
[0, 5, 11]]),
c: ivy.array([[0, 2, 6],
[0, 3, 9],
[0, 0, 2]])
}
>>> x = ivy.Container(a=ivy.array([[0, 3, 2],
... [5, 10, 2],
... [1, 10, 1]]),
... b=ivy.array([[2, 4, 5],
... [4, 5, 5],
... [0, 1, 3]]))
>>> y = x.cumsum(axis=1,exclusive=True, reverse=True, dtype='int64')
>>> print(y)
{
a: ivy.array([[5, 2, 0],
[12, 2, 0],
[11, 1, 0]]),
b: ivy.array([[9, 5, 0],
[10, 5, 0],
[4, 3, 0]])
}
>>> x = ivy.Container(a=ivy.array([[0],
... [5]]),
... b=ivy.array([[6, 8, 7],
... [4, 2, 3]]),
... c=ivy.array([[1, 2],
... [3, 4],
... [6, 4]]))
>>> x.cumsum(axis=0, out=x)
>>> print(x)
{
a: ivy.array([[0],
[5]]),
b: ivy.array([[6, 8, 7],
[10, 10, 10]]),
c: ivy.array([[1, 2],
[4, 6],
[10, 10]])
}
"""
return self._static_cumsum(
self,
axis=axis,
exclusive=exclusive,
reverse=reverse,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
@staticmethod
def _static_cumprod(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axis: Union[int, ivy.Container] = 0,
exclusive: Union[bool, ivy.Container] = False,
reverse: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.cumprod. This method
simply wraps the function, and so the docstring for ivy.cumprod also
applies to this method with minimal changes.
Parameters
----------
x
Input array or container to cumprod.
axis
Axis to cumprod along. Default is ``0``.
exclusive
Whether to exclude the first element of the input array.
Default is ``False``.
reverse
Whether to perform the cumprod from last to first element in the selected
axis. Default is ``False`` (from first to last element)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
dtype
Data type of the returned array. Default is ``None``.
out
Optional output container. Default is ``None``.
Returns
-------
ret
Containers with arrays cumprod at leaves along specified axis.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6]))
>>> y = ivy.Container.static_cumprod(x, axis=0)
>>> print(y)
{
a: ivy.array([1, 2, 6]),
b: ivy.array([4, 20, 120])
}
>>> x = ivy.Container(a=ivy.array([[2, 3], [5, 7], [11, 13]]),
b=ivy.array([[3, 4], [4, 5], [5, 6]]))
>>> y = ivy.Container(a = ivy.zeros((3, 2)), b = ivy.zeros((3, 2)))
>>> ivy.Container.static_cumprod(x, axis=1, exclusive=True, out=y)
>>> print(y)
{
a: ivy.array([[1, 2],
[1, 5],
[1, 11]]),
b: ivy.array([[1, 3],
[1, 4],
[1, 5]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"cumprod",
x,
axis=axis,
exclusive=exclusive,
reverse=reverse,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
def cumprod(
self: ivy.Container,
/,
*,
axis: Union[int, ivy.Container] = 0,
exclusive: Union[bool, ivy.Container] = False,
reverse: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.cumprod. This method
simply wraps the function, and so the docstring for ivy.cumprod also
applies to this method with minimal changes.
Parameters
----------
self
Input container to cumprod at leaves.
axis
Axis along which the cumulative product is computed. Default is ``0``.
exclusive
Whether to exclude the first element of the input array.
Default is ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
dtype
Data type of the returned array. Default is ``None``.
out
Optional output container. Default is ``None``.
Returns
-------
ret
Containers with arrays cumprod at leaves along specified axis.
Examples
--------
With one :class:`ivy.Container` instances:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6]))
>>> y = x.cumprod(axis=0)
>>> print(y)
{
a: ivy.array([1, 2, 6]),
b: ivy.array([4, 20, 120])
}
>>> x = ivy.Container(a=ivy.array([[2, 3], [5, 7], [11, 13]]),
b=ivy.array([[3, 4], [4, 5], [5, 6]]))
>>> y = ivy.Container(a = ivy.zeros((3, 2)), b = ivy.zeros((3, 2)))
>>> x.cumprod(axis=1, exclusive=True, out=y)
{
a: ivy.array([[1, 2],
[1, 5],
[1, 11]]),
b: ivy.array([[1, 3],
[1, 4],
[1, 5]])
}
"""
return self._static_cumprod(
self,
axis=axis,
exclusive=exclusive,
reverse=reverse,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
def einsum(
self: ivy.Container,
equation: Union[str, ivy.Container],
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""
>>> x = ivy.Container(a=ivy.array([[0, 1, 0],[1, 1, 0],[1, 1, 1]]),
... b=ivy.array([[0, 1, 2],[4, 5, 6],[8, 9, 10]]))
>>> y = x.einsum('ii')
>>> print(y)
{
a: ivy.array(2),
b: ivy.array(15)
}
"""
return self.cont_handle_inplace(
self.cont_map(
lambda x_, _: ivy.einsum(equation, x_) if ivy.is_array(x_) else x_,
key_chains,
to_apply,
prune_unapplied,
map_sequences,
),
out=out,
)
| ivy/ivy/data_classes/container/statistical.py/0 | {
"file_path": "ivy/ivy/data_classes/container/statistical.py",
"repo_id": "ivy",
"token_count": 30430
} | 13 |
extern crate bindgen;
use std::env;
use std::path::{Path, PathBuf};
fn make_shared_lib<P: AsRef<Path>>(xla_dir: P) {
let os = env::var("CARGO_CFG_TARGET_OS").expect("Unable to get TARGET_OS");
println!("cargo:rerun-if-changed=xla_rs/xla_rs.cc");
println!("cargo:rerun-if-changed=xla_rs/xla_rs.h");
match os.as_str() {
"linux" | "macos" => {
cc::Build::new()
.cpp(true)
.pic(true)
.warnings(false)
.include(xla_dir.as_ref().join("include"))
.flag("-std=c++17")
.flag("-Wno-deprecated-declarations")
.flag("-DLLVM_ON_UNIX=1")
.file("xla_rs/xla_rs.cc")
.compile("xla_rs");
}
"windows" => {
cc::Build::new()
.cpp(true)
.pic(true)
.warnings(false)
.include(xla_dir.as_ref().join("include"))
.file("xla_rs/xla_rs.cc")
.compile("xla_rs");
}
_ => panic!("Unsupported OS"),
};
}
fn env_var_rerun(name: &str) -> Option<String> {
println!("cargo:rerun-if-env-changed={name}");
env::var(name).ok()
}
fn main() {
let xla_dir = env_var_rerun("XLA_EXTENSION_DIR")
.map_or_else(|| env::current_dir().unwrap().join("xla_extension"), PathBuf::from);
println!("cargo:rerun-if-changed=xla_rs/xla_rs.h");
println!("cargo:rerun-if-changed=xla_rs/xla_rs.cc");
let bindings = bindgen::Builder::default()
.header("xla_rs/xla_rs.h")
.parse_callbacks(Box::new(bindgen::CargoCallbacks))
.generate()
.expect("Unable to generate bindings");
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
bindings.write_to_file(out_path.join("c_xla.rs")).expect("Couldn't write bindings!");
// Exit early on docs.rs as the C++ library would not be available.
if std::env::var("DOCS_RS").is_ok() {
return;
}
make_shared_lib(&xla_dir);
// The --copy-dt-needed-entries -lstdc++ are helpful to get around some
// "DSO missing from command line" error
// undefined reference to symbol '_ZStlsIcSt11char_traitsIcESaIcEERSt13basic_ostreamIT_T0_ES7_RKNSt7__cxx1112basic_stringIS4_S5_T1_EE@@GLIBCXX_3.4.21'
println!("cargo:rustc-link-arg=-Wl,--copy-dt-needed-entries");
println!("cargo:rustc-link-arg=-Wl,-lstdc++");
println!("cargo:rustc-link-search=native={}", xla_dir.join("lib").display());
println!("cargo:rustc-link-lib=static=xla_rs");
println!("cargo:rustc-link-arg=-Wl,-rpath={}", xla_dir.join("lib").display());
println!("cargo:rustc-link-lib=xla_extension");
}
| ivy/ivy/engines/XLA/rust_api/build.rs/0 | {
"file_path": "ivy/ivy/engines/XLA/rust_api/build.rs",
"repo_id": "ivy",
"token_count": 1358
} | 14 |
from . import XLA
from .XLA import *
| ivy/ivy/engines/__init__.py/0 | {
"file_path": "ivy/ivy/engines/__init__.py",
"repo_id": "ivy",
"token_count": 13
} | 15 |
from ivy.utils.exceptions import IvyNotImplementedException
def if_else(cond, body_fn, orelse_fn, vars):
raise IvyNotImplementedException()
def while_loop(test_fn, body_fn, vars):
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/control_flow_ops.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/control_flow_ops.py",
"repo_id": "ivy",
"token_count": 80
} | 16 |
from typing import Union, Optional, Tuple
import mxnet as mx
from ivy.utils.exceptions import IvyNotImplementedException
def l2_normalize(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[int] = None,
out: Optional[None] = None,
) -> None:
raise IvyNotImplementedException()
def batch_norm(
x: Union[(None, mx.ndarray.NDArray)],
mean: Union[(None, mx.ndarray.NDArray)],
variance: Union[(None, mx.ndarray.NDArray)],
/,
*,
scale: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
offset: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
training: bool = False,
eps: float = 1e-05,
momentum: float = 0.1,
out: Optional[None] = None,
) -> Tuple[
(
Union[(None, mx.ndarray.NDArray)],
Union[(None, mx.ndarray.NDArray)],
Union[(None, mx.ndarray.NDArray)],
)
]:
raise IvyNotImplementedException()
def instance_norm(
x: Union[(None, mx.ndarray.NDArray)],
mean: Union[(None, mx.ndarray.NDArray)],
variance: Union[(None, mx.ndarray.NDArray)],
/,
*,
scale: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
offset: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
training: bool = False,
eps: float = 1e-05,
momentum: float = 0.1,
out: Optional[None] = None,
) -> Tuple[
(
Union[(None, mx.ndarray.NDArray)],
Union[(None, mx.ndarray.NDArray)],
Union[(None, mx.ndarray.NDArray)],
)
]:
raise IvyNotImplementedException()
def lp_normalize(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
p: float = 2,
axis: Optional[int] = None,
out: Optional[None] = None,
) -> None:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/experimental/norms.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/experimental/norms.py",
"repo_id": "ivy",
"token_count": 779
} | 17 |
from typing import Tuple, Union, Optional
import mxnet as mx
from ivy.utils.exceptions import IvyNotImplementedException
def unique_all(
x: Union[(None, mx.ndarray.NDArray)], /, *, axis: Optional[int] = None
) -> Tuple[
(
Union[(None, mx.ndarray.NDArray)],
Union[(None, mx.ndarray.NDArray)],
Union[(None, mx.ndarray.NDArray)],
Union[(None, mx.ndarray.NDArray)],
)
]:
raise IvyNotImplementedException()
def unique_counts(
x: Union[(None, mx.ndarray.NDArray)], /
) -> Tuple[(Union[(None, mx.ndarray.NDArray)], Union[(None, mx.ndarray.NDArray)])]:
raise IvyNotImplementedException()
def unique_inverse(
x: Union[(None, mx.ndarray.NDArray)], /, *, axis: Optional[int] = None
) -> Tuple[(Union[(None, mx.ndarray.NDArray)], Union[(None, mx.ndarray.NDArray)])]:
raise IvyNotImplementedException()
def unique_values(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/set.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/set.py",
"repo_id": "ivy",
"token_count": 464
} | 18 |
"""Paddle activation functions.
Collection of Paddle activation functions, wrapped to fit Ivy syntax and
signature.
"""
from typing import Optional, Union, Literal
# global
import paddle
import paddle.nn.functional as F
# local
import ivy.functional.backends.paddle as paddle_backend
import ivy
from ivy.func_wrapper import (
with_unsupported_device_and_dtypes,
with_supported_dtypes,
with_supported_device_and_dtypes,
)
from . import backend_version
@with_supported_dtypes(
{
"2.6.0 and below": (
"float32",
"float64",
"complex64",
)
},
backend_version,
)
def relu(
x: paddle.Tensor, /, *, complex_mode="jax", out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
if paddle.is_complex(x):
return paddle.complex(F.relu(x.real()), F.relu(x.imag()))
return F.relu(x)
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64", "complex")}},
backend_version,
)
def leaky_relu(
x: paddle.Tensor,
/,
*,
alpha: float = 0.2,
complex_mode="jax",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if paddle.is_complex(x):
return paddle.complex(
F.leaky_relu(x.real(), negative_slope=alpha),
F.leaky_relu(x.imag(), negative_slope=alpha),
)
return F.leaky_relu(x, negative_slope=alpha)
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64", "complex")}},
backend_version,
)
def gelu(
x: paddle.Tensor,
/,
*,
approximate: bool = False,
complex_mode="jax",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if paddle.is_complex(x):
sqrt_2_over_pi = 0.7978845608
# the other magic number comes directly from the formula in
# https://doi.org/10.48550/arXiv.1606.08415
return (
0.5
* x
* (1 + paddle_backend.tanh(sqrt_2_over_pi * (x + 0.044715 * x * x * x)))
)
return F.gelu(x, approximate=approximate)
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64", "complex")}},
backend_version,
)
def sigmoid(
x: paddle.Tensor, /, *, complex_mode="jax", out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
if paddle.is_complex(x):
return 1.0 / (1.0 + paddle_backend.exp(-x))
return F.sigmoid(x)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("bfloat16", "float16")}}, backend_version
)
def softmax(
x: paddle.Tensor,
/,
*,
axis: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if axis is None:
axis = -1
if paddle.is_complex(x):
amax = paddle_backend.max(x, axis=axis, keepdims=True)
else:
amax = paddle.max(x, axis, keepdim=True)
exp_x = paddle_backend.exp(paddle.subtract(x, amax))
return paddle.divide(exp_x, paddle.sum(exp_x, axis=axis, keepdim=True))
def softplus(
x: paddle.Tensor,
/,
*,
beta: Optional[Union[int, float]] = None,
threshold: Optional[Union[int, float]] = None,
complex_mode="jax",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if beta is not None and beta != 1:
x_beta = x * beta
res = (
ivy.add(
ivy.log1p(ivy.exp(-ivy.abs(x_beta))),
ivy.maximum(x_beta, 0),
)
) / beta
else:
x_beta = x
res = ivy.add(
ivy.log1p(ivy.exp(-ivy.abs(x_beta))),
ivy.maximum(x_beta, 0),
)
if threshold is not None:
return ivy.where(x_beta > threshold, x, res).astype(x.dtype)
return res.astype(x.dtype)
# Softsign
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float16", "bfloat16")}}, backend_version
)
def softsign(
x: paddle.Tensor,
/,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return F.softsign(x)
softsign.support_native_out = True
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float16", "bfloat16")}}, backend_version
)
def log_softmax(
x: paddle.Tensor,
/,
*,
axis: Optional[int] = -1,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[paddle.Tensor] = None,
):
x_max = paddle_backend.max(x, axis=axis, keepdims=True)
sub_tmp = paddle_backend.subtract(x, x_max)
ret = paddle_backend.sum(paddle_backend.exp(sub_tmp), axis=axis, keepdims=True)
ret = paddle_backend.log(ret)
ret = paddle_backend.subtract(sub_tmp, ret)
return ret
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64", "complex")}},
backend_version,
)
def mish(
x: paddle.Tensor,
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if paddle.is_complex(x):
return x * paddle_backend.tanh(paddle_backend.log1p(paddle_backend.exp(x)))
return F.mish(x)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float16",)}}, backend_version
)
def hardswish(
x: paddle.Tensor,
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return F.hardswish(x)
| ivy/ivy/functional/backends/paddle/activations.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/activations.py",
"repo_id": "ivy",
"token_count": 2467
} | 19 |
# global
from typing import Optional, Union, Tuple, List, Literal, Sequence, Callable
import paddle
from ivy.functional.ivy.layers import (
_handle_padding,
_depth_max_pooling_helper,
_validate_max_pool_params,
)
from ivy.utils.exceptions import IvyNotImplementedException, IvyValueError
from ivy.func_wrapper import (
with_supported_device_and_dtypes,
with_unsupported_dtypes,
with_supported_dtypes,
)
from .. import backend_version
import ivy
# local
def _determine_depth_max_pooling(x, kernel, strides, dims, data_format="channel_first"):
# Determine depth pooling
kernel, strides, depth_pooling = _depth_max_pooling_helper(
x.shape, kernel, strides, dims=dims, data_format=data_format
)
if depth_pooling:
x = paddle.transpose(x, (0, 2, 1, *range(3, dims + 2)))
return x, kernel, strides, depth_pooling
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("float32", "float64"),
"gpu": ("bfloat16", "float16", "float32", "float64"),
}
},
backend_version,
)
def max_pool1d(
x: paddle.Tensor,
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
dilation: Union[int, Tuple[int]] = 1,
ceil_mode: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
dims = 1
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
if data_format == "NWC":
x = paddle.transpose(x, perm=(0, 2, 1))
kernel = [kernel[i] for i in [0, 2, 1]] if len(kernel) == (dims + 2) else kernel
strides = (
[strides[i] for i in [0, 2, 1]] if len(strides) == (dims + 2) else strides
)
padding = (
[padding[i] for i in [0, 2, 1]]
if isinstance(padding, list) and len(padding) == (dims + 2)
else padding
)
# Determine depthwise pooling
x, kernel, strides, depth_pooling = _determine_depth_max_pooling(
x, kernel, strides, dims, data_format="channel_first"
)
# TODO: Add support for pooling with dilation in the paddle backend.
# It's currently not natively supported in the fromework.
if max(dilation) > 1:
raise NotImplementedError(
"Max pooling with dilation is currently not supported in the 'paddle'"
" backend"
)
padding = (
[item for sublist in padding for item in sublist]
if not isinstance(padding, str)
else padding
) # to work directly with paddle's max_pool1d function
res = paddle.nn.functional.max_pool1d(
x, kernel, strides, padding=padding, ceil_mode=ceil_mode
)
if depth_pooling:
res = paddle.transpose(res, perm=(0, 2, 1))
if data_format == "NWC":
res = paddle.transpose(res, perm=(0, 2, 1))
return res
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("float32", "float64"),
"gpu": ("bfloat16", "float16", "float32", "float64"),
}
},
backend_version,
)
def max_pool2d(
x: paddle.Tensor,
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
dilation: Union[int, Tuple[int, ...]] = 1,
ceil_mode: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
dims = 2
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
if data_format == "NHWC":
x = paddle.transpose(x, perm=[0, 3, 1, 2])
kernel = (
[kernel[i] for i in [0, 3, 1, 2]] if len(kernel) == (dims + 2) else kernel
)
strides = (
[strides[i] for i in [0, 3, 1, 2]]
if len(strides) == (dims + 2)
else strides
)
padding = (
[padding[i] for i in [0, 3, 1, 2]]
if isinstance(padding, list) and len(padding) == (dims + 2)
else padding
)
# Determine depthwise pooling
x, kernel, strides, depth_pooling = _determine_depth_max_pooling(
x, kernel, strides, dims, data_format="channel_first"
)
# TODO: Add support for pooling with dilation in the paddle backend.
# It's currently not natively supported in the fromework.
if max(dilation) > 1:
raise NotImplementedError(
"Max pooling with dilation is currently not supported in the 'paddle'"
" backend"
)
padding = (
[item for sublist in padding for item in sublist]
if not isinstance(padding, str)
else padding
) # paddle's expected format
res = paddle.nn.functional.max_pool2d(
x, kernel, strides, padding=padding, ceil_mode=ceil_mode
)
if depth_pooling:
res = paddle.transpose(res, perm=[0, 2, 1, 3])
if data_format == "NHWC":
res = paddle.transpose(res, perm=[0, 2, 3, 1])
return res
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("float32", "float64"),
"gpu": ("bfloat16", "float16", "float32", "float64"),
}
},
backend_version,
)
def max_pool3d(
x: paddle.Tensor,
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
dilation: Union[int, Tuple[int, ...]] = 1,
ceil_mode: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
dims = 3
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
if data_format == "NDHWC":
x = paddle.transpose(x, perm=(0, 4, 1, 2, 3))
kernel = (
[kernel[i] for i in [0, 4, 1, 2, 3]]
if len(kernel) == (dims + 2)
else kernel
)
strides = (
[strides[i] for i in [0, 4, 1, 2, 3]]
if len(strides) == (dims + 2)
else strides
)
padding = (
[padding[i] for i in [0, 4, 1, 2, 3]]
if isinstance(padding, list) and len(padding) == (dims + 2)
else padding
)
# Determine depthwise pooling
x, kernel, strides, depth_pooling = _determine_depth_max_pooling(
x, kernel, strides, dims, data_format="channel_first"
)
# TODO: Add support for pooling with dilation in the paddle backend.
# It's currently not natively supported in the fromework.
if max(dilation) > 1:
raise NotImplementedError(
"Max pooling with dilation is currently not supported in the 'paddle'"
" backend"
)
padding = (
[item for sublist in padding for item in sublist]
if not isinstance(padding, str)
else padding
) # paddle's expected format
res = paddle.nn.functional.max_pool3d(
x, kernel, strides, padding=padding, ceil_mode=ceil_mode
)
if depth_pooling:
res = paddle.transpose(res, perm=[0, 2, 1, 3, 4])
if data_format == "NDHWC":
res = paddle.transpose(res, perm=[0, 2, 3, 4, 1])
return res
def avg_pool1d(
x: paddle.Tensor,
kernel: Union[int, Tuple[int]],
strides: Union[int, Tuple[int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
raise IvyNotImplementedException()
def avg_pool2d(
x: paddle.Tensor,
kernel: Union[int, Tuple[int], Tuple[int, int]],
strides: Union[int, Tuple[int], Tuple[int, int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
raise IvyNotImplementedException()
def avg_pool3d(
x: paddle.Tensor,
kernel: Union[int, Tuple[int], Tuple[int, int, int]],
strides: Union[int, Tuple[int], Tuple[int, int, int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
raise IvyNotImplementedException()
def dct(
x: paddle.Tensor,
/,
*,
type: Optional[Literal[1, 2, 3, 4]] = 2,
n: Optional[int] = None,
axis: Optional[int] = -1,
norm: Optional[Literal["ortho"]] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
raise IvyNotImplementedException()
@with_unsupported_dtypes(
{"2.6.0 and below": ("bfloat16", "bool", "float16")}, backend_version
)
def fft(
x: paddle.Tensor,
dim: int,
/,
*,
norm: Optional[str] = "backward",
n: Optional[Union[int, Tuple[int]]] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if not isinstance(dim, int):
raise IvyValueError(f"Expecting <class 'int'> instead of {type(dim)}")
if n is None:
n = x.shape[dim]
if dim < -x.ndim or dim >= x.ndim:
raise IvyValueError(
f"Invalid dim {dim}, expecting a value ranging from {-x.ndim} to {x.ndim-1}"
)
if not isinstance(n, int):
raise TypeError(f"Expecting int type for 'n', instead of {type(n)}")
if n <= 1:
raise IvyValueError(f"Invalid number of data points {n}, expecting more than 1")
valid_norm_modes = ["backward", "ortho", "forward"]
if norm not in valid_norm_modes:
raise IvyValueError(
f"Unrecognized normalization mode {norm}, expecting one of"
f" {valid_norm_modes}"
)
ret = paddle.fft.fft(x, n, dim, norm=norm)
# to make it compatible with other backends
if x.dtype == paddle.int64:
ret = ret.astype("complex128")
return ret
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("bfloat16", "float32", "float64"),
"gpu": ("bfloat16", "float16", "float32", "float64"),
}
},
backend_version,
)
def dropout1d(
x: paddle.Tensor,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NWC",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
axis = data_format.index("C") - 3 + x.ndim
return paddle.nn.functional.dropout(x, p=prob, axis=axis, training=training)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("bfloat16", "float32", "float64"),
"gpu": ("bfloat16", "float16", "float32", "float64"),
}
},
backend_version,
)
def dropout2d(
x: paddle.Tensor,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NHWC",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
axis = data_format.index("C") - 4 + x.ndim
return paddle.nn.functional.dropout(x, p=prob, axis=axis, training=training)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("bfloat16", "float32", "float64"),
"gpu": ("bfloat16", "float16", "float32", "float64"),
}
},
backend_version,
)
def dropout3d(
x: paddle.Tensor,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NDHWC",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
axis = data_format.index("C") - 5 + x.ndim
return paddle.nn.functional.dropout(x, p=prob, axis=axis, training=training)
def ifft(
x: paddle.Tensor,
dim: int,
*,
norm: Optional[str] = "backward",
n: Optional[Union[int, Tuple[int]]] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
raise IvyNotImplementedException()
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("int8", "float32", "float64"),
"gpu": ("int8", "bfloat16", "float16", "float32", "float64"),
},
"2.4.2 and below": {
"cpu": ("int8", "float32", "float64"),
"gpu": ("int8", "float16", "float32", "float64"),
},
},
backend_version,
)
def embedding(
weights: paddle.Tensor,
indices: paddle.Tensor,
/,
*,
max_norm: Optional[int] = None,
out=None,
) -> paddle.Tensor:
ivy.utils.assertions.check_equal(
weights.ndim, 2, message="weights must be 2-d", as_array=False
)
embeddings = paddle.nn.functional.embedding(x=indices, weight=weights)
if max_norm is not None:
norms = paddle.linalg.norm(embeddings, axis=-1, keepdim=True)
embeddings = paddle.where(
norms > max_norm, embeddings * max_norm / norms, embeddings
)
embeddings = paddle.where(
norms < -max_norm, embeddings * -max_norm / norms, embeddings
)
return embeddings
def interpolate(
x: paddle.Tensor,
size: Union[Sequence[int], int],
/,
*,
mode: Optional[Literal["linear", "bilinear", "trilinear"]] = "linear",
scale_factor: Optional[Union[Sequence[int], int]] = None,
recompute_scale_factor: Optional[bool] = None,
align_corners: bool = False,
antialias: Optional[bool] = False,
out: Optional[paddle.Tensor] = None,
):
raise IvyNotImplementedException()
def adaptive_max_pool2d(
input: paddle.Tensor, output_size: Union[Sequence[int], int]
) -> paddle.Tensor:
squeeze = input.ndim == 3
x = paddle.unsqueeze(input, axis=0) if squeeze else input
ret = paddle.nn.functional.adaptive_max_pool2d(x, output_size)
return paddle.squeeze(ret, axis=0) if squeeze else ret
def ifftn(
x: paddle.Tensor,
s: Optional[Union[int, Tuple[int]]] = None,
axes: Optional[Union[int, Tuple[int]]] = None,
*,
norm: Optional[str] = "backward",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.fft.ifftn(x, s, axes, norm)
def rfft(
x: paddle.Tensor,
/,
*,
n: Optional[int] = None,
axis: int = -1,
norm: Literal["backward", "ortho", "forward"] = "backward",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if x.dtype in [paddle.complex64, paddle.complex128]:
x = x.real()
if x.dtype == paddle.float16:
x = x.astype(paddle.float32)
ret = paddle.fft.rfft(x, n=n, axis=axis, norm=norm)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
@with_unsupported_dtypes(
{"2.6.0 and below": ("bfloat16", "float16", "complex64", "complex128", "bool")},
backend_version,
)
def rfftn(
x: paddle.Tensor,
s: Optional[Union[int, Tuple[int]]] = None,
axes: Optional[Union[int, Tuple[int]]] = None,
*,
norm: Optional[str] = "backward",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
result = paddle.fft.rfftn(x, s, axes, norm)
return result.astype("complex128")
@with_supported_dtypes(
{
"2.6.0 and below": (
"complex64",
"complex128",
)
},
backend_version,
)
def fft2(
x: paddle.Tensor,
*,
dim: Optional[Union[int, Tuple[int]]] = None,
norm: Optional[str] = "backward",
s: Optional[Union[int, Tuple[int]]] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
res = paddle.fft.fft2(x, s, dim, norm)
return res.astype("complex128")
# stft
@with_supported_dtypes(
{
"2.6.0 and below": (
"complex64",
"complex128",
)
},
backend_version,
)
def stft(
signals: paddle.Tensor,
frame_length: int,
frame_step: int,
/,
*,
fft_length: Optional[int] = None,
window_fn: Optional[Callable] = None,
pad_end: Optional[bool] = False,
name: Optional[str] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if not isinstance(frame_length, int):
raise IvyValueError(f"Expecting <class 'int'> instead of {type(frame_length)}")
if frame_length < 1:
raise IvyValueError(
f"Invalid data points {frame_length}, expecting frame_length larger than or"
" equal to 1"
)
if not isinstance(frame_step, int):
raise IvyValueError(f"Expecting <class 'int'> instead of {type(frame_step)}")
if frame_step < 1:
raise IvyValueError(
f"Invalid data points {frame_length}, expecting frame_length larger than or"
" equal to 1"
)
if fft_length is not None:
if not isinstance(fft_length, int):
raise IvyValueError(
f"Expecting <class 'int'> instead of {type(fft_length)}"
)
if fft_length < 1:
raise IvyValueError(
f"Invalid data points {frame_length}, expecting frame_length larger"
" than or equal to 1"
)
input_dtype = signals.dtype
if input_dtype == paddle.float32:
dtype = "complex64"
elif input_dtype == paddle.float64:
dtype = "complex128"
def stft_1D(signals, frame_length, frame_step, fft_length, pad_end):
if fft_length is None:
fft_length = 1
while fft_length < frame_length:
fft_length *= 2
num_samples = signals.shape[-1]
if pad_end:
num_samples = signals.shape[-1]
num_frames = -(-num_samples // frame_step)
pad_length = max(
0, frame_length + frame_step * (num_frames - 1) - num_samples
)
signals = paddle.nn.functional.pad(signals, (0, pad_length))
else:
num_frames = 1 + (num_samples - frame_length) // frame_step
stft_result = []
if window_fn is None:
window = 1
else:
window = window_fn(frame_length)
for i in range(num_frames):
start = i * frame_step
end = start + frame_length
frame = signals[..., start:end]
windowed_frame = frame * window
pad_length = fft_length - frame_length
windowed_frame = paddle.nn.functional.pad(windowed_frame, (0, pad_length))
windowed_frame = paddle.to_tensor(windowed_frame)
fft_frame = fft(windowed_frame, -1)
slit = int(fft_length // 2 + 1)
stft_result.append(fft_frame[..., 0:slit])
stft = paddle.to_tensor(stft_result)
return stft
def stft_helper(nested_list, frame_length, frame_step, fft_length):
nested_list = nested_list
if len(nested_list.shape) > 1:
return [
stft_helper(sublist, frame_length, frame_step, fft_length)
for sublist in nested_list
]
else:
return stft_1D(nested_list, frame_length, frame_step, fft_length, pad_end)
to_return = stft_helper(signals, frame_length, frame_step, fft_length)
result = paddle.to_tensor(to_return)
return result.astype(dtype)
def sliding_window(
input: paddle.Tensor,
kernel_size: Union[int, Tuple[int, int]],
/,
*,
stride: Union[int, Tuple[int, int]] = 1,
dilation: Union[int, Tuple[int, int]] = 1,
padding: Union[str, int, Tuple[int, int]] = 0,
) -> paddle.Tensor:
if input.ndim != 4:
# convert input to 4D tensor as unfold only accepts 4D data
input_shape = input.shape
extend_dims = max(0, 4 - len(input_shape))
new_shape = (1,) * extend_dims + tuple(input_shape)
input = input.reshape(new_shape).astype("float32")
stride = [stride] * 2 if isinstance(stride, int) else list(stride)
dilation = [dilation] * 2 if isinstance(dilation, int) else list(dilation)
kernel_size = (
[kernel_size] * 2 if isinstance(kernel_size, int) else list(kernel_size)
)
if len(kernel_size) < 2:
kernel_size = list((kernel_size) * 2)
# check padding and convert to right format
if isinstance(padding, str):
# convert padding from str to seq
if padding.upper() == "SAME":
pad_vals = []
for dim in input.shape:
pad_val = _handle_padding(
dim,
stride[0] if isinstance(stride, tuple) else stride,
kernel_size[0],
padding,
)
pad_vals.append(pad_val)
padding = pad_vals[:2]
else:
padding = 0
else:
padding = (padding,) * 2 if isinstance(padding, int) else padding
return paddle.nn.functional.unfold(
input, kernel_size, strides=stride, paddings=padding, dilations=dilation
)
| ivy/ivy/functional/backends/paddle/experimental/layers.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/experimental/layers.py",
"repo_id": "ivy",
"token_count": 9647
} | 20 |
# global
from numbers import Number
from typing import Union, Optional, Tuple, List, Sequence, Iterable
import math
import paddle
# local
import ivy
import ivy.functional.backends.paddle as paddle_backend
from ivy.func_wrapper import (
with_unsupported_device_and_dtypes,
with_unsupported_dtypes,
with_supported_dtypes,
)
# noinspection PyProtectedMember
from . import backend_version
from ...ivy.manipulation import _calculate_out_shape
# Array API Standard #
# -------------------#
@with_unsupported_dtypes(
{"2.6.0 and below": ("bfloat16", "float16")},
backend_version,
)
def concat(
xs: Union[Tuple[paddle.Tensor, ...], List[paddle.Tensor]],
/,
*,
axis: Optional[int] = 0,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
dtypes_list = list(set(map(lambda x: x.dtype, xs)))
dtype = dtypes_list.pop()
if len(dtypes_list) > 0:
for d in dtypes_list:
dtype = ivy.promote_types(dtype, d)
xs = list(map(lambda x: x.cast("int32" if dtype == paddle.int16 else dtype), xs))
if all(0 in x.shape for x in xs):
shapes = [x.shape for x in xs]
if any(len(s) != len(shapes[0]) for s in shapes):
raise ivy.exceptions.IvyValueError(
"all the input arrays must have the same number of dimensions"
)
axis = axis + len(xs[0].shape) if axis < 0 else axis
sizes = [[v for i, v in enumerate(s) if i != axis] for s in shapes]
if any(s != sizes[0] for s in sizes):
raise ivy.exceptions.IvyValueError(
"the input arrays must have the same size along the specified axis"
)
ret = paddle.empty(
[*shapes[0][:axis], sum(s[axis] for s in shapes), *shapes[0][axis + 1 :]],
dtype=dtype,
)
else:
ret = paddle.concat(xs, axis=axis)
if dtype == paddle.int16:
ret = ret.cast("int16")
return ret
@with_supported_dtypes(
{
"2.6.0 and below": (
"int32",
"int64",
"float64",
"complex128",
"float32",
"complex64",
"bool",
)
},
backend_version,
)
def expand_dims(
x: paddle.Tensor,
/,
*,
copy: Optional[bool] = None,
axis: Union[int, Sequence[int]] = 0,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
out_shape = _calculate_out_shape(axis, x.shape)
if 0 in x.shape:
return paddle.empty(out_shape, dtype=x.dtype)
# reshape since unsqueeze sets a maximum limit of dimensions
return x.reshape(out_shape)
@with_unsupported_dtypes(
{"2.6.0 and below": ("bfloat16", "float16", "int16", "int8", "uint8")},
backend_version,
)
def flip(
x: paddle.Tensor,
/,
*,
copy: Optional[bool] = None,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if axis is None:
axis = list(range(x.ndim))
return paddle.flip(x, axis)
@with_supported_dtypes(
{
"2.6.0 and below": (
"int32",
"int64",
"float64",
"complex128",
"float32",
"complex64",
"bool",
)
},
backend_version,
)
def permute_dims(
x: paddle.Tensor,
/,
axes: Tuple[int, ...],
*,
copy: Optional[bool] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.transpose(x, axes)
def _reshape_fortran_paddle(x, shape):
if len(x.shape) > 0:
x = paddle_backend.permute_dims(x, list(reversed(range(x.ndim))))
return paddle_backend.permute_dims(
paddle.reshape(x, shape[::-1]), list(range(len(shape)))[::-1]
)
def reshape(
x: paddle.Tensor,
/,
shape: Union[ivy.NativeShape, Sequence[int]],
*,
copy: Optional[bool] = None,
order: Optional[str] = "C",
allowzero: Optional[bool] = True,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if 0 in x.shape:
if -1 in shape:
shape = [
(
s
if s != -1
else math.prod(x.shape) // math.prod([s for s in shape if s != -1])
)
for s in shape
]
return paddle.empty(shape, dtype=x.dtype)
if len(shape) == 0:
out_scalar = True
shape = [1]
else:
out_scalar = False
if not allowzero:
shape = [
new_s if con else old_s
for new_s, con, old_s in zip(shape, paddle.to_tensor(shape) != 0, x.shape)
]
if len(x.shape) == 0:
x = paddle.reshape(x, shape=[1])
if order == "F":
ret = _reshape_fortran_paddle(x, shape)
if out_scalar:
return paddle_backend.squeeze(ret, axis=0)
return ret
ret = paddle.reshape(x, shape)
if out_scalar:
return paddle_backend.squeeze(ret, axis=0)
return ret
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64", "int32", "int64")},
backend_version,
)
def roll(
x: paddle.Tensor,
/,
shift: Union[int, Sequence[int]],
*,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.roll(x, shift, axis)
@with_unsupported_dtypes(
{"2.6.0 and below": ("bfloat16", "float16", "int16")}, backend_version
)
def squeeze(
x: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
copy: Optional[bool] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if isinstance(axis, list):
axis = tuple(axis)
if len(x.shape) == 0:
if axis is None or axis == 0 or axis == -1:
return x
raise ivy.utils.exceptions.IvyException(
f"tried to squeeze a zero-dimensional input by axis {axis}"
)
if x.ndim > 6:
# Paddle squeeze sets a maximum limit of 6 dims in the input
x_shape = x.shape
x_shape.pop(axis)
return paddle_backend.reshape(x, x_shape)
return paddle.squeeze(x, axis=axis)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("int16", "uint8", "int8", "float16")}},
backend_version,
)
def stack(
arrays: Union[Tuple[paddle.Tensor], List[paddle.Tensor]],
/,
*,
axis: int = 0,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
dtype_list = set(map(lambda x: x.dtype, arrays))
dtype = dtype_list.pop()
if len(dtype_list) > 0:
for d in dtype_list:
dtype = ivy.promote_types(dtype, d)
arrays = list(map(lambda x: x.cast(dtype), arrays))
first_shape = arrays[0].shape
if any(arr.shape != first_shape for arr in arrays):
raise ValueError("Shapes of all inputs must match")
if 0 in first_shape:
return ivy.empty(
first_shape[:axis] + [len(arrays)] + first_shape[axis:], dtype=dtype
)
if dtype in [paddle.int8, paddle.int16, paddle.uint8, paddle.float16, paddle.bool]:
arrays = list(map(lambda x: x.cast("float32"), arrays))
return paddle.stack(arrays, axis=axis).cast(dtype)
elif dtype in [
paddle.complex64,
paddle.complex128,
]:
arrays = list(map(lambda x: x.cast(dtype), arrays))
real_list = list(map(lambda x: x.real(), arrays))
imag_list = list(map(lambda x: x.imag(), arrays))
re_stacked = paddle.stack(real_list, axis=axis)
imag_stacked = paddle.stack(imag_list, axis=axis)
return paddle.complex(re_stacked, imag_stacked)
else:
return paddle.stack(arrays, axis=axis)
# Extra #
# ------#
@with_unsupported_dtypes({"2.6.0 and below": ("int16",)}, backend_version)
def split(
x: paddle.Tensor,
/,
*,
copy: Optional[bool] = None,
num_or_size_splits: Optional[Union[int, List[int], paddle.Tensor]] = None,
axis: Optional[int] = 0,
with_remainder: Optional[bool] = False,
) -> List[paddle.Tensor]:
if x.shape == ():
if num_or_size_splits is not None and num_or_size_splits != 1:
raise ivy.utils.exceptions.IvyException(
"input array had no shape, but num_sections specified was"
f" {num_or_size_splits}"
)
return [x]
if num_or_size_splits is None:
num_or_size_splits = x.shape[axis]
elif isinstance(num_or_size_splits, paddle.Tensor):
num_or_size_splits = num_or_size_splits.cast("int32")
num_or_size_splits = num_or_size_splits.tolist()
elif isinstance(num_or_size_splits, int):
num_chunks = x.shape[axis] // num_or_size_splits
remainder = x.shape[axis] % num_or_size_splits
if remainder != 0:
if with_remainder:
num_or_size_splits = [num_or_size_splits] * num_chunks + [remainder]
else:
raise ivy.utils.exceptions.IvyException(
"Split size is not compatible with input shape"
)
if isinstance(num_or_size_splits, (list, tuple)):
if sum(num_or_size_splits) < x.shape[axis]:
num_or_size_splits + type(num_or_size_splits)([-1])
elif sum(num_or_size_splits) > x.shape[axis]:
raise ivy.utils.exceptions.IvyException(
"total split size is not compatible with input shape,"
f" got {sum(num_or_size_splits)} which is more than x.shape[axis]"
)
if paddle.is_complex(x):
imag_list = paddle.split(x.imag(), num_or_size_splits, axis)
real_list = paddle.split(x.real(), num_or_size_splits, axis)
return [paddle.complex(a, b) for a, b in zip(real_list, imag_list)]
return paddle.split(x, num_or_size_splits, axis)
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64", "int32", "int64")},
backend_version,
)
def repeat(
x: paddle.Tensor,
/,
repeats: Union[int, Iterable[int]],
*,
axis: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
# handle the case when repeats contains 0 as paddle doesn't support it
if (isinstance(repeats, Number) and repeats == 0) or (
isinstance(repeats, paddle.Tensor) and repeats.size == 1 and repeats.item() == 0
):
if axis is None:
return paddle.to_tensor([], dtype=x.dtype)
else:
shape = x.shape
shape[axis] = 0
return paddle.zeros(shape=shape).cast(x.dtype)
if isinstance(repeats, paddle.Tensor) and repeats.size == 1:
repeats = repeats.item()
if axis is not None:
axis %= x.ndim
if paddle.is_complex(x):
return paddle.complex(
paddle.repeat_interleave(x.real(), repeats=repeats, axis=axis),
paddle.repeat_interleave(x.imag(), repeats=repeats, axis=axis),
)
return paddle.repeat_interleave(x, repeats=repeats, axis=axis)
@with_unsupported_dtypes(
{"2.6.0 and below": ("bfloat16", "float16", "int16", "int8", "uint8")},
backend_version,
)
def tile(
x: paddle.Tensor, /, repeats: Sequence[int], *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
if x.ndim >= 7:
repeats = (
repeats.numpy().tolist() if isinstance(repeats, paddle.Tensor) else repeats
)
new_shape = [*x.shape[:5], -1]
reshaped_tensor = paddle.reshape(x, new_shape)
new_repeats = repeats[:5] + [math.prod(repeats[5:])]
tiled_reshaped_tensor = tile(reshaped_tensor, new_repeats)
tiled_shape = tuple(s * r for s, r in zip(x.shape, repeats))
result = paddle.reshape(tiled_reshaped_tensor, tiled_shape)
return result
if ivy.min(repeats) == 0:
# This logic is to mimic other backends behaviour when a 0 in repeat
# is received since paddle doesn't natively support it
if len(repeats) < x.ndim:
shape = x.shape
shape[-len(repeats) :] = paddle_backend.multiply(
shape[-len(repeats) :], repeats
).tolist()
elif len(repeats) > x.ndim:
shape = (
repeats.tolist()
if isinstance(repeats, paddle.Tensor)
else list(repeats)
)
shape[-x.ndim - 1 :] = paddle_backend.multiply(
shape[-x.ndim - 1 :], repeats
).tolist()
else:
shape = paddle_backend.multiply(x.shape, repeats).tolist()
return paddle.zeros(shape).cast(x.dtype)
return paddle.tile(x, repeats)
@with_unsupported_dtypes(
{
"2.6.0 and below": (
"bfloat16",
"float16",
"int8",
"int16",
"uint8",
)
},
backend_version,
)
def constant_pad(
x: paddle.Tensor,
/,
pad_width: List[List[int]],
*,
value: Number = 0.0,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
paddings = []
pad_width = list(pad_width)
for item in pad_width:
if len(item) != 2:
raise ivy.utils.exceptions.IvyException("Length of each item should be 2")
else:
paddings.append(item[0])
paddings.append(item[1])
return paddle.nn.functional.pad(x=x, pad=paddings, value=value)
def zero_pad(
x: paddle.Tensor,
/,
pad_width: List[List[int]],
*,
out: Optional[paddle.Tensor] = None,
):
return paddle_backend.constant_pad(x, pad_width=pad_width, value=0)
@with_supported_dtypes(
{
"2.6.0 and below": (
"bool",
"int32",
"int64",
"float16",
"bfloat16",
"float32",
"float64",
"complex64",
"complex128",
)
},
backend_version,
)
def swapaxes(
x: paddle.Tensor,
axis0: int,
axis1: int,
/,
*,
copy: Optional[bool] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
axes = [x for x in range(x.ndim)]
axes[axis0], axes[axis1] = axes[axis1], axes[axis0]
return paddle_backend.permute_dims(x, axes)
def clip(
x: paddle.Tensor,
/,
x_min: Optional[Union[Number, paddle.Tensor]] = None,
x_max: Optional[Union[Number, paddle.Tensor]] = None,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if x_min is None and x_max is None:
raise ValueError("At least one of the x_min or x_max must be provided")
promoted_type = x.dtype
if x_min is not None:
if not hasattr(x_min, "dtype"):
x_min = ivy.array(x_min).data
promoted_type = ivy.as_native_dtype(ivy.promote_types(x.dtype, x_min.dtype))
x = paddle_backend.maximum(
paddle.cast(x, promoted_type), paddle.cast(x_min, promoted_type)
)
if x_max is not None:
if not hasattr(x_max, "dtype"):
x_max = ivy.array(x_max).data
promoted_type = ivy.as_native_dtype(
ivy.promote_types(promoted_type, x_max.dtype)
)
x = paddle_backend.minimum(
paddle.cast(x, promoted_type), paddle.cast(x_max, promoted_type)
)
return x
@with_unsupported_dtypes(
{"2.6.0 and below": ("int16", "int8", "uint8", "bfloat16")}, backend_version
)
def unstack(
x: paddle.Tensor,
/,
*,
copy: Optional[bool] = None,
axis: int = 0,
keepdims: bool = False,
) -> List[paddle.Tensor]:
if x.ndim == 0:
return [x]
if axis is not None:
axis %= x.ndim
else:
axis = 0
if paddle.is_complex(x):
real_list = paddle.unbind(x.real(), axis)
imag_list = paddle.unbind(x.imag(), axis)
ret = [paddle.complex(a, b) for a, b in zip(real_list, imag_list)]
else:
ret = paddle.unbind(x, axis)
if keepdims:
return [paddle_backend.expand_dims(r, axis=axis) for r in ret]
return ret
| ivy/ivy/functional/backends/paddle/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/manipulation.py",
"repo_id": "ivy",
"token_count": 7636
} | 21 |
# global
import tensorflow as tf
from typing import Union, Optional
# invert_permutation
def invert_permutation(
x: Union[tf.Tensor, tf.Variable, list, tuple],
/,
) -> Union[tf.Tensor, tf.Variable]:
return tf.cast(tf.math.invert_permutation(x), tf.int64)
# lexsort
def lexsort(
keys: Union[tf.Tensor, tf.Variable],
/,
*,
axis: int = -1,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
shape = keys.shape.as_list()
if len(shape) == 1:
return tf.cast(tf.argsort(keys, axis=axis, stable=True), tf.int64)
if shape[0] == 0:
raise TypeError("need sequence of keys with len > 0 in lexsort")
if len(shape) == 2 and shape[1] == 1:
return tf.cast(tf.convert_to_tensor([0]), tf.int64)
result = tf.argsort(keys[0], axis=axis, stable=True)
if shape[0] == 1:
return tf.cast(result, tf.int64)
for i in range(1, shape[0]):
key = keys[i]
ind = tf.gather(key, result)
temp = tf.argsort(ind, axis=axis, stable=True)
result = tf.gather(result, temp)
return tf.cast(result, tf.int64)
| ivy/ivy/functional/backends/tensorflow/experimental/sorting.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/experimental/sorting.py",
"repo_id": "ivy",
"token_count": 494
} | 22 |
from .experimental import random, statistical
from . import elementwise
from .elementwise import *
from .experimental.random import *
from .experimental.statistical import *
name = "tf_probability"
incompatible_sub_backends = ()
| ivy/ivy/functional/backends/tensorflow/sub_backends/tf_probability/__init__.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/sub_backends/tf_probability/__init__.py",
"repo_id": "ivy",
"token_count": 63
} | 23 |
import xformers
from . import layers
from .layers import *
name = "xformers"
incompatible_sub_backends = ()
| ivy/ivy/functional/backends/torch/sub_backends/xformers/__init__.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/sub_backends/xformers/__init__.py",
"repo_id": "ivy",
"token_count": 38
} | 24 |
# global
import ivy
from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def cond(pred, true_fun, false_fun, *operands, operand=None, linear=None):
if operand is not None:
if operands:
raise ivy.utils.exceptions.IvyException(
"if `operand` is passed, positional `operands` should not be passed"
)
operands = (operand,)
if pred:
return true_fun(*operands)
return false_fun(*operands)
@to_ivy_arrays_and_back
def fori_loop(lower, upper, body_fun, init_val):
if not callable(body_fun):
raise ivy.exceptions.IvyException(
"jax.lax.fori_loop: Argument body_fun should be callable."
)
val = init_val
for i in range(lower, upper):
val = body_fun(i, val)
return val
@to_ivy_arrays_and_back
def map(f, xs):
return ivy.stack([f(x) for x in xs])
@to_ivy_arrays_and_back
def scan(f, init, xs, length=None, reverse=False, unroll=1):
if not (callable(f)):
raise ivy.exceptions.IvyException(
"jax.lax.scan: Argument f should be callable."
)
if xs is None and length is None:
raise ivy.exceptions.IvyException(
"jax.lax.scan: Either xs or length must be provided."
)
if length is not None and (not isinstance(length, int) or length < 0):
raise ivy.exceptions.IvyException(
"jax.lax.scan: length must be a non-negative integer."
)
if xs is None:
xs = [None] * length
carry = init
ys = []
for x in xs:
carry, y = f(carry, x)
ys.append(y)
return carry, ivy.stack(ys)
@to_ivy_arrays_and_back
def switch(index, branches, *operands, operand=None):
if operand is not None:
if operands:
raise ivy.utils.exceptions.IvyException(
"if `operand` is passed, positional `operands` should not be passed"
)
operands = (operand,)
index = max(index, 0)
index = min(len(branches) - 1, index)
return branches[index](*operands)
@to_ivy_arrays_and_back
def while_loop(cond_fun, body_fun, init_val):
if not (callable(body_fun) and callable(cond_fun)):
raise ivy.exceptions.IvyException(
"jax.lax.while_loop: Arguments body_fun and cond_fun should be callable."
)
val = init_val
while cond_fun(val):
val = body_fun(val)
return val
| ivy/ivy/functional/frontends/jax/lax/control_flow_operators.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/lax/control_flow_operators.py",
"repo_id": "ivy",
"token_count": 1124
} | 25 |
from ivy.functional.frontends.jax.numpy import asarray
from ivy.functional.frontends.numpy import (
dtype,
generic,
number,
inexact,
complexfloating,
floating,
integer,
signedinteger,
unsignedinteger,
)
class _ScalarMeta(type):
def __hash__(self):
return hash(self.dtype.type)
def __eq__(self, other):
return id(self) == id(other) or self.dtype.type == other
def __ne__(self, other):
return not (self == other)
def __call__(self, x):
return asarray(x, dtype=self.dtype)
def __instancecheck__(self, instance):
return isinstance(instance, self.dtype.type)
# --- Helpers --- #
# --------------- #
def _make_scalar_type(scalar_type):
meta = _ScalarMeta(scalar_type, (object,), {"dtype": dtype(scalar_type)})
return meta
bfloat16 = _make_scalar_type("bfloat16")
bool_ = _make_scalar_type("bool_")
complex128 = _make_scalar_type("complex128")
complex64 = _make_scalar_type("complex64")
float16 = _make_scalar_type("float16")
float32 = _make_scalar_type("float32")
float64 = _make_scalar_type("float64")
int16 = _make_scalar_type("int16")
int32 = _make_scalar_type("int32")
int64 = _make_scalar_type("int64")
int8 = _make_scalar_type("int8")
uint16 = _make_scalar_type("uint16")
uint32 = _make_scalar_type("uint32")
uint64 = _make_scalar_type("uint64")
uint8 = _make_scalar_type("uint8")
compex_ = complex128
complexfloating = complexfloating
float_ = float64
floating = floating
generic = generic
inexact = inexact
int_ = int64
integer = integer
number = number
signedinteger = signedinteger
uint = uint64
unsignedinteger = unsignedinteger
| ivy/ivy/functional/frontends/jax/numpy/scalars.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/numpy/scalars.py",
"repo_id": "ivy",
"token_count": 656
} | 26 |
import ivy
from ivy.functional.frontends.mxnet.func_wrapper import (
to_ivy_arrays_and_back,
)
from ivy.functional.frontends.numpy.func_wrapper import handle_numpy_dtype
@handle_numpy_dtype
@to_ivy_arrays_and_back
def array(object, dtype=None, ctx=None):
if not ivy.is_array(object) and not dtype:
return ivy.array(object, dtype="float32", device=ctx)
return ivy.array(object, dtype=dtype, device=ctx)
| ivy/ivy/functional/frontends/mxnet/numpy/creation.py/0 | {
"file_path": "ivy/ivy/functional/frontends/mxnet/numpy/creation.py",
"repo_id": "ivy",
"token_count": 171
} | 27 |
# global
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
outputs_to_frontend_arrays,
to_ivy_arrays_and_back,
handle_numpy_dtype,
)
class nd_grid:
def __init__(self, sparse=False):
self.sparse = sparse
self.grids = []
self.shapes = []
def __getitem__(self, key):
if isinstance(key, slice):
start, stop, step = self._split_slice(key)
ret = ivy.arange(start, stop, step)
return (
ivy.native_array(ret, dtype="int64")
if ivy.is_int_dtype(ret)
else ivy.native_array(ret, dtype="float64")
)
# more than one slice , key is tuple
self.grids = []
self.shapes = []
for k in key:
start, stop, step = self._split_slice(k)
ret = ivy.arange(start, stop, step)
self.grids.append(ret)
self.shapes.append(ivy.shape(ret)[0])
self._process_arrays()
return self._ret_grids()
def _split_slice(self, slice):
start = slice.start
stop = slice.stop
step = slice.step
if start is None:
start = 0
elif stop is None:
stop = start
start = 0
if isinstance(step, complex):
step = abs(stop - start) / (int(abs(step)) - 1)
stop += step
elif step is None:
step = 1
return start, stop, step
def _process_arrays(self):
total_arr = len(self.grids)
current_arr = total_arr
while current_arr != 0:
arr = self._shape_array(self.grids[current_arr - 1], current_arr, total_arr)
if self.sparse:
self.grids[current_arr - 1] = arr
else:
self.grids[current_arr - 1] = arr[0]
current_arr -= 1
def _init_array(self, array, current, total):
rep = 1
for i in range(current, total):
rep *= self.shapes[i]
return ivy.repeat(array, rep, axis=0)
def _shape_array(self, array, current, total):
# ogrid
if self.sparse:
new_shape = [1] * total
new_shape[current - 1] = self.shapes[current - 1]
return ivy.reshape(array, new_shape)
# mgrid
if current != total:
array = self._init_array(array, current, total)
while current != 1:
new_shape = [1] + self.shapes[current - 1 : total]
array = ivy.reshape(array, new_shape)
array = ivy.repeat(array, self.shapes[current - 2], axis=0)
current -= 1
array = ivy.reshape(array, [1] + self.shapes)
return array
def _ret_grids(self):
is_float = False
for grid in self.grids:
if ivy.is_float_dtype(grid):
is_float = True
break
# ogrid
if self.sparse:
for i in range(0, len(self.grids)):
self.grids[i] = (
ivy.native_array(self.grids[i], dtype="float64")
if is_float
else ivy.native_array(self.grids[i], dtype="int64")
)
return self.grids
# mgrid
return (
ivy.native_array(self.grids, dtype="float64")
if is_float
else ivy.native_array(self.grids, dtype="int64")
)
class MGrid(nd_grid):
def __init__(self):
super().__init__(sparse=False)
class OGrid(nd_grid):
def __init__(self):
super().__init__(sparse=True)
@handle_numpy_dtype
@outputs_to_frontend_arrays
def arange(start, stop=None, step=1, dtype=None, *, like=None):
return ivy.arange(start, stop, step, dtype=dtype)
@handle_numpy_dtype
@to_ivy_arrays_and_back
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
cr = ivy.log(stop / start) / (num - 1 if endpoint else num)
x = ivy.linspace(
0, cr * (num - 1 if endpoint else num), num, endpoint=endpoint, axis=axis
)
x = ivy.exp(x)
x = start * x
x[0] = start
if endpoint:
x[-1] = stop
return x.asarray(dtype=dtype)
@handle_numpy_dtype
@to_ivy_arrays_and_back
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0):
ret = ivy.linspace(start, stop, num, axis=axis, endpoint=endpoint, dtype=dtype)
if retstep:
if endpoint:
num -= 1
step = ivy.divide(ivy.subtract(stop, start), num)
return ret, step
return ret
@handle_numpy_dtype
@to_ivy_arrays_and_back
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):
if not endpoint:
interval = (stop - start) / num
stop -= interval
return ivy.logspace(start, stop, num, base=base, axis=axis, dtype=dtype)
@to_ivy_arrays_and_back
def meshgrid(*xi, copy=True, sparse=False, indexing="xy"):
# Todo: add sparse check
ret = ivy.meshgrid(*xi, indexing=indexing)
if copy:
return [ivy.copy_array(x) for x in ret]
return ret
mgrid = MGrid()
ogrid = OGrid()
| ivy/ivy/functional/frontends/numpy/creation_routines/numerical_ranges.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/creation_routines/numerical_ranges.py",
"repo_id": "ivy",
"token_count": 2563
} | 28 |
from . import arithmetic_operations
from .arithmetic_operations import *
from . import trigonometric_functions
from .trigonometric_functions import *
from . import hyperbolic_functions
from .hyperbolic_functions import *
from . import rounding
from .rounding import *
from . import sums_products_differences
from .sums_products_differences import *
from . import exponents_and_logarithms
from .exponents_and_logarithms import *
from . import other_special_functions
from .other_special_functions import *
from . import floating_point_routines
from .floating_point_routines import *
from . import rational_routines
from .rational_routines import *
from . import handling_complex_numbers
from .handling_complex_numbers import *
from . import extrema_finding
from .extrema_finding import *
from . import miscellaneous
from .miscellaneous import *
| ivy/ivy/functional/frontends/numpy/mathematical_functions/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/mathematical_functions/__init__.py",
"repo_id": "ivy",
"token_count": 239
} | 29 |
# global
import struct
import warnings
# local
import ivy
import ivy.functional.frontends.numpy as np_frontend
from ivy.functional.frontends.numpy.func_wrapper import _to_ivy_array
from ivy.func_wrapper import (
with_supported_device_and_dtypes,
)
# --- Classes ---#
# ---------------#
class ndarray:
def __init__(self, shape, dtype="float32", order=None, _init_overload=False):
if isinstance(dtype, np_frontend.dtype):
dtype = dtype.ivy_dtype
# in this case shape is actually the desired array
if _init_overload:
self._ivy_array = (
ivy.array(shape) if not isinstance(shape, ivy.Array) else shape
)
else:
self._ivy_array = ivy.empty(shape=shape, dtype=dtype)
ivy.utils.assertions.check_elem_in_list(
order,
["C", "F", None],
message="order must be one of 'C', 'F'",
)
if order == "F":
self._f_contiguous = True
else:
self._f_contiguous = False
def __repr__(self):
return str(self.ivy_array.__repr__()).replace(
"ivy.array", "ivy.frontends.numpy.ndarray"
)
# Properties #
# ---------- #
@property
def ivy_array(self):
return self._ivy_array
@property
def T(self):
return np_frontend.transpose(self)
@property
def shape(self):
return tuple(self.ivy_array.shape.shape)
@property
def size(self):
return self.ivy_array.size
@property
def dtype(self):
return np_frontend.dtype(self.ivy_array.dtype)
@property
def ndim(self):
return len(self.shape)
@property
def flat(self):
self = self.flatten()
return self
# Setters #
# --------#
@ivy_array.setter
def ivy_array(self, array):
self._ivy_array = (
ivy.array(array) if not isinstance(array, ivy.Array) else array
)
# Instance Methods #
# ---------------- #
def astype(self, dtype, order="K", casting="unsafe", subok=True, copy=True):
ivy.utils.assertions.check_elem_in_list(
order,
["C", "F", "A", "K"],
message="order must be one of 'C', 'F', or 'A'",
)
if copy and self._f_contiguous:
ret = np_frontend.array(self.ivy_array, order="F")
else:
ret = np_frontend.array(self.ivy_array) if copy else self
dtype = np_frontend.to_ivy_dtype(dtype)
if np_frontend.can_cast(ret, dtype, casting=casting):
ret.ivy_array = ret.ivy_array.astype(dtype)
else:
raise ivy.utils.exceptions.IvyException(
f"Cannot cast array data from dtype('{ret.ivy_array.dtype}')"
f" to dtype('{dtype}') according to the rule '{casting}'"
)
if order == "F":
ret._f_contiguous = True
elif order == "C":
ret._f_contiguous = False
return ret
def argmax(
self,
/,
*,
axis=None,
out=None,
keepdims=False,
):
return np_frontend.argmax(
self,
axis=axis,
out=out,
keepdims=keepdims,
)
def reshape(self, newshape, /, *, order="C"):
ivy.utils.assertions.check_elem_in_list(
order,
["C", "F", "A"],
message="order must be one of 'C', 'F', or 'A'",
)
if (order == "A" and self._f_contiguous) or order == "F":
return np_frontend.reshape(self, newshape, order="F")
else:
return np_frontend.reshape(self, newshape, order="C")
def resize(self, newshape, /, *, refcheck=True):
return np_frontend.resize(self, newshape, refcheck)
def transpose(self, axes, /):
if axes and isinstance(axes[0], tuple):
axes = axes[0]
return np_frontend.transpose(self, axes=axes)
def swapaxes(self, axis1, axis2, /):
return np_frontend.swapaxes(self, axis1, axis2)
def all(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
if not (dtype is None or ivy.is_bool_dtype(dtype)):
raise TypeError(
"No loop matching the specified signature and "
"casting was found for ufunc logical_or"
)
return np_frontend.all(self, axis, out, keepdims, where=where)
def any(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
if not (dtype is None or ivy.is_bool_dtype(dtype)):
raise TypeError(
"No loop matching the specified signature and "
"casting was found for ufunc logical_or"
)
return np_frontend.any(self, axis, out, keepdims, where=where)
def argsort(self, *, axis=-1, kind=None, order=None):
return np_frontend.argsort(self, axis=axis, kind=kind, order=order)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
return np_frontend.mean(
self,
axis=axis,
dtype=dtype,
out=out,
keepdims=keepdims,
where=where,
)
def min(self, *, axis=None, out=None, keepdims=False, initial=None, where=True):
return np_frontend.amin(
self,
axis=axis,
out=out,
keepdims=keepdims,
initial=initial,
where=where,
)
def max(self, *, axis=None, out=None, keepdims=False, initial=None, where=True):
return np_frontend.amax(
self,
axis=axis,
out=out,
keepdims=keepdims,
initial=initial,
where=where,
)
def argmin(
self,
/,
*,
axis=None,
keepdims=False,
out=None,
):
return np_frontend.argmin(
self,
axis=axis,
keepdims=keepdims,
out=out,
)
def clip(
self,
min,
max,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
return np_frontend.clip(
self,
min,
max,
out=out,
where=where,
casting=casting,
order=order,
dtype=dtype,
subok=subok,
)
def compress(self, condition, axis=None, out=None):
return np_frontend.compress(
condition=condition,
a=self,
axis=axis,
out=out,
)
def conjugate(
self,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
return np_frontend.conjugate(
self.ivy_array,
out=out,
where=where,
casting=casting,
order=order,
dtype=dtype,
subok=subok,
)
def cumprod(self, *, axis=None, dtype=None, out=None):
return np_frontend.cumprod(
self,
axis=axis,
dtype=dtype,
out=out,
)
def cumsum(self, *, axis=None, dtype=None, out=None):
return np_frontend.cumsum(
self,
axis=axis,
dtype=dtype,
out=out,
)
def dot(self, b, out=None):
return np_frontend.dot(self, b, out=out)
def diagonal(self, *, offset=0, axis1=0, axis2=1):
return np_frontend.diagonal(
self,
offset=offset,
axis1=axis1,
axis2=axis2,
)
def sort(self, *, axis=-1, kind=None, order=None):
return np_frontend.sort(self, axis=axis, kind=kind, order=order)
def copy(self, order="C"):
return np_frontend.copy(self, order=order)
def nonzero(
self,
):
return np_frontend.nonzero(self)[0]
def ravel(self, order="C"):
ivy.utils.assertions.check_elem_in_list(
order,
["C", "F", "A", "K"],
message="order must be one of 'C', 'F', 'A', or 'K'",
)
if (order in ["K", "A"] and self._f_contiguous) or order == "F":
return np_frontend.ravel(self, order="F")
else:
return np_frontend.ravel(self, order="C")
def flatten(self, order="C"):
ivy.utils.assertions.check_elem_in_list(
order,
["C", "F", "A", "K"],
message="order must be one of 'C', 'F', 'A', or 'K'",
)
if (order in ["K", "A"] and self._f_contiguous) or order == "F":
return np_frontend.ravel(self, order="F")
else:
return np_frontend.ravel(self, order="C")
def fill(self, num, /):
self.ivy_array = np_frontend.full(self.shape, num).ivy_array
return None
def repeat(self, repeats, axis=None):
return np_frontend.repeat(self, repeats, axis=axis)
def searchsorted(self, v, side="left", sorter=None):
return np_frontend.searchsorted(self, v, side=side, sorter=sorter)
def squeeze(self, axis=None):
return np_frontend.squeeze(self, axis=axis)
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return np_frontend.std(
self,
axis=axis,
dtype=dtype,
out=out,
ddof=ddof,
keepdims=keepdims,
where=where,
)
def tobytes(self, order="C"):
return _to_bytes_helper(self.ivy_array, order=order)
def tostring(self, order="C"):
warnings.warn(
"DeprecationWarning: tostring() is deprecated. Use tobytes() instead."
)
return self.tobytes(order=order)
def prod(
self,
*,
axis=None,
dtype=None,
out=None,
keepdims=False,
initial=None,
where=True,
):
return np_frontend.prod(
self,
axis=axis,
dtype=dtype,
keepdims=keepdims,
initial=initial,
where=where,
out=out,
)
def sum(
self,
*,
axis=None,
dtype=None,
out=None,
keepdims=False,
initial=None,
where=True,
):
return np_frontend.sum(
self,
axis=axis,
dtype=dtype,
keepdims=keepdims,
initial=initial,
where=where,
out=out,
)
def tofile(self, fid, /, sep="", format_="%s"):
if self.ndim == 0:
string = str(self)
else:
string = sep.join([str(item) for item in self.tolist()])
with open(fid, "w") as f:
f.write(string)
def tolist(self) -> list:
return self._ivy_array.to_list()
@with_supported_device_and_dtypes(
{
"1.26.3 and below": {
"cpu": (
"int64",
"float32",
"float64",
"bfloat16",
"complex64",
"complex128",
"uint64",
)
}
},
"numpy",
)
def trace(self, *, offset=0, axis1=0, axis2=1, out=None):
return np_frontend.trace(
self,
offset=offset,
axis1=axis1,
axis2=axis2,
out=out,
)
def view(self):
return np_frontend.reshape(self, tuple(self.shape))
def __add__(self, value, /):
return np_frontend.add(self, value)
def __radd__(self, value, /):
return np_frontend.add(self, value)
def __sub__(self, value, /):
return np_frontend.subtract(self, value)
def __mul__(self, value, /):
return np_frontend.multiply(self, value)
def __rmul__(self, value, /):
return np_frontend.multiply(value, self)
def __truediv__(self, value, /):
return np_frontend.true_divide(self, value)
def __floordiv__(self, value, /):
return np_frontend.floor_divide(self, value)
def __rtruediv__(self, value, /):
return np_frontend.true_divide(value, self)
def __pow__(self, value, /):
return np_frontend.power(self, value)
def __and__(self, value, /):
return np_frontend.logical_and(self, value)
def __or__(self, value, /):
return np_frontend.logical_or(self, value)
def __xor__(self, value, /):
return np_frontend.logical_xor(self, value)
def __matmul__(self, value, /):
return np_frontend.matmul(self, value)
def __copy__(
self,
):
return np_frontend.copy(self)
def __deepcopy__(self, memo, /):
return self.ivy_array.__deepcopy__(memo)
def __neg__(
self,
):
return np_frontend.negative(self)
def __pos__(
self,
):
return np_frontend.positive(self)
def __bool__(
self,
):
if isinstance(self.ivy_array, int):
return self.ivy_array != 0
temp = ivy.squeeze(ivy.asarray(self.ivy_array), axis=None)
if ivy.get_num_dims(temp) > 1:
raise ValueError(
"The truth value of an array with more than one element is ambiguous. "
"Use a.any() or a.all()"
)
return temp != 0
def __ne__(self, value, /):
return np_frontend.not_equal(self, value)
def __len__(self):
return len(self.ivy_array)
def __eq__(self, value, /):
return np_frontend.equal(self, value)
def __ge__(self, value, /):
return np_frontend.greater_equal(self, value)
def __gt__(self, value, /):
return np_frontend.greater(self, value)
def __le__(self, value, /):
return np_frontend.less_equal(self, value)
def __lt__(self, value, /):
return np_frontend.less(self, value)
def __int__(
self,
):
if "complex" in self.dtype.name:
raise TypeError(
"int() argument must be a string, a bytes-like object or a number, not"
" 'complex"
)
return int(self.ivy_array)
def __float__(
self,
):
if "complex" in self.dtype.name:
raise TypeError(
"float() argument must be a string or a real number, not 'complex"
)
return float(self.ivy_array)
def __complex__(
self,
):
return complex(self.ivy_array)
def __contains__(self, key, /):
return np_frontend.any(self == key)
def __iadd__(self, value, /):
return np_frontend.add(self, value, out=self)
def __isub__(self, value, /):
return np_frontend.subtract(self, value, out=self)
def __imul__(self, value, /):
return np_frontend.multiply(self, value, out=self)
def __itruediv__(self, value, /):
return np_frontend.true_divide(self, value, out=self)
def __ifloordiv__(self, value, /):
return np_frontend.floor_divide(self, value, out=self)
def __ipow__(self, value, /):
return np_frontend.power(self, value, out=self)
def __iand__(self, value, /):
return np_frontend.logical_and(self, value, out=self)
def __ior__(self, value, /):
return np_frontend.logical_or(self, value, out=self)
def __ixor__(self, value, /):
return np_frontend.logical_xor(self, value, out=self)
def __imod__(self, value, /):
return np_frontend.mod(self, value, out=self)
def __invert__(self, /):
return ivy.bitwise_invert(self.ivy_array)
def __abs__(self):
return np_frontend.absolute(self)
def __array__(self, dtype=None, /):
if not dtype:
return ivy.to_numpy(self.ivy_array)
return ivy.to_numpy(self.ivy_array).astype(dtype)
def __array_wrap__(self, array, context=None, /):
return np_frontend.array(array)
def __getitem__(self, key, /):
ivy_args = ivy.nested_map(_to_ivy_array, [self, key])
ret = ivy.get_item(*ivy_args)
return np_frontend.ndarray(ret, _init_overload=True)
def __setitem__(self, key, value, /):
key, value = ivy.nested_map(_to_ivy_array, [key, value])
self.ivy_array[key] = value
def __iter__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d ndarray not supported")
for i in range(self.shape[0]):
yield self[i]
def __mod__(self, value, /):
return np_frontend.mod(self, value, out=self)
def ptp(self, *, axis=None, out=None, keepdims=False):
xmax = self.max(axis=axis, out=out, keepdims=keepdims)
xmin = self.min(axis=axis, out=out, keepdims=keepdims)
return np_frontend.subtract(xmax, xmin)
def item(self, *args):
if len(args) == 0:
return self[0].ivy_array.to_scalar()
elif len(args) == 1 and isinstance(args[0], int):
index = args[0]
return self.ivy_array.flatten()[index].to_scalar()
else:
out = self
for index in args:
out = out[index]
return out.ivy_array.to_scalar()
def __rshift__(self, value, /):
return ivy.bitwise_right_shift(self.ivy_array, value)
def __lshift__(self, value, /):
return ivy.bitwise_left_shift(self.ivy_array, value)
def __ilshift__(self, value, /):
return ivy.bitwise_left_shift(self.ivy_array, value, out=self)
def round(self, decimals=0, out=None):
return np_frontend.round(self, decimals=decimals, out=out)
def var(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return np_frontend.var(
self,
axis=axis,
dtype=dtype,
out=out,
ddof=ddof,
keepdims=keepdims,
where=where,
)
def __irshift__(self, value, /):
return ivy.bitwise_right_shift(self.ivy_array, value, out=self)
# --- Helpers --- #
# --------------- #
# tobytes helper function
def _to_bytes_helper(array, order="C"):
def _integers_bytes_repr(item_val, /, *, dtype=None):
if dtype == ivy.int8:
return item_val.to_bytes(1, byteorder="big", signed=True)
elif dtype == ivy.int16:
return struct.pack("h", item_val)
elif dtype == ivy.int32:
return struct.pack("i", item_val)
elif dtype == ivy.int64:
return struct.pack("q", item_val)
def _float_bytes_repr(item_val, /, *, dtype=None):
if dtype == ivy.float16:
return struct.pack("e", item_val)
elif dtype == ivy.float32:
return struct.pack("f", item_val)
return struct.pack("d", item_val)
def _bool_bytes_repr(item_val, /):
return struct.pack("?", item_val)
def _complex_bytes_repr(item_val, /, *, dtype=None):
if dtype == ivy.complex64:
# complex64 is represented as two 32-bit floats
return struct.pack("ff", item_val.real, item_val.imag)
elif dtype == ivy.complex128:
# complex128 is represented as two 64-bit floats
return struct.pack("dd", item_val.real, item_val.imag)
def _unsigned_int_bytes_repr(item_val, /, *, dtype=None):
if dtype == ivy.uint8:
return item_val.to_bytes(1, byteorder="little", signed=False)
elif dtype == ivy.uint16:
return struct.pack("H", item_val)
elif dtype == ivy.uint32:
return struct.pack("I", item_val)
elif dtype == ivy.uint64:
return struct.pack("Q", item_val)
if ivy.get_num_dims(array) == 0:
scalar_value = ivy.to_scalar(array)
dtype = ivy.dtype(array)
if ivy.is_int_dtype(dtype) and not ivy.is_uint_dtype(dtype):
return _integers_bytes_repr(scalar_value, dtype=dtype)
elif ivy.is_float_dtype(dtype):
return _float_bytes_repr(scalar_value, dtype=dtype)
elif ivy.is_bool_dtype(dtype):
return _bool_bytes_repr(scalar_value)
elif ivy.is_complex_dtype(dtype):
return _complex_bytes_repr(scalar_value, dtype=dtype)
elif ivy.is_uint_dtype(dtype):
return _unsigned_int_bytes_repr(scalar_value, dtype=dtype)
else:
raise TypeError("Unsupported data type for the array.")
else:
if order == "F":
array = np_frontend.ravel(array, order="F").ivy_array
array = ivy.flatten(array)
if ivy.is_int_dtype(array) and not ivy.is_uint_dtype(array):
bytes_reprs = [
_integers_bytes_repr(item, dtype=ivy.dtype(array))
for item in array.to_list()
]
return b"".join(bytes_reprs)
elif ivy.is_float_dtype(array):
bytes_reprs = [
_float_bytes_repr(item, dtype=ivy.dtype(array))
for item in array.to_list()
]
return b"".join(bytes_reprs)
elif ivy.is_bool_dtype(array):
bytes_reprs = [_bool_bytes_repr(item) for item in array.to_list()]
return b"".join(bytes_reprs)
elif ivy.is_complex_dtype(array):
bytes_reprs = [
_complex_bytes_repr(item, dtype=ivy.dtype(array))
for item in array.to_list()
]
return b"".join(bytes_reprs)
elif ivy.is_uint_dtype(array):
bytes_reprs = [
_unsigned_int_bytes_repr(item, dtype=ivy.dtype(array))
for item in array.to_list()
]
return b"".join(bytes_reprs)
else:
raise ValueError("Unsupported data type for the array.")
| ivy/ivy/functional/frontends/numpy/ndarray/ndarray.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/ndarray/ndarray.py",
"repo_id": "ivy",
"token_count": 11430
} | 30 |
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_supported_dtypes
@with_supported_dtypes({"1.26.3 and below": ("int64",)}, "numpy")
@to_ivy_arrays_and_back
def bincount(x, /, weights=None, minlength=0):
return ivy.bincount(x, weights=weights, minlength=minlength)
| ivy/ivy/functional/frontends/numpy/statistics/histograms.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/statistics/histograms.py",
"repo_id": "ivy",
"token_count": 132
} | 31 |
# global
import ivy
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.functional.frontends.paddle import promote_types_of_paddle_inputs
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
@with_supported_dtypes({"2.4.1 and above": ("int64",)}, "paddle")
@to_ivy_arrays_and_back
def bincount(x, weights=None, minlength=0, name=None):
return ivy.bincount(x, weights=weights, minlength=minlength)
# bmm
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def bmm(x, y, transpose_x=False, transpose_y=False, name=None):
if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:
raise RuntimeError("input must be 3D matrices")
x, y = promote_types_of_paddle_inputs(x, y)
return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
# cholesky
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def cholesky(x, /, *, upper=False, name=None):
return ivy.cholesky(x, upper=upper)
# cholesky_solve
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def cholesky_solve(x, y, /, *, upper=False, name=None):
if upper:
y = ivy.matrix_transpose(y)
Y = ivy.solve(y, x)
return ivy.solve(ivy.matrix_transpose(y), Y)
# cond
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def cond(x, p=None, name=None):
ret = ivy.cond(x, p=p, out=name)
if ret.shape == ():
ret = ret.reshape((1,))
return ret
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
)
@to_ivy_arrays_and_back
def cross(x, y, /, *, axis=9, name=None):
x, y = promote_types_of_paddle_inputs(x, y)
return ivy.cross(x, y, axis=axis)
# diagonal
@with_supported_dtypes(
{
"2.6.0 and below": (
"int32",
"int64",
"float64",
"complex128",
"float32",
"complex64",
"bool",
)
},
"paddle",
)
@to_ivy_arrays_and_back
def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
return ivy.diagonal(x, offset=offset, axis1=axis1, axis2=axis2)
@with_supported_dtypes({"2.4.1 and above": ("float64", "float32")}, "paddle")
@to_ivy_arrays_and_back
def dist(x, y, p=2):
ret = ivy.vector_norm(ivy.subtract(x, y), ord=p)
return ivy.reshape(ret, (1,))
# dot
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def dot(x, y, name=None):
x, y = promote_types_of_paddle_inputs(x, y)
out = ivy.multiply(x, y)
return ivy.sum(out, axis=ivy.get_num_dims(x) - 1, keepdims=False)
# eig
@to_ivy_arrays_and_back
def eig(x, name=None):
return ivy.eig(x)
# eigh
@to_ivy_arrays_and_back
def eigh(x, UPLO="L", name=None):
return ivy.eigh(x, UPLO=UPLO)
# eigvals
@to_ivy_arrays_and_back
def eigvals(x, name=None):
return ivy.eigvals(x)
# eigvalsh
@to_ivy_arrays_and_back
def eigvalsh(x, UPLO="L", name=None):
return ivy.eigvalsh(x, UPLO=UPLO)
@to_ivy_arrays_and_back
def lu_unpack(lu_data, lu_pivots, unpack_datas=True, unpack_pivots=True, *, out=None):
A = lu_data
n = A.shape
m = len(lu_pivots)
pivot_matrix = ivy.eye(m)
L = ivy.tril(A)
L.fill_diagonal(1.000)
U = ivy.triu(A)
for i in range(m):
if i != lu_pivots[i] - 1:
pivot_matrix[[i, lu_pivots[i] - 1]] = pivot_matrix[[lu_pivots[i] - 1, i]]
P = pivot_matrix
if not unpack_datas:
L = ivy.zeros(n)
U = ivy.zeros(n)
if not unpack_pivots:
P = ivy.zeros(n)
else:
P = pivot_matrix
result = f"P={P}\n" + f"L={L}\n" + f"U={U}"
return result
elif not unpack_pivots:
P = ivy.zeros(n)
result = f"P={P}\n" + f"L={L}\n" + f"U={U}"
return result
else:
result = f"P={P}\n" + f"L={L}\n" + f"U={U}"
return result
# matmul
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
x, y = promote_types_of_paddle_inputs(x, y)
return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
# matrix_power
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def matrix_power(x, n, name=None):
return ivy.matrix_power(x, n)
# mv
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def mv(x, vec, name=None):
return ivy.dot(x, vec)
# norm
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def norm(x, p="fro", axis=None, keepdim=False, name=None):
if axis is None and p is not None:
if p == "fro":
p = 2
ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)
if keepdim:
ret = ret.reshape([1] * len(x.shape))
return ret
if isinstance(axis, tuple):
axis = list(axis)
if isinstance(axis, list) and len(axis) == 1:
axis = axis[0]
if isinstance(axis, int):
if p == "fro":
p = 2
if p in [0, 1, 2, ivy.inf, -ivy.inf]:
ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)
elif isinstance(p, (int, float)):
ret = ivy.pow(
ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),
float(1.0 / p),
)
elif isinstance(axis, list) and len(axis) == 2:
if p == 0:
raise ValueError
elif p == 1:
ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)
elif p in [2, "fro"]:
ret = ivy.matrix_norm(x, ord="fro", axis=axis, keepdims=keepdim)
elif p == ivy.inf:
ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)
elif p == -ivy.inf:
ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)
elif isinstance(p, (int, float)) and p > 0:
ret = ivy.pow(
ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),
float(1.0 / p),
)
else:
raise ValueError
else:
raise ValueError
return ret
# pinv
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def pinv(x, rcond=1e-15, hermitian=False, name=None):
# TODO: Add hermitian functionality
return ivy.pinv(x, rtol=rcond)
# qr
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def qr(x, mode="reduced", name=None):
return ivy.qr(x, mode=mode)
# solve
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def solve(x, y, name=None):
return ivy.solve(x, y)
# transpose
@with_unsupported_dtypes({"2.6.0 and below": ("uint8", "int8", "int16")}, "paddle")
@to_ivy_arrays_and_back
def transpose(x, perm, name=None):
return ivy.permute_dims(x, axes=perm)
| ivy/ivy/functional/frontends/paddle/linalg.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/linalg.py",
"repo_id": "ivy",
"token_count": 3636
} | 32 |
# global
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.func_wrapper import with_supported_device_and_dtypes, with_unsupported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def multinomial(x, num_samples=1, replacement=False, name=None):
n = num_samples + 1
return ivy.multinomial(n, num_samples, probs=x, replace=replacement)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def normal(mean=0.0, std=1.0, shape=None, name=None):
return ivy.random_normal(mean=mean, std=std, shape=shape)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def poisson(x, name=None):
return ivy.poisson(x, shape=None, device=None, dtype=None, seed=None, out=None)
@with_supported_device_and_dtypes(
{
"2.6.0 and above": {
"cpu": (
"bfloat16",
"float32",
"float64",
),
"gpu": (
"bfloat16",
"float16",
"float32",
"float64",
),
},
"2.4.2 and below": {
"cpu": (
"float32",
"float64",
),
"gpu": (
"float16",
"float32",
"float64",
),
},
},
"paddle",
)
@to_ivy_arrays_and_back
def rand(shape, dtype=None, name=None):
return ivy.random_uniform(low=0.0, high=1.0, shape=shape, dtype=dtype, seed=None)
@to_ivy_arrays_and_back
def randint(low=0, high=None, shape=[1], dtype=None, name=None):
return ivy.randint(low, high, shape=shape, dtype=dtype)
@with_unsupported_dtypes(
{"2.6.0 and below": ("int16", "float16", "bfloat16", "uint8")},
"paddle",
)
@to_ivy_arrays_and_back
def randint_like(x, low=0, high=None, dtype=None, name=None):
if high is None:
high = low
low = 0
if high <= 0:
raise ivy.exceptions.IvyError(
"If high is None, low must be greater than 0, but received low = 0."
)
return ivy.randint(low, high, shape=x.shape, dtype=dtype, seed=None)
def randn(shape, dtype=None, name=None):
if dtype not in ["float32", "float64"]:
raise ivy.exceptions.IvyError(
"Unsupported dtype for randn, only float32 and float64 are supported, "
)
return ivy.random_normal(shape=shape, dtype=dtype, seed=None)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def standard_normal(shape, dtype=None, name=None):
return ivy.random_normal(mean=0, std=1, shape=shape, dtype=dtype)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)
| ivy/ivy/functional/frontends/paddle/random.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/random.py",
"repo_id": "ivy",
"token_count": 1565
} | 33 |
from . import func_wrapper
from .func_wrapper import *
from . import series
from .series import *
from . import index
from .index import *
from . import dataframe
from .dataframe import *
from . import generic
from .generic import *
| ivy/ivy/functional/frontends/pandas/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/pandas/__init__.py",
"repo_id": "ivy",
"token_count": 61
} | 34 |
import ivy
from ._splitter import SplitRecord
EPSILON = ivy.finfo(ivy.double).eps
INFINITY = ivy.inf
INTPTR_MAX = ivy.iinfo(ivy.int32).max
TREE_LEAF = -1
TREE_UNDEFINED = -2
_TREE_LEAF = TREE_LEAF
_TREE_UNDEFINED = TREE_UNDEFINED
class Node:
def __init__(self):
self.left_child = None
self.right_child = None
self.feature = None
self.threshold = None
self.impurity = None
self.n_node_samples = None
self.weighted_n_node_samples = None
self.missing_go_to_left = None
class Tree:
def __init__(self, n_features, n_classes, n_outputs):
self.max_depth = 0
self.node_count = 0
self.capacity = 0
self.nodes = []
self.value = None
self.n_features = n_features
self.n_outputs = n_outputs
self.n_classes = ivy.zeros(n_outputs, dtype=ivy.int32)
self.max_n_classes = ivy.max(n_classes)
self.value_stride = n_outputs * self.max_n_classes
for k in range(n_outputs):
self.n_classes[k] = n_classes[k]
def _resize(self, capacity):
self._resize_c(capacity)
def _resize_c(self, capacity=INTPTR_MAX):
if capacity == self.capacity and len(self.nodes) != 0:
return 0
if capacity == INTPTR_MAX:
if self.capacity == 0:
capacity = 3
else:
capacity = 2 * self.capacity
if self.value is None:
self.value = ivy.zeros(
(capacity, int(self.n_outputs), int(self.max_n_classes)),
dtype=ivy.float32,
)
else:
self.value = ivy.concat(
[
self.value,
ivy.zeros(
(
int(capacity - self.capacity),
int(self.n_outputs),
int(self.max_n_classes),
),
dtype=ivy.float32,
),
]
)
if capacity < self.node_count:
self.node_count = capacity
self.capacity = capacity
return 0
def _add_node(
self,
parent,
is_left,
is_leaf,
feature,
threshold,
impurity,
n_node_samples,
weighted_n_node_samples,
missing_go_to_left,
):
node_id = self.node_count
if node_id >= self.capacity:
self._resize_c()
node = Node()
node.impurity = impurity
node.n_node_samples = n_node_samples
node.weighted_n_node_samples = weighted_n_node_samples
if parent != _TREE_UNDEFINED:
if is_left:
self.nodes[parent].left_child = node_id
else:
self.nodes[parent].right_child = node_id
if is_leaf:
node.left_child = _TREE_LEAF
node.right_child = _TREE_LEAF
node.feature = _TREE_UNDEFINED
node.threshold = _TREE_UNDEFINED
else:
node.feature = feature
node.threshold = threshold
node.missing_go_to_left = missing_go_to_left
self.nodes.append(node)
self.node_count += 1
return node_id
def predict(self, X):
X_applied = self.apply(X)
out = ivy.take(self.value, X_applied, axis=0)
if self.n_outputs == 1:
out = out.reshape((X.shape[0], self.max_n_classes))
return out
def apply(self, X):
return self._apply_dense(X)
def _apply_dense(self, X):
X_tensor = X
n_samples = X.shape[0]
out = ivy.zeros(n_samples, dtype="int32")
for i in range(n_samples):
node = self.nodes[0] # root node
while node.left_child != _TREE_LEAF:
X_i_node_feature = X_tensor[i, node.feature]
if ivy.isnan(X_i_node_feature):
if node.missing_go_to_left:
node = self.nodes[node.left_child]
else:
node = self.nodes[node.right_child]
elif X_i_node_feature <= node.threshold:
node = self.nodes[node.left_child]
else:
node = self.nodes[node.right_child]
out[i] = self.nodes.index(node) # terminal node index
return out
class StackRecord:
def __init__(
self,
start,
end,
depth,
parent,
is_left,
impurity,
n_constant_features,
):
self.start = start
self.end = end
self.depth = depth
self.parent = parent
self.is_left = is_left
self.impurity = impurity
self.n_constant_features = n_constant_features
class TreeBuilder:
def build(
self,
tree,
X,
y,
sample_weight=None,
missing_values_in_feature_mask=None,
):
pass
class DepthFirstTreeBuilder(TreeBuilder):
def __init__(
self,
splitter,
min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
min_impurity_decrease,
):
self.splitter = splitter
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_leaf = min_weight_leaf
self.max_depth = max_depth
self.min_impurity_decrease = min_impurity_decrease
def build(
self, tree, X, y, sample_weight=None, missing_values_in_feature_mask=None
):
if tree.max_depth <= 10:
init_capacity = int(2 ** (tree.max_depth + 1)) - 1
else:
init_capacity = 2047
tree._resize(init_capacity)
splitter = self.splitter
max_depth = self.max_depth
min_samples_leaf = self.min_samples_leaf
min_weight_leaf = self.min_weight_leaf
min_samples_split = self.min_samples_split
min_impurity_decrease = self.min_impurity_decrease
splitter.init(X, y, sample_weight, missing_values_in_feature_mask)
weighted_n_node_samples = 0.0
split = SplitRecord()
first = 1
max_depth_seen = -1
builder_stack = []
# Push root node onto stack
builder_stack.append(
StackRecord(
start=0,
end=splitter.n_samples,
depth=0,
parent=-2,
is_left=False,
impurity=INFINITY,
n_constant_features=0,
)
)
while len(builder_stack) > 0:
stack_record = builder_stack.pop()
start = stack_record.start
end = stack_record.end
depth = stack_record.depth
parent = stack_record.parent
is_left = stack_record.is_left
impurity = stack_record.impurity
n_constant_features = stack_record.n_constant_features
n_node_samples = end - start
_, weighted_n_node_samples = splitter.node_reset(
start, end, weighted_n_node_samples
)
is_leaf = (
depth >= max_depth
or n_node_samples < min_samples_split
or n_node_samples < 2 * min_samples_leaf
or weighted_n_node_samples < 2 * min_weight_leaf
)
if first:
impurity = splitter.node_impurity()
first = 0
is_leaf = is_leaf or impurity <= EPSILON
if not is_leaf:
_, n_constant_features, split = splitter.node_split(
impurity, split, n_constant_features
)
is_leaf = (
is_leaf
or split.pos >= end
or (split.improvement + EPSILON < min_impurity_decrease)
)
node_id = tree._add_node(
parent,
is_left,
is_leaf,
split.feature,
split.threshold,
impurity,
n_node_samples,
weighted_n_node_samples,
split.missing_go_to_left,
)
tree.value = splitter.node_value(tree.value, node_id)
if not is_leaf:
# Push right child on stack
builder_stack.append(
StackRecord(
start=split.pos,
end=end,
depth=depth + 1,
parent=node_id,
is_left=False,
impurity=split.impurity_right,
n_constant_features=n_constant_features,
)
)
# Push left child on stack
builder_stack.append(
StackRecord(
start=start,
end=split.pos,
depth=depth + 1,
parent=node_id,
is_left=True,
impurity=split.impurity_left,
n_constant_features=n_constant_features,
)
)
if depth > max_depth_seen:
max_depth_seen = depth
tree.max_depth = max_depth_seen
| ivy/ivy/functional/frontends/sklearn/tree/_tree.py/0 | {
"file_path": "ivy/ivy/functional/frontends/sklearn/tree/_tree.py",
"repo_id": "ivy",
"token_count": 5357
} | 35 |
import ivy
import ivy.functional.frontends.tensorflow as tf_frontend
from ivy.functional.frontends.tensorflow.func_wrapper import to_ivy_arrays_and_back
from ivy import with_supported_dtypes
ACTIVATION_FUNCTIONS = [
"gelu",
"leaky_relu",
"log_softmax",
"relu",
"sigmoid",
"silu",
"softmax",
"softplus",
]
# --- Helpers --- #
# --------------- #
# note: defined to avoid AST call extraction of
# 'tf_frontend.keras.activations.__dict__.items()
# or 'tf_frontend.keras.activations.__dict__.values()'
def _get_tf_keras_activations():
return tf_frontend.keras.activations.__dict__.items()
# --- Main --- #
# ------------ #
@with_supported_dtypes(
{"2.15.0 and below": ("float16", "float32", "float64")},
"tensorflow",
)
def deserialize(name, custom_objects=None):
if name is None:
return None
elif isinstance(name, str):
if custom_objects and name in custom_objects:
return custom_objects.get(name)
# To replicate tensorflow framework
elif (
ivy.current_backend().__name__.split(".")[-1] == "tensorflow"
and name in tf_frontend.keras.activations.__dict__
): # noqa
return tf_frontend.keras.activations.__dict__[name]
# On other backends, query the function from global ivy dict
elif name in ACTIVATION_FUNCTIONS:
return ivy.__dict__[name]
else:
raise ValueError(f"Unknown activation function: {name}.")
else:
raise ValueError(f"Could not interpret activation function: {name}")
@with_supported_dtypes(
{"2.15.0 and below": ("bfloat16", "float16", "float32", "float64")},
"tensorflow",
)
@to_ivy_arrays_and_back
def elu(x, alpha=1.0):
zeros = ivy.zeros_like(x, dtype=ivy.dtype(x))
ones = ivy.ones_like(x, dtype=ivy.dtype(x))
alpha = ivy.astype(ivy.array(alpha), ivy.dtype(x))
ret_val = ivy.where(
x > zeros, x, ivy.multiply(alpha, ivy.subtract(ivy.exp(x), ones))
)
return ret_val
@to_ivy_arrays_and_back
def gelu(x, approximate=False):
return ivy.gelu(x, approximate=approximate)
def get(identifier):
if identifier is None:
return tf_frontend.keras.activations.linear
elif isinstance(identifier, str):
return tf_frontend.keras.activations.deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise ValueError(f"Could not interpret function identifier: {identifier}")
@to_ivy_arrays_and_back
def hard_sigmoid(x):
dtype_in = x.dtype
point_two = ivy.full(x.shape, 0.2)
point_five = ivy.full(x.shape, 0.5)
x = ivy.multiply(x, point_two)
x = ivy.add(x, point_five)
x = ivy.clip(x, 0.0, 1.0)
x = ivy.asarray(x, dtype=dtype_in)
return x
@to_ivy_arrays_and_back
def linear(x):
return ivy.array(x)
@to_ivy_arrays_and_back
def relu(x, alpha=0.0, max_value=None, threshold=0.0):
return ivy.relu(x)
@with_supported_dtypes(
{"2.15.0 and below": ("float16", "float32", "float64")},
"tensorflow",
)
@to_ivy_arrays_and_back
def selu(x):
return ivy.selu(x)
@with_supported_dtypes(
{"2.15.0 and below": ("float16", "float32", "float64")},
"tensorflow",
)
def serialize(activation, use_legacy_format=False, custom_objects=None):
# If the activation function is None, return None
if activation is None:
return None
# If the activation function is already a string, return it
elif isinstance(activation, str):
return activation
# If the activation function is callable (a function), get its name
elif callable(activation):
# Check if the function is in the custom_objects dictionary
if custom_objects:
for name, custom_func in custom_objects.items():
if custom_func == activation:
return name
tf_keras_frontend_activations = _get_tf_keras_activations()
# Check if the function is in the ACTIVATION_FUNCTIONS list
if activation.__name__ in ACTIVATION_FUNCTIONS:
return activation.__name__
# Check if the function is in the TensorFlow frontend activations
elif activation in [fn for name, fn in tf_keras_frontend_activations]:
for name, tf_func in tf_keras_frontend_activations:
if tf_func == activation:
return name
else:
raise ValueError(f"Unknown activation function: {activation}.")
else:
raise ValueError(f"Could not interpret activation function: {activation}")
@to_ivy_arrays_and_back
def sigmoid(x):
return ivy.sigmoid(x)
@to_ivy_arrays_and_back
def softmax(x, axis=-1):
return ivy.softmax(x, axis=axis)
@to_ivy_arrays_and_back
def softplus(x):
return ivy.softplus(x)
@to_ivy_arrays_and_back
def softsign(x):
return ivy.divide(x, ivy.add(1, ivy.abs(x)))
@to_ivy_arrays_and_back
def swish(x):
return ivy.multiply(x, ivy.sigmoid(x))
@to_ivy_arrays_and_back
def tanh(x):
return ivy.tanh(x)
| ivy/ivy/functional/frontends/tensorflow/keras/activations.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/keras/activations.py",
"repo_id": "ivy",
"token_count": 2187
} | 36 |
import ivy
from ivy.functional.frontends.tensorflow.func_wrapper import (
to_ivy_arrays_and_back,
handle_tf_dtype,
)
from ivy.func_wrapper import with_supported_dtypes
# dct
@to_ivy_arrays_and_back
def dct(input, type=2, n=None, axis=-1, norm=None, name=None):
return ivy.dct(input, type=type, n=n, axis=axis, norm=norm)
# idct
@to_ivy_arrays_and_back
def idct(input, type=2, n=None, axis=-1, norm=None, name=None):
inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]
return ivy.dct(input, type=inverse_type, n=n, axis=axis, norm=norm)
# kaiser_bessel_derived_window
@handle_tf_dtype
@to_ivy_arrays_and_back
def kaiser_bessel_derived_window(
window_length, beta=12.0, dtype=ivy.float32, name=None
):
return ivy.kaiser_bessel_derived_window(window_length, beta=beta, dtype=dtype)
@with_supported_dtypes(
{"2.15.0 and below": ("float32", "float64", "float16", "bfloat16")},
"tensorflow",
)
@handle_tf_dtype
@to_ivy_arrays_and_back
def kaiser_window(window_length, beta=12.0, dtype=ivy.float32, name=None):
return ivy.kaiser_window(window_length, periodic=False, beta=beta, dtype=dtype)
# stft
@to_ivy_arrays_and_back
def stft(
signals,
frame_length,
frame_step,
fft_length=None,
window_fn=None,
pad_end=False,
name=None,
):
signals = ivy.asarray(signals)
return ivy.stft(
signals,
frame_length,
frame_step,
fft_length=fft_length,
window_fn=window_fn,
pad_end=pad_end,
name=name,
)
@with_supported_dtypes(
{"2.15.0 and below": ("float16", "float32", "float64", "bfloat16")},
"tensorflow",
)
@to_ivy_arrays_and_back
def vorbis_window(window_length, dtype=ivy.float32, name=None):
return ivy.vorbis_window(window_length, dtype=dtype, out=None)
kaiser_bessel_derived_window.supported_dtypes = (
"float32",
"float64",
"float16",
"bfloat16",
)
| ivy/ivy/functional/frontends/tensorflow/signal.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/signal.py",
"repo_id": "ivy",
"token_count": 868
} | 37 |
from . import functional
from . import modules
from .modules import *
from . import parameter
from .parameter import Parameter
| ivy/ivy/functional/frontends/torch/nn/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/nn/__init__.py",
"repo_id": "ivy",
"token_count": 29
} | 38 |
# global
import ivy
from ivy.func_wrapper import (
with_unsupported_dtypes,
with_supported_dtypes,
)
import ivy.functional.frontends.torch as torch_frontend
from ivy.functional.frontends.torch.func_wrapper import (
to_ivy_arrays_and_back,
)
erfc = torch_frontend.special.erfc
@to_ivy_arrays_and_back
def abs(input, *, out=None):
return ivy.abs(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def acos(input, *, out=None):
return ivy.acos(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def acosh(input, *, out=None):
return ivy.acosh(input, out=out)
@with_supported_dtypes(
{"1.12.0 and below": ("float32", "float64", "int32", "int64")}, "jax"
)
@to_ivy_arrays_and_back
def add(input, other, *, alpha=1, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.add(input, other, alpha=alpha, out=out)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def addcdiv(input, tensor1, tensor2, *, value=1, out=None):
return ivy.add(input, ivy.multiply(value, ivy.divide(tensor1, tensor2)), out=out)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def addcmul(input, tensor1, tensor2, *, value=1, out=None):
return ivy.add(input, ivy.multiply(value, ivy.multiply(tensor1, tensor2)), out=out)
@to_ivy_arrays_and_back
def angle(input, *, out=None):
return ivy.angle(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def asin(input, *, out=None):
return ivy.asin(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def asinh(input, *, out=None):
return ivy.asinh(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def atan(input, *, out=None):
return ivy.atan(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def atan2(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.atan2(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def atanh(input, *, out=None):
return ivy.atanh(input, out=out)
@to_ivy_arrays_and_back
def bitwise_and(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.bitwise_and(input, other, out=out)
@to_ivy_arrays_and_back
def bitwise_left_shift(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.bitwise_left_shift(input, other, out=out)
@to_ivy_arrays_and_back
def bitwise_not(input, *, out=None):
return ivy.bitwise_invert(input, out=out)
@to_ivy_arrays_and_back
def bitwise_or(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.bitwise_or(input, other, out=out)
@to_ivy_arrays_and_back
def bitwise_right_shift(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.bitwise_right_shift(input, other, out=out)
@to_ivy_arrays_and_back
def bitwise_xor(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.bitwise_xor(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def ceil(input, *, out=None):
return ivy.ceil(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
@to_ivy_arrays_and_back
def clamp(input, min=None, max=None, *, out=None):
ivy.utils.assertions.check_all_or_any_fn(
min,
max,
fn=ivy.exists,
type="any",
limit=[1, 2],
message="at most one of min or max can be None",
)
if min is None:
return ivy.minimum(input, max, out=out)
if max is None:
return ivy.maximum(input, min, out=out)
return ivy.clip(input, min, max, out=out)
@to_ivy_arrays_and_back
def conj_physical(input, *, out=None):
return ivy.conj(input, out=out)
@with_unsupported_dtypes({"1.12.0 and below": ("float16",)}, "jax")
@to_ivy_arrays_and_back
def copysign(input, other, *, out=None):
return ivy.copysign(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def cos(input, *, out=None):
return ivy.cos(input, out=out)
@to_ivy_arrays_and_back
def cosh(input, *, out=None):
return ivy.cosh(input, out=out)
@to_ivy_arrays_and_back
def deg2rad(input, *, out=None):
return ivy.array(input * ivy.pi / 180, out=out)
@to_ivy_arrays_and_back
def div(input, other, *, rounding_mode=None, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
if rounding_mode is not None:
promoted = input.dtype
if rounding_mode == "trunc":
return ivy.astype(ivy.trunc_divide(input, other, out=out), promoted)
else:
return ivy.astype(ivy.floor_divide(input, other, out=out), promoted)
else:
return ivy.divide(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
@to_ivy_arrays_and_back
def erf(input, *, out=None):
return ivy.erf(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def exp(input, *, out=None):
return ivy.exp(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def exp2(input, out=None):
return ivy.exp2(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def expm1(input, out=None):
return ivy.expm1(input, out=out)
@to_ivy_arrays_and_back
def flipud(input):
return ivy.flipud(input)
@with_unsupported_dtypes({"1.12.0 and below": ("bfloat16", "float16")}, "jax")
@to_ivy_arrays_and_back
def float_power(input, exponent, *, out=None):
input, exponent = torch_frontend.promote_types_of_torch_inputs(input, exponent)
return ivy.float_power(input, exponent, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def floor(input, *, out=None):
return ivy.floor(input, out=out)
@to_ivy_arrays_and_back
def floor_divide(input, other, *, out=None):
return ivy.floor_divide(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def fmod(x1, x2, out=None):
return ivy.fmod(x1, x2, out=out)
@to_ivy_arrays_and_back
def frac(input, *, out=None):
return input - ivy.sign(input) * ivy.floor(ivy.abs(input))
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def frexp(input, *, out=None):
return ivy.frexp(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
@to_ivy_arrays_and_back
def gradient(input, *, spacing=1, dim=None, edge_order=1):
return ivy.gradient(input, spacing=spacing, edge_order=edge_order, axis=dim)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def hypot(input, other, *, out=None):
return ivy.hypot(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def i0(input, *, out=None):
return ivy.i0(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
@to_ivy_arrays_and_back
def igamma(input, other, *, out=None):
return ivy.igamma(input, x=other, out=out)
@to_ivy_arrays_and_back
def imag(input):
return ivy.imag(input)
@with_supported_dtypes({"2.2 and below": ("float16", "float32", "float64")}, "torch")
@to_ivy_arrays_and_back
def ldexp(input, other, *, out=None):
value = ivy.pow(2, other, out=out)
value = ivy.multiply(input, value, out=out)
return value
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def lerp(input, end, weight, *, out=None):
return ivy.lerp(input, end, weight, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def lgamma(input, *, out=None):
return ivy.lgamma(input, out=out)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def log(input, *, out=None):
return ivy.log(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def log10(input, *, out=None):
return ivy.log10(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def log1p(input, *, out=None):
return ivy.log1p(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def log2(input, *, out=None):
return ivy.log2(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def logaddexp(x1, x2, out=None):
return ivy.logaddexp(x1, x2, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def logaddexp2(x1, x2, out=None):
return ivy.logaddexp2(x1, x2, out=out)
@to_ivy_arrays_and_back
def logical_and(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.logical_and(input, other, out=out)
@to_ivy_arrays_and_back
def logical_not(input, *, out=None):
return ivy.logical_not(input, out=out)
@to_ivy_arrays_and_back
def logical_or(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.logical_or(input, other, out=out)
@to_ivy_arrays_and_back
def logical_xor(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.logical_xor(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def logit(input, eps=None, *, out=None):
return ivy.logit(input, eps=eps, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
@to_ivy_arrays_and_back
def masked_fill(input, mask, value):
return ivy.where(mask, value, input, out=input)
@to_ivy_arrays_and_back
def mul(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.multiply(input, other, out=out)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def mvlgamma(input, p, *, out=None):
ivy.assertions.check_greater(
p, 1, allow_equal=True, message="p has to be greater than or equal to 1"
)
c = 0.25 * p * (p - 1) * ivy.log(ivy.pi, out=out)
b = 0.5 * ivy.arange((1 - p), 1, 1, dtype=input.dtype, device=input.device, out=out)
return (
ivy.sum(
ivy.lgamma(ivy.expand_dims(input, axis=-1) + b, out=out), axis=-1, out=out
)
+ c
)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "tensorflow")
@to_ivy_arrays_and_back
def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None):
return ivy.nan_to_num(input, nan=nan, posinf=posinf, neginf=neginf, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bool",)}, "torch")
@to_ivy_arrays_and_back
def negative(input, *, out=None):
return ivy.negative(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, "torch")
@to_ivy_arrays_and_back
def nextafter(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.nextafter(input, other, out=out)
@to_ivy_arrays_and_back
def positive(input, *, out=None):
return ivy.positive(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bool",)}, "torch")
@to_ivy_arrays_and_back
def pow(input, exponent, *, out=None):
if not ivy.is_array(exponent):
if (
any(dtype in str(input.dtype) for dtype in ["int8", "int16"])
and isinstance(exponent, int)
) or ("float16" in str(input.dtype) and isinstance(exponent, float)):
exponent = ivy.array(exponent, dtype=input.dtype)
else:
exponent = torch_frontend.as_tensor(exponent).ivy_array
input, exponent = torch_frontend.promote_types_of_torch_inputs(input, exponent)
ret_dtype = input.dtype
if not ivy.is_int_dtype(exponent) and ivy.is_int_dtype(ret_dtype):
ret_dtype = exponent.dtype
ret = ivy.pow(input, exponent)
if ivy.any(input == 0) and ivy.is_int_dtype(exponent):
ret = ivy.where(ivy.bitwise_and(input == 0, exponent < 0), 0, ret, out=out)
return ret.astype(ret_dtype)
@to_ivy_arrays_and_back
def rad2deg(input, *, out=None):
return ivy.rad2deg(input, out=out)
@to_ivy_arrays_and_back
def real(input):
return ivy.real(input)
@to_ivy_arrays_and_back
def reciprocal(input, *, out=None):
return ivy.reciprocal(input)
@to_ivy_arrays_and_back
def remainder(input, other, *, out=None):
if ivy.is_array(input) and ivy.isscalar(other):
other = ivy.full(input.shape, other)
return ivy.remainder(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
@to_ivy_arrays_and_back
def round(input, *, decimals=0, out=None):
m = ivy.full(input.shape, 10.0**decimals)
upscale = ivy.multiply(input, m)
rounded = ivy.round(upscale)
return ivy.divide(rounded, m, out=out).astype(input.dtype)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def rsqrt(input, *, out=None):
return ivy.reciprocal(ivy.sqrt(input), out=out)
@to_ivy_arrays_and_back
def sgn(input, *, out=None):
if ivy.is_complex_dtype(input.dtype):
input_abs = ivy.abs(input, out=out)
# TODO wrap this in Where function after solve it's errors
if input_abs == 0:
return 0
else:
return ivy.divide(input, input_abs, out=out)
else:
return ivy.sign(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def sigmoid(input, *, out=None):
return ivy.sigmoid(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
@to_ivy_arrays_and_back
def sign(input, *, out=None):
return ivy.sign(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
@to_ivy_arrays_and_back
def signbit(input, *, out=None):
return ivy.signbit(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def sin(input, *, out=None):
return ivy.sin(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def sinc(input, *, out=None):
return ivy.sinc(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def sinh(input, *, out=None):
return ivy.sinh(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def sqrt(input, *, out=None):
return ivy.sqrt(input, out=out)
@to_ivy_arrays_and_back
def square(input, *, out=None):
return ivy.square(input, out=out)
@to_ivy_arrays_and_back
def subtract(input, other, *, alpha=1, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.subtract(input, other * alpha, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def tan(input, *, out=None):
return ivy.tan(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def tanh(input, *, out=None):
return ivy.tanh(input, out=out)
@to_ivy_arrays_and_back
def true_divide(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.divide(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def trunc(input, *, out=None):
return ivy.trunc(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "tensorflow")
@to_ivy_arrays_and_back
def xlogy(input, other, *, out=None):
return ivy.xlogy(input, other, out=out)
absolute = abs
arccos = acos
arccosh = acosh
arcsin = asin
arcsinh = asinh
arctan = atan
arctan2 = atan2
arctanh = atanh
clip = clamp
divide = div
fix = trunc
multiply = mul
sub = subtract
| ivy/ivy/functional/frontends/torch/pointwise_ops.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/pointwise_ops.py",
"repo_id": "ivy",
"token_count": 7460
} | 39 |
import ivy
def coordinate_delta(sum_grad, sum_hess, w, reg_alpha, reg_lambda):
mask = ivy.where(sum_hess < 1e-5, 0.0, 1.0)
sum_grad_l2 = sum_grad + reg_lambda * w
sum_hess_l2 = sum_hess + reg_lambda
tmp = w - sum_grad_l2 / sum_hess_l2
return ivy.where(
tmp >= 0,
ivy.fmax(-(sum_grad_l2 + reg_alpha) / sum_hess_l2, -w) * mask,
ivy.fmin(-(sum_grad_l2 - reg_alpha) / sum_hess_l2, -w) * mask,
)
def coordinate_delta_bias(sum_grad, sum_hess):
return -sum_grad / sum_hess
def get_bias_gradient(gpair):
# filter out pairs with negative hessians(should not be included in the sum)
mask = ivy.where(gpair[:, 1] < 0.0, 0.0, 1.0)
sum_grad = ivy.sum(gpair[:, 0] * mask)
sum_hess = ivy.sum(gpair[:, 1] * mask)
return sum_grad, sum_hess
def update_bias_residual(dbias, gpair):
# ToDo: skip update where dbias==0 and modify for several biases
# filter out updates where hessians are less than zero
mask = ivy.where(gpair[:, 1] < 0.0, 0.0, 1.0)
# we only update gradients, hessians remain the same
return ivy.expand_dims(gpair[:, 0] + gpair[:, 1] * mask * dbias, axis=1)
| ivy/ivy/functional/frontends/xgboost/linear/coordinate_common.py/0 | {
"file_path": "ivy/ivy/functional/frontends/xgboost/linear/coordinate_common.py",
"repo_id": "ivy",
"token_count": 515
} | 40 |
# global
from typing import Union, Tuple, Optional
# local
import ivy
from ivy.func_wrapper import (
handle_array_function,
to_native_arrays_and_back,
handle_out_argument,
handle_nestable,
handle_array_like_without_promotion,
handle_device,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
# Array API Standard #
# -------------------#
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_array_function
@handle_device
def unique_all(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[int] = None,
by_value: bool = True,
) -> Tuple[
Union[ivy.Array, ivy.NativeArray],
Union[ivy.Array, ivy.NativeArray],
Union[ivy.Array, ivy.NativeArray],
Union[ivy.Array, ivy.NativeArray],
]:
"""Return the unique elements of an input array ``x``, the first occurring
indices for each unique element in ``x``, the indices from the set of
unique elements that reconstruct ``x``, and the corresponding counts for
each unique element in ``x``.
.. admonition:: Data-dependent output shape
:class: important
The shapes of two of the output arrays for this function depend on the data
values in the input array; hence, array libraries which build computation graphs
(e.g., JAX, Dask, etc.) may find this function difficult to implement without
knowing array values. Accordingly, such libraries may choose to omit this
function. See :ref:`data-dependent-output-shapes` section for more details.
.. note::
Uniqueness should be determined based on value equality (i.e., ``x_i == x_j``).
For input arrays having floating-point data types, value-based equality implies
the following behavior.
- As ``nan`` values compare as ``False``, ``nan`` values should be considered
distinct.
- As ``-0`` and ``+0`` compare as ``True``, signed zeros should not be
considered distinct, and the corresponding unique element will be
implementation-dependent (e.g., an implementation could choose to return
``-0`` if ``-0`` occurs before ``+0``).
As signed zeros are not distinct, using ``inverse_indices`` to reconstruct the
input array is not guaranteed to return an array having the exact same values.
Each ``nan`` value should have a count of one, while the counts for signed zeros
should be aggregated as a single count.
Parameters
----------
x
input array.
axis
the axis to apply unique on. If None, the unique elements of the flattened ``x``
are returned.
by_value
If False, the unique elements will be sorted in the same order that they occur
in ''x''. Otherwise, they will be sorted by value.
Returns
-------
ret
a namedtuple ``(values, indices, inverse_indices, counts)`` whose
- first element must have the field name ``values`` and must be an array
containing the unique elements of ``x``. The array must have the same data
type as ``x``.
- second element must have the field name ``indices`` and must be an array
containing the indices (first occurrences) of ``x`` that result in ``values``.
The array must have the same length as ``values`` and must have the default
array index data type.
- third element must have the field name ``inverse_indices`` and must be an
array containing the indices of ``values`` that reconstruct ``x``. The array
must have the same length as the ``axis`` dimension of ``x`` and must have the
default array index data type.
- fourth element must have the field name ``counts`` and must be an array
containing the number of times each unique element occurs in ``x``. The
returned array must have the same length as ``values`` and must have the
default array index data type.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.unique_all.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.randint(0, 10, shape=(2, 2), seed=0)
>>> z = ivy.unique_all(x)
>>> print(z)
Results(values=ivy.array([1, 2, 5, 9]),
indices=ivy.array([3, 2, 1, 0]),
inverse_indices=ivy.array([[3, 2], [1, 0]]),
counts=ivy.array([1, 1, 1, 1]))
>>> x = ivy.array([[ 2.1141, 0.8101, 0.9298, 0.8460],
... [-1.2119, -0.3519, -0.6252, 0.4033],
... [ 0.7443, 0.2577, -0.3707, -0.0545],
... [-0.3238, 0.5944, 0.0775, -0.4327]])
>>> x[range(4), range(4)] = ivy.nan #Introduce NaN values
>>> z = ivy.unique_all(x)
>>> print(z)
Results(values=ivy.array([-1.2119 , -0.62519997, -0.3238 , -0.0545 ,
0.0775 , 0.2577 , 0.40329999, 0.59439999, 0.74430001, 0.81010002,
0.84600002, 0.92979997, nan, nan, nan, nan]),
indices=ivy.array([ 4, 6, 12, 11, 14, 9, 7, 13, 8, 1, 3, 2, 0, 5,
10, 15]),
inverse_indices=ivy.array([[12, 9, 11, 10],
[ 0, 12, 1, 6],
[ 8, 5, 12, 3],
[ 2, 7, 4, 12]]),
counts=ivy.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))
"""
return ivy.current_backend(x).unique_all(x, axis=axis, by_value=by_value)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_array_function
@handle_device
def unique_inverse(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[int] = None,
) -> Tuple[Union[ivy.Array, ivy.NativeArray], Union[ivy.Array, ivy.NativeArray]]:
"""Return the unique elements of an input array ``x``, and the indices from
the set of unique elements that reconstruct ``x``.
.. admonition:: Data-dependent output shape
:class: important
The shapes of two of the output arrays for this function depend on the data
values in the input array; hence, array libraries which build computation graphs
(e.g., JAX, Dask, etc.) may find this function difficult to implement without
knowing array values. Accordingly, such libraries may choose to omit this
function. See :ref:`data-dependent-output-shapes` section for more details.
.. note::
Uniqueness should be determined based on value equality (i.e., ``x_i == x_j``).
For input arrays having floating-point data types, value-based equality implies
the following behavior.
- As ``nan`` values compare as ``False``, ``nan`` values should be considered
distinct.
- As ``-0`` and ``+0`` compare as ``True``, signed zeros should not be
considered distinct, and the corresponding unique element will be
implementation-dependent (e.g., an implementation could choose to return
``-0`` if ``-0`` occurs before ``+0``).
As signed zeros are not distinct, using ``inverse_indices`` to reconstruct the
input array is not guaranteed to return an array having the exact same values.
Parameters
----------
x
the array that will be inputted into the "unique_inverse" function
axis
the axis to apply unique on. If None, the unique elements of the flattened ``x``
are returned.
Returns
-------
ret
a namedtuple ``(values, inverse_indices)`` whose
- first element must have the field name ``values`` and must be an array
containing the unique elements of ``x``. The array must have the same data
type as ``x``.
- second element must have the field name ``inverse_indices`` and must be an
array containing the indices of ``values`` that reconstruct ``x``. The array
must have the same shape as ``x`` and must have the default array index data
type.
.. note::
The order of unique elements is not specified and may vary between
implementations.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.unique_inverse.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([4,5,3,2,4,1,3])
>>> y = ivy.unique_inverse(x)
>>> print(y)
Results(values=ivy.array([1, 2, 3, 4, 5]),
inverse_indices=ivy.array([3, 4, 2, 1, 3, 0, 2]))
>>> x = ivy.array([0.5,0.3,0.8,0.2,1.2,2.4,0.3])
>>> y = ivy.ivy.unique_inverse(x)
>>> print(y)
Results(values=ivy.array([0.2, 0.3, 0.5, 0.8, 1.2, 2.4]),
inverse_indices=ivy.array([2, 1, 3, 0, 4, 5, 1]))
"""
return ivy.current_backend(x).unique_inverse(x, axis=axis)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def unique_values(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the unique elements of an input array ``x``.
.. admonition:: Data-dependent output shape
:class: important
The shapes of two of the output arrays for this function depend on the data
values in the input array; hence, array libraries which build computation graphs
(e.g., JAX, Dask, etc.) may find this function difficult to implement without
knowing array values. Accordingly, such libraries may choose to omit this
function. See :ref:`data-dependent-output-shapes` section for more details.
.. note::
Uniqueness should be determined based on value equality (i.e., ``x_i == x_j``).
For input arrays having floating-point data types, value-based equality implies
the following behavior.
- As ``nan`` values compare as ``False``, ``nan`` values should be considered
distinct.
- As ``-0`` and ``+0`` compare as ``True``, signed zeros should not be
considered distinct, and the corresponding unique element will be
implementation-dependent (e.g., an implementation could choose to return
``-0`` if ``-0`` occurs before ``+0``).
Parameters
----------
x
input array. If ``x`` has more than one dimension, the function must flatten
``x`` and return the unique elements of the flattened array.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the set of unique elements in ``x``. The returned array must
have the same data type as ``x``.
.. note::
The order of unique elements is not specified and may vary between
implementations.
Raises
------
TypeError
If `x` is not an instance of `ivy.Array` or `ivy.NativeArray`.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.unique_values.html>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` inputs:
>>> import ivy
>>> a = ivy.array([1, 1, 2, 2, 3, 4, 4, 5])
>>> ivy.unique_values(a)
array([1, 2, 3, 4, 5])
>>> b = ivy.array([1, 2, 3, 4, 5])
>>> ivy.unique_values(b)
array([1, 2, 3, 4, 5])
>>> c = ivy.array([1.0, 1.0, 2.0, 2.0, 3.0, 4.0, 4.0, 5.0, -0.0, 0.0, float('nan'),
... float('nan')])
>>> ivy.unique_values(c)
array([0., 1., 2., 3., 4., 5., nan, -0.])
"""
return ivy.current_backend(x).unique_values(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_array_function
@handle_device
def unique_counts(
x: Union[ivy.Array, ivy.NativeArray],
/,
) -> Tuple[Union[ivy.Array, ivy.NativeArray], Union[ivy.Array, ivy.NativeArray]]:
"""Return the unique elements of an input array ``x`` and the corresponding
counts for each unique element in ``x``.
.. admonition:: Data-dependent output shape
:class: important
The shapes of two of the output arrays for this function depend on the data
values in the input array; hence, array libraries which build computation graphs
(e.g., JAX, Dask, etc.) may find this function difficult to implement without
knowing array values. Accordingly, such libraries may choose to omit this
function. See :ref:`data-dependent-output-shapes` section for more details.
.. note::
Uniqueness should be determined based on value equality (i.e., ``x_i == x_j``).
For input arrays having floating-point data types, value-based equality implies
the following behavior.
- As ``nan`` values compare as ``False``, ``nan`` values should be considered
distinct.
- As ``-0`` and ``+0`` compare as ``True``, signed zeros should not be
considered distinct, and the corresponding unique element will be
implementation-dependent (e.g., an implementation could choose to return
``-0`` if ``-0`` occurs before ``+0``).
Parameters
----------
x
input array. If ``x`` has more than one dimension, the function must flatten
``x`` and return the unique elements of the flattened array.
Returns
-------
ret
a namedtuple ``(values, counts)`` whose
- first element must have the field name ``values`` and must be an
array containing the unique elements of ``x``.
The array must have the same data type as ``x``.
- second element must have the field name ``counts`` and must be an array
containing the number of times each unique element occurs in ``x``.
The returned array must have same shape as ``values`` and must
have the default array index data type.
.. note::
The order of unique elements is not specified and may vary between
implementations.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.unique_counts.htmll>`_
in the standard.
Both the description and the type hints above assumes an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1,2,1,3,4,1,3])
>>> y = ivy.unique_counts(x)
>>> print(y)
Results(values=ivy.array([1, 2, 3, 4]), counts=ivy.array([3, 1, 2, 1]))
>>> x = ivy.asarray([[1,2,3,4],[2,3,4,5],[3,4,5,6]])
>>> y = ivy.unique_counts(x)
>>> print(y)
Results(values=ivy.array([1, 2, 3, 4, 5, 6]), counts=ivy.array([1, 2, 3, 3, 2, 1]))
>>> x = ivy.array([0.2,0.3,0.4,0.2,1.4,2.3,0.2])
>>> y = ivy.unique_counts(x)
>>> print(y)
Results(values=ivy.array([0.2 , 0.30000001, 0.40000001, 1.39999998,
2.29999995]),
counts=ivy.array([3, 1, 1, 1, 1]))
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 3. , 2. , 1. , 0.]),
... b=ivy.array([1, 2, 1, 3, 4, 1, 3]))
>>> y = ivy.unique_counts(x)
>>> print(y)
{
a: (list[2],<classivy.array.array.Array>shape=[4]),
b: (list[2],<classivy.array.array.Array>shape=[4])
}
"""
return ivy.current_backend(x).unique_counts(x)
| ivy/ivy/functional/ivy/set.py/0 | {
"file_path": "ivy/ivy/functional/ivy/set.py",
"repo_id": "ivy",
"token_count": 6665
} | 41 |
import ivy
import sys
from importlib.util import resolve_name, module_from_spec
from ivy.utils.backend import ast_helpers
import_cache = {}
path_hooks = []
# Note that any modules listed as 'to skip' should not depend on the Ivy backend state.
# If they do, the behavior of ivy.with_backend is undefined and may not function as
# expected. Import these modules along with Ivy initialization, as the import logic
# assumes they exist in sys.modules.
MODULES_TO_SKIP = ["ivy.compiler", "ivy.engines", "ivy.wrappers"]
IS_COMPILING_WITH_BACKEND = False
class LocalIvyImporter:
def __init__(self):
self.finder = ast_helpers.IvyPathFinder()
def __enter__(self):
global IS_COMPILING_WITH_BACKEND
IS_COMPILING_WITH_BACKEND = True
sys.meta_path.insert(0, self.finder)
path_hooks.insert(0, self.finder)
def __exit__(self, *exc):
path_hooks.remove(self.finder)
sys.meta_path.remove(self.finder)
global IS_COMPILING_WITH_BACKEND
IS_COMPILING_WITH_BACKEND = False
def _clear_cache():
global import_cache
import_cache = {}
def _from_import(name: str, package=None, mod_globals=None, from_list=(), level=0):
"""Handle absolute and relative from_import statement."""
module_exist = name != ""
name = "." * level + name
module = _import_module(name, package)
for entry_name, entry_asname in from_list:
if entry_name == "*":
if "__all__" in module.__dict__.keys():
_all = {
k: v
for (k, v) in module.__dict__.items()
if k in module.__dict__["__all__"]
}
else:
_all = {
k: v for (k, v) in module.__dict__.items() if not k.startswith("__")
}
for k, v in _all.items():
mod_globals[k] = v
continue
alias = entry_name if entry_asname is None else entry_asname
# Handles attributes inside module
try:
mod_globals[alias] = module.__dict__[entry_name]
# In the case this is a module from a package
except KeyError:
if module_exist:
in_name = f"{name}.{entry_name}"
else:
in_name = name + entry_name
mod_globals[alias] = _import_module(in_name, package)
return module
def _absolute_import(name: str, asname=None, mod_globals=None):
"""Handle absolute import statement :param name:
:return:
"""
if asname is None:
_import_module(name)
true_name = name.partition(".")[0]
module = import_cache[true_name]
else:
true_name = asname
module = _import_module(name)
mod_globals[true_name] = module
def _import_module(name, package=None):
global import_cache
absolute_name = resolve_name(name, package)
try:
return import_cache[absolute_name]
except KeyError:
pass
path = None
if "." in absolute_name:
parent_name, _, child_name = absolute_name.rpartition(".")
parent_module = _import_module(parent_name)
path = parent_module.__spec__.submodule_search_locations
# Return the one from global Ivy if the module is marked to skip
for module_to_skip in MODULES_TO_SKIP:
if absolute_name.startswith(module_to_skip):
if path is not None:
# Set reference to self in parent, if exist
setattr(parent_module, child_name, sys.modules[absolute_name])
return sys.modules[absolute_name]
for finder in path_hooks:
spec = finder.find_spec(absolute_name, path)
if spec is not None:
break
else:
msg = f"No module named {absolute_name!r}"
raise ModuleNotFoundError(msg, name=absolute_name)
module = module_from_spec(spec)
import_cache[absolute_name] = module
if ivy.is_local():
spec.loader.exec_module(module, ivy._compiled_id)
else:
spec.loader.exec_module(module)
if path is not None:
# Set reference to self in parent, if exist
setattr(parent_module, child_name, module)
return module
| ivy/ivy/utils/_importlib.py/0 | {
"file_path": "ivy/ivy/utils/_importlib.py",
"repo_id": "ivy",
"token_count": 1861
} | 42 |
import os
import sys
import glob
import importlib
dir_path = os.path.dirname(os.path.realpath(__file__))
so_files = glob.glob(dir_path + "/*.so")
sys.path.append(dir_path)
__all__ = []
for so_file in so_files:
# if os.path.basename(so_file) != "add.so":
# continue
module_name = os.path.splitext(os.path.basename(so_file))[0]
locals()[module_name] = importlib.import_module(module_name)
if module_name + "_wrapper" in locals()[module_name].__dict__.keys():
locals()[module_name + "_wrapper"] = getattr(
locals()[module_name], module_name + "_wrapper"
)
__all__.append(module_name + "_wrapper")
del dir_path
del so_files
import utils
from utils import *
| ivy/ivy/wrappers/__init__.py/0 | {
"file_path": "ivy/ivy/wrappers/__init__.py",
"repo_id": "ivy",
"token_count": 296
} | 43 |
from . import general_helpers
from .general_helpers import *
from . import array_helpers
from .array_helpers import *
from . import dtype_helpers
from .dtype_helpers import *
from . import number_helpers
from .number_helpers import *
| ivy/ivy_tests/test_ivy/helpers/hypothesis_helpers/__init__.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/helpers/hypothesis_helpers/__init__.py",
"repo_id": "ivy",
"token_count": 70
} | 44 |
from .base import FrontendConfigWithBackend
def get_config():
return PaddleFrontendConfig()
class PaddleFrontendConfig(FrontendConfigWithBackend):
backend_str = "paddle"
| ivy/ivy_tests/test_ivy/test_frontends/config/paddle.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/config/paddle.py",
"repo_id": "ivy",
"token_count": 57
} | 45 |
# global
from hypothesis import strategies as st, assume
import numpy as np
import ivy
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_linalg import (
_get_first_matrix_and_dtype,
_get_second_matrix_and_dtype,
_get_dtype_value1_value2_axis_for_tensordot,
)
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_elementwise import ( # noqa
ldexp_args,
)
# --- Helpers --- #
# --------------- #
# trapz
@st.composite
def _either_x_dx(draw):
dtype_values_axis = draw(
helpers.dtype_values_axis(
available_dtypes=st.shared(helpers.get_dtypes("float"), key="trapz_dtype"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
allow_neg_axes=True,
valid_axis=True,
force_int_axis=True,
),
)
rand = (draw(st.integers(min_value=0, max_value=1)),)
if rand == 0:
either_x_dx = draw(
helpers.dtype_and_x(
avaliable_dtypes=st.shared(
helpers.get_dtypes("float"), key="trapz_dtype"
),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
)
)
return dtype_values_axis, rand, either_x_dx
else:
either_x_dx = draw(
st.floats(min_value=-10, max_value=10),
)
return dtype_values_axis, rand, either_x_dx
# polyint
@st.composite
def _get_array_values_m_and_k(draw):
dtype_and_x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
min_num_dims=1,
max_num_dims=1,
min_dim_size=1,
)
)
dtype, x = dtype_and_x
m = draw(st.integers(min_value=0, max_value=10))
max_bound = m - 1
if max_bound <= m:
k = None
else:
k = draw(st.integers(min_value=0, max_value=max_bound))
return dtype, x, m, k
@st.composite
def _get_castable_dtypes_values(draw, *, allow_nan=False, use_where=False):
available_dtypes = helpers.get_dtypes("numeric")
shape = draw(helpers.get_shape(min_num_dims=1, max_num_dims=4, max_dim_size=6))
dtype, values = draw(
helpers.dtype_and_values(
available_dtypes=available_dtypes,
num_arrays=1,
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
shape=shape,
allow_nan=allow_nan,
)
)
axis = draw(helpers.get_axis(shape=shape, force_int=True))
dtype1, values, dtype2 = draw(
helpers.get_castable_dtype(draw(available_dtypes), dtype[0], values[0])
)
if use_where:
where = draw(np_frontend_helpers.where(shape=shape))
return [dtype1], [values], axis, dtype2, where
return [dtype1], [values], axis, dtype2
# diff
@st.composite
def _get_dtype_input_and_vector(draw):
size1 = draw(helpers.ints(min_value=1, max_value=5))
size2 = draw(helpers.ints(min_value=1, max_value=5))
dtype = draw(helpers.get_dtypes("integer"))
vec1 = draw(helpers.array_values(dtype=dtype[0], shape=(size1, size2)))
return dtype, vec1
# dot
@st.composite
def _get_dtype_input_and_vectors(draw):
dim_size = draw(helpers.ints(min_value=1, max_value=5))
dtype = draw(helpers.get_dtypes("float", index=1, full=False))
if dim_size == 1:
vec1 = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size,), min_value=2, max_value=5
)
)
vec2 = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size,), min_value=2, max_value=5
)
)
else:
vec1 = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size, dim_size), min_value=2, max_value=5
)
)
vec2 = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size, dim_size), min_value=2, max_value=5
)
)
return dtype, vec1, vec2
# --- Main --- #
# ------------ #
# absolute
@handle_frontend_test(
fn_tree="jax.numpy.absolute",
aliases=["jax.numpy.abs"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("signed_integer"),
),
test_with_out=st.just(False),
)
def test_jax_absolute(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# add
@handle_frontend_test(
fn_tree="jax.numpy.add",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_add(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[0],
)
# angle
@handle_frontend_test(
fn_tree="jax.numpy.angle",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float64"],
min_value=-5,
max_value=5,
max_dim_size=5,
max_num_dims=5,
min_dim_size=1,
min_num_dims=1,
allow_inf=False,
allow_nan=False,
),
deg=st.booleans(),
test_with_out=st.just(False),
)
def test_jax_angle(
*,
dtype_and_x,
deg,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, z = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
z=z[0],
deg=deg,
)
# arccos
@handle_frontend_test(
fn_tree="jax.numpy.arccos",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_arccos(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# arccosh
@handle_frontend_test(
fn_tree="jax.numpy.arccosh",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_arccosh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# arcsin
@handle_frontend_test(
fn_tree="jax.numpy.arcsin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=4,
small_abs_safety_factor=4,
),
)
def test_jax_arcsin(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
rtol=1e-2,
atol=1e-2,
)
# arcsinh
@handle_frontend_test(
fn_tree="jax.numpy.arcsinh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=4,
small_abs_safety_factor=4,
),
test_with_out=st.just(False),
)
def test_jax_arcsinh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# arctan
@handle_frontend_test(
fn_tree="jax.numpy.arctan",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
)
def test_jax_arctan(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# arctan2
@handle_frontend_test(
fn_tree="jax.numpy.arctan2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
),
)
def test_jax_arctan2(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# arctanh
@handle_frontend_test(
fn_tree="jax.numpy.arctanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=0,
),
test_with_out=st.just(False),
)
def test_jax_arctanh(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
x=x[0],
)
# around
@handle_frontend_test(
fn_tree="jax.numpy.around",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
decimals=st.integers(min_value=0, max_value=5),
)
def test_jax_around(
*,
dtype_and_x,
decimals,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
decimals=decimals,
)
# cbrt
@handle_frontend_test(
fn_tree="jax.numpy.cbrt",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_jax_cbrt(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
)
# ceil
@handle_frontend_test(
fn_tree="jax.numpy.ceil",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_ceil(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# clip
@handle_frontend_test(
fn_tree="jax.numpy.clip",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
min_value=-1e3,
max_value=1e3,
max_dim_size=10,
max_num_dims=4,
min_dim_size=1,
min_num_dims=1,
),
a_min=st.integers(min_value=0, max_value=5),
a_max=st.integers(min_value=5, max_value=10),
)
def test_jax_clip(
*,
dtype_and_x,
a_min,
a_max,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
a_min=a_min,
a_max=a_max,
)
# conj
@handle_frontend_test(
fn_tree="jax.numpy.conj",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_jax_conj(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# TODO: uncomment with multiversion pipeline (deprecated since 0.4.12)
# @handle_frontend_test(
# fn_tree="jax.numpy.product",
# dtype_x_axis_dtype_where=_get_castable_dtypes_values(use_where=True),
# keepdims=st.booleans(),
# initial=st.one_of(st.floats(min_value=-100, max_value=100)),
# promote_integers=st.booleans(),
# )
# def test_jax_product(
# dtype_x_axis_dtype_where,
# keepdims,
# initial,
# promote_integers,
# frontend,
# test_flags,
# fn_tree,
# on_device,
# ):
# input_dtypes, x, axis, dtype, where = dtype_x_axis_dtype_where
# if ivy.current_backend_str() == "torch":
# assume(not test_flags.as_variable[0])
# where, input_dtypes, test_flags = np_frontend_helpers.
# handle_where_and_array_bools(
# where=where,
# input_dtype=input_dtypes,
# test_flags=test_flags,
# )
# helpers.test_frontend_function(
# input_dtypes=input_dtypes,
# frontend=frontend,
# test_flags=test_flags,
# fn_tree=fn_tree,
# on_device=on_device,
# a=x[0],
# axis=axis,
# dtype=dtype,
# keepdims=keepdims,
# initial=initial,
# where=where,
# promote_integers=promote_integers,
# )
# conjugate
@handle_frontend_test(
fn_tree="jax.numpy.conjugate",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_jax_conjugate(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# convolve
@handle_frontend_test(
fn_tree="jax.numpy.convolve",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
max_num_dims=1,
min_value=-1e04,
max_value=1e04,
shared_dtype=True,
),
mode=st.sampled_from(["valid", "same", "full"]),
)
def test_jax_convolve(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
mode,
):
input_dtype, x = dtype_and_x
assume("float16" not in input_dtype)
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
rtol=1e-2,
atol=1e-2,
on_device=on_device,
a=x[0],
v=x[1],
mode=mode,
precision=None,
)
# copysign
@handle_frontend_test(
fn_tree="jax.numpy.copysign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_copysign(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[0],
)
@handle_frontend_test(
fn_tree="jax.numpy.cos",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
)
def test_jax_cos(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# cosh
@handle_frontend_test(
fn_tree="jax.numpy.cosh",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_cosh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# deg2rad
@handle_frontend_test(
fn_tree="jax.numpy.deg2rad",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_jax_deg2rad(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# degrees
@handle_frontend_test(
fn_tree="jax.numpy.degrees",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_jax_degrees(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.numpy.diff",
dtype_and_x=_get_dtype_input_and_vector(),
n=helpers.ints(
min_value=0,
max_value=10,
),
axis=helpers.ints(
min_value=-1,
max_value=10,
),
)
def test_jax_diff(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
n,
axis,
):
input_dtype, x = dtype_and_x
axis = min(axis, x[0].ndim - 1)
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
n=n,
axis=axis,
prepend=None,
append=None,
)
# divide
@handle_frontend_test(
fn_tree="jax.numpy.divide",
aliases=["jax.numpy.true_divide"],
dtype_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_divide(
*,
dtype_values,
frontend,
backend_fw,
test_flags,
fn_tree,
):
input_dtype, x = dtype_values
assume(not np.any(np.isclose(x[1], 0)))
if ivy.current_backend_str() == "paddle":
atol, rtol = 1e-2, 1e-2
else:
atol, rtol = 1e-5, 1e-5
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
a=x[0],
b=x[1],
atol=atol,
rtol=rtol,
)
# divmod
@handle_frontend_test(
fn_tree="jax.numpy.divmod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
safety_factor_scale="linear",
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_divmod(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)) and "bfloat16" not in input_dtype)
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
atol=1,
)
@handle_frontend_test(
fn_tree="jax.numpy.dot",
dtype_x_y=_get_dtype_input_and_vectors(),
test_with_out=st.just(False),
)
def test_jax_dot(
*,
dtype_x_y,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, y = dtype_x_y
helpers.test_frontend_function(
input_dtypes=input_dtype,
rtol=1e-01,
atol=1e-01,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x,
b=y,
precision=None,
)
# ediff1d
@handle_frontend_test(
fn_tree="jax.numpy.ediff1d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), min_num_dims=1, max_num_dims=1
),
to_end=helpers.ints(
min_value=-1,
max_value=10,
),
to_begin=helpers.ints(
min_value=-1,
max_value=10,
),
)
def test_jax_ediff1d(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
to_end,
to_begin,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
test_flags=test_flags,
ary=x[0],
to_end=to_end,
to_begin=to_begin,
)
# einsum_path
# For the optimize parameter boolean values are not added to the samples for testing
# as it seems that Jax einsum_path function currently fails when True or False is passed
# as optimize values. Jax einsum_path function calls opt_einsum.contract_path function,
# and it seems that there is an open bug on their repository for boolean values.
# Please see link to the bug https://github.com/dgasmith/opt_einsum/issues/219
@handle_frontend_test(
fn_tree="jax.numpy.einsum_path",
eq_n_op_n_shp=helpers.einsum_helper(),
dtype=helpers.get_dtypes("numeric", full=False),
test_with_out=st.just(False),
optimize=st.sampled_from(["greedy", "optimal"]),
)
def test_jax_einsum_path(
*,
eq_n_op_n_shp,
dtype,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
optimize,
):
eq, operands, dtypes = eq_n_op_n_shp
kw = {}
for i, x_ in enumerate(operands):
dtype = dtypes[i][0]
kw[f"x{i}"] = np.array(x_).astype(dtype)
test_flags.num_positional_args = len(operands) + 1
ret, ret_gt = helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
subscripts=eq,
**kw,
optimize=optimize,
)
assert len(ret[0]) == len(ret_gt[0])
assert all(x == y for x, y in zip(ret[0], ret_gt[0]))
assert ret[1] == str(ret_gt[1])
# exp
@handle_frontend_test(
fn_tree="jax.numpy.exp",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_exp(
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# exp2
@handle_frontend_test(
fn_tree="jax.numpy.exp2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-10,
max_value=10,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
test_with_out=st.just(False),
)
def test_jax_exp2(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
rtol=1e-01,
atol=1e-02,
)
# expm1
@handle_frontend_test(
fn_tree="jax.numpy.expm1",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_jax_expm1(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# fabs
@handle_frontend_test(
fn_tree="jax.numpy.fabs",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_jax_fabs(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# fix
@handle_frontend_test(
fn_tree="jax.numpy.fix",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", index=2),
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
)
def test_jax_fix(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
x=x[0],
)
# float_power
@handle_frontend_test(
fn_tree="jax.numpy.float_power",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-10,
max_value=10,
num_arrays=2,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
test_with_out=st.just(False),
)
def test_jax_float_power(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# floor
@handle_frontend_test(
fn_tree="jax.numpy.floor",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_floor(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# floor_divide
@handle_frontend_test(
fn_tree="jax.numpy.floor_divide",
dtype_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
min_value=-10.0,
max_value=10.0,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
),
)
def test_jax_floor_divide(
*,
dtype_values,
frontend,
backend_fw,
fn_tree,
on_device,
test_flags,
):
input_dtype, x = dtype_values
# Making sure division by zero doesn't occur
assume(not np.any(np.isclose(x[1], 0)))
# Absolute tolerance is 1,
# due to flooring can cause absolute error of 1 due to precision
helpers.test_frontend_function(
input_dtypes=input_dtype,
on_device=on_device,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
x1=x[0],
x2=x[1],
atol=1,
)
# fmax
@handle_frontend_test(
fn_tree="jax.numpy.fmax",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
min_value=-np.inf,
max_value=np.inf,
),
test_with_out=st.just(False),
)
def test_jax_fmax(
*,
dtype_and_inputs,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, inputs = dtype_and_inputs
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=inputs[0],
x2=inputs[1],
)
# fmin
@handle_frontend_test(
fn_tree="jax.numpy.fmin",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
min_value=-np.inf,
max_value=np.inf,
),
)
def test_jax_fmin(
*,
dtype_and_inputs,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, inputs = dtype_and_inputs
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=inputs[0],
x2=inputs[1],
)
# fmod
@handle_frontend_test(
fn_tree="jax.numpy.fmod",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=1.5,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_fmod(
*,
dtype_and_inputs,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_inputs
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# frexp
@handle_frontend_test(
fn_tree="jax.numpy.frexp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=1,
max_value=100,
),
)
def test_jax_frexp(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# gcd
@handle_frontend_test(
fn_tree="jax.numpy.gcd",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
num_arrays=2,
).filter(lambda x: all(dtype != "uint64" for dtype in x[0])),
test_with_out=st.just(False),
)
def test_jax_gcd(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# gradient
@handle_frontend_test(
fn_tree="jax.numpy.gradient",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=("float32", "float16", "float64"),
min_num_dims=1,
max_num_dims=3,
min_dim_size=2,
max_dim_size=4,
valid_axis=True,
force_int_axis=True,
),
varargs=helpers.ints(
min_value=-3,
max_value=3,
),
)
def test_jax_gradient(
dtype_input_axis,
varargs,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x, axis = dtype_input_axis
test_flags.num_positional_args = 2
kw = {}
kw["varargs"] = varargs
kw["axis"] = axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
f=x[0],
**kw,
)
# heaviside
@handle_frontend_test(
fn_tree="jax.numpy.heaviside",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
num_arrays=2,
),
test_with_out=st.just(False),
)
def test_jax_heaviside(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[0],
)
# hypot
@handle_frontend_test(
fn_tree="jax.numpy.hypot",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
),
)
def test_jax_hypot(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
x1=x[0],
x2=x[1],
backend_to_test=backend_fw,
)
# i0
@handle_frontend_test(
fn_tree="jax.numpy.i0",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-10,
max_value=10,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
test_with_out=st.just(False),
)
def test_jax_i0(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# imag
@handle_frontend_test(
fn_tree="jax.numpy.imag",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_value=-20,
max_value=20,
),
test_with_out=st.just(False),
)
def test_jax_imag(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-5,
atol=1e-5,
val=x[0],
)
# inner
@handle_frontend_test(
fn_tree="jax.numpy.inner",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=-10,
max_value=10,
num_arrays=2,
shared_dtype=True,
),
)
def test_jax_inner(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
a=xs[0],
b=xs[1],
)
@handle_frontend_test(
fn_tree="jax.numpy.interp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=1,
),
dtype_and_xp_fp=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
max_num_dims=1,
),
left=st.one_of(st.floats(min_value=-1e04, max_value=1e04), st.just(np.nan)),
right=st.one_of(st.floats(min_value=-1e04, max_value=1e04), st.just(np.nan)),
test_with_out=st.just(False),
)
def test_jax_interp(
*,
dtype_and_x,
dtype_and_xp_fp,
left,
right,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
input_dtype2, xp_fp = dtype_and_xp_fp
xp = xp_fp[0]
fp = xp_fp[1]
helpers.test_frontend_function(
input_dtypes=[input_dtype, input_dtype2],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
xp=xp,
fp=fp,
left=left,
right=right,
)
# kron
@handle_frontend_test(
fn_tree="jax.numpy.kron",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=1,
max_dim_size=3,
num_arrays=2,
),
test_with_out=st.just(False),
)
def test_jax_kron(
*,
dtype_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
b=x[1],
)
# lcm
@handle_frontend_test(
fn_tree="jax.numpy.lcm",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
allow_nan=False,
small_abs_safety_factor=2,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_lcm(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
value_test = True
# Skip Tensorflow backend value test for lcm
# https://github.com/tensorflow/tensorflow/issues/58955
if ivy.current_backend_str() == "tensorflow":
value_test = False
if ivy.current_backend_str() in ("jax", "numpy"):
assume(input_dtype[0] != "uint64" and input_dtype[1] != "uint64")
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
test_values=value_test,
)
# ldexp
@handle_frontend_test(
fn_tree="jax.numpy.ldexp",
dtype_and_x=ldexp_args(),
)
def test_jax_ldexp(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# log
@handle_frontend_test(
fn_tree="jax.numpy.log",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
),
test_with_out=st.just(False),
)
def test_jax_log(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
atol=1e-02,
x=x[0],
)
# log10
@handle_frontend_test(
fn_tree="jax.numpy.log10",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
),
test_with_out=st.just(False),
)
def test_jax_log10(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
atol=1e-02,
x=x[0],
)
# log1p
@handle_frontend_test(
fn_tree="jax.numpy.log1p",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
),
)
def test_jax_log1p(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# log2
@handle_frontend_test(
fn_tree="jax.numpy.log2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_jax_log2(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
x=x[0],
)
# logaddexp
@handle_frontend_test(
fn_tree="jax.numpy.logaddexp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
allow_nan=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_logaddexp(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
atol=1e-02,
x1=x[0],
x2=x[1],
)
# logaddexp2
@handle_frontend_test(
fn_tree="jax.numpy.logaddexp2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
allow_nan=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_logaddexp2(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
atol=1e-02,
x1=x[0],
x2=x[1],
)
# matmul
@handle_frontend_test(
fn_tree="jax.numpy.matmul",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[_get_first_matrix_and_dtype, _get_second_matrix_and_dtype],
),
)
def test_jax_matmul(
dtypes_values_casting,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
dtypes, x, casting, dtype = dtypes_values_casting
helpers.test_frontend_function(
input_dtypes=dtypes,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
b=x[1],
precision=None,
)
# maximum
@handle_frontend_test(
fn_tree="jax.numpy.maximum",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
test_with_out=st.just(False),
)
def test_jax_maximum(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
x1=x[0],
x2=x[1],
)
# minimum
@handle_frontend_test(
fn_tree="jax.numpy.minimum",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
test_with_out=st.just(False),
)
def test_jax_minimum(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
x1=x[0],
x2=x[1],
)
# mod
@handle_frontend_test(
fn_tree="jax.numpy.mod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_mod(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)) and "bfloat16" not in input_dtype)
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# modf
@handle_frontend_test(
fn_tree="jax.numpy.modf",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_modf(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# multiply
@handle_frontend_test(
fn_tree="jax.numpy.multiply",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
test_with_out=st.just(False),
)
def test_jax_multiply(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
x1=x[0],
x2=x[1],
)
# nan_to_num
@handle_frontend_test(
fn_tree="jax.numpy.nan_to_num",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
allow_nan=True,
allow_inf=True,
),
copy=st.booleans(),
nan=st.floats(min_value=0.0, max_value=100),
posinf=st.floats(min_value=5e100, max_value=5e100),
neginf=st.floats(min_value=-5e100, max_value=-5e100),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_jax_nan_to_num(
*,
dtype_and_x,
copy,
nan,
posinf,
neginf,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
copy=copy,
nan=nan,
posinf=posinf,
neginf=neginf,
)
# negative
@handle_frontend_test(
fn_tree="jax.numpy.negative",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1
),
test_with_out=st.just(False),
)
def test_jax_negative(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# nextafter
@handle_frontend_test(
fn_tree="jax.numpy.nextafter",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_nextafter(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[0],
)
# outer
@handle_frontend_test(
fn_tree="jax.numpy.outer",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
min_value=-10,
max_value=10,
min_num_dims=1,
max_num_dims=1,
shared_dtype=True,
),
)
def test_jax_outer(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
a=xs[0],
b=xs[1],
)
# poly
@handle_frontend_test(
fn_tree="jax.numpy.poly",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
min_num_dims=1,
max_num_dims=1,
min_value=-1e04,
max_value=1e04,
),
)
def test_jax_poly(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
assume("float16" not in input_dtype)
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
seq_of_zeros=x[0],
atol=1e-05,
rtol=1e-03,
)
# polyadd
@handle_frontend_test(
fn_tree="jax.numpy.polyadd",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
),
)
def test_jax_polyadd(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
assume("float16" not in input_dtype)
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
a1=x[0],
a2=x[1],
)
# polyder
@handle_frontend_test(
fn_tree="jax.numpy.polyder",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
min_num_dims=1,
max_num_dims=1,
min_dim_size=1,
),
m=st.integers(min_value=0, max_value=10),
)
def test_jax_polyder(
*,
dtype_and_x,
m,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
p=x[0],
m=m,
)
# polydiv
@handle_frontend_test(
fn_tree="jax.numpy.polydiv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
min_dim_size=1,
max_num_dims=1,
min_value=-1e04,
max_value=1e04,
),
)
def test_jax_polydiv(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
assume("float16" not in input_dtype)
# TODO: remove asumme when the decorator works
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
u=x[0],
v=x[1],
rtol=1e-01,
atol=1e-02,
)
@handle_frontend_test(
fn_tree="jax.numpy.polyint",
dtype_and_x_and_k=_get_array_values_m_and_k(),
)
def test_jax_polyint(
*,
dtype_and_x_and_k,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x, m, k = dtype_and_x_and_k
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
p=x[0],
m=m,
k=k,
)
# polymul
@handle_frontend_test(
fn_tree="jax.numpy.polymul",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
),
trim=st.booleans(),
)
def test_jax_polymul(
*,
dtype_and_x,
trim,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
assume("float16" not in input_dtype)
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
a1=x[0],
a2=x[1],
trim_leading_zeros=trim,
atol=1e-01,
rtol=1e-01,
)
# polysub
@handle_frontend_test(
fn_tree="jax.numpy.polysub",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
min_value=-1e04,
max_value=1e04,
),
)
def test_jax_polysub(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
assume("float16" not in input_dtype)
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
a1=x[0],
a2=x[1],
)
# positive
@handle_frontend_test(
fn_tree="jax.numpy.positive",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1
),
test_with_out=st.just(False),
)
def test_jax_positive(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# power
@handle_frontend_test(
fn_tree="jax.numpy.power",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
),
test_with_out=st.just(False),
)
def test_jax_power(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# rad2deg
@handle_frontend_test(
fn_tree="jax.numpy.rad2deg",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), min_num_dims=1
),
test_with_out=st.just(False),
)
def test_jax_rad2deg(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# radians
@handle_frontend_test(
fn_tree="jax.numpy.radians",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_jax_radians(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# real
@handle_frontend_test(
fn_tree="jax.numpy.real",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("real_and_complex"),
),
)
def test_jax_real(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=True,
val=x[0],
)
# reciprocal
@handle_frontend_test(
fn_tree="jax.numpy.reciprocal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
small_abs_safety_factor=4,
large_abs_safety_factor=4,
safety_factor_scale="log",
num_arrays=1,
),
)
def test_jax_reciprocal(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# remainder
@handle_frontend_test(
fn_tree="jax.numpy.remainder",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=6,
small_abs_safety_factor=6,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_remainder(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
rtol=1e-2,
atol=1e-2,
)
# round
@handle_frontend_test(
fn_tree="jax.numpy.round",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
decimals=st.integers(min_value=0, max_value=5),
)
def test_jax_round(
*,
dtype_and_x,
decimals,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
decimals=decimals,
)
# sign
@handle_frontend_test(
fn_tree="jax.numpy.sign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1
),
test_with_out=st.just(False),
)
def test_jax_sign(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
)
# signbit
@handle_frontend_test(
fn_tree="jax.numpy.signbit",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_jax_signbit(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.numpy.sin",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_sin(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# sinc
@handle_frontend_test(
fn_tree="jax.numpy.sinc",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
),
)
def test_jax_sinc(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
atol=1e-02,
x=x[0],
)
# sinh
@handle_frontend_test(
fn_tree="jax.numpy.sinh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=4,
small_abs_safety_factor=4,
),
test_with_out=st.just(False),
)
def test_jax_sinh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# sqrt
@handle_frontend_test(
fn_tree="jax.numpy.sqrt",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_sqrt(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# square
@handle_frontend_test(
fn_tree="jax.numpy.square",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_square(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# subtract
@handle_frontend_test(
fn_tree="jax.numpy.subtract",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_subtract(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[0],
)
# tan
@handle_frontend_test(
fn_tree="jax.numpy.tan",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_tan(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# tanh
@handle_frontend_test(
fn_tree="jax.numpy.tanh",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_tanh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# tensordot
@handle_frontend_test(
fn_tree="jax.numpy.tensordot",
dtype_values_and_axes=_get_dtype_value1_value2_axis_for_tensordot(
helpers.get_dtypes(kind="numeric")
),
test_with_out=st.just(False),
)
def test_jax_tensordot(
dtype_values_and_axes,
frontend,
backend_fw,
test_flags,
fn_tree,
):
dtype, a, b, axes = dtype_values_and_axes
if ivy.current_backend_str() == "torch":
atol = 1e-3
else:
atol = 1e-6
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
a=a,
b=b,
atol=atol,
axes=axes,
)
# trace
@handle_frontend_test(
fn_tree="jax.numpy.trace",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=1,
max_dim_size=10,
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
),
offset=st.integers(min_value=0, max_value=0),
axis1=st.integers(min_value=0, max_value=0),
axis2=st.integers(min_value=1, max_value=1),
test_with_out=st.just(False),
)
def test_jax_trace(
*,
dtype_and_x,
offset,
axis1,
axis2,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-1,
atol=1e-1,
a=x[0],
offset=offset,
axis1=axis1,
axis2=axis2,
)
@handle_frontend_test(
fn_tree="jax.numpy.trapz",
dtype_x_axis_rand_either=_either_x_dx(),
test_with_out=st.just(False),
)
def test_jax_trapz(
*,
dtype_x_axis_rand_either,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
dtype_values_axis, rand, either_x_dx = dtype_x_axis_rand_either
input_dtype, y, axis = dtype_values_axis
if rand == 0:
dtype_x, x = either_x_dx
x = np.asarray(x, dtype=dtype_x)
dx = None
else:
x = None
dx = either_x_dx
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
y=y[0],
x=x,
dx=dx,
axis=axis,
)
# trunc
@handle_frontend_test(
fn_tree="jax.numpy.trunc",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_trunc(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# vdot
@handle_frontend_test(
fn_tree="jax.numpy.vdot",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
test_with_out=st.just(False),
)
def test_jax_vdot(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
a=x[0],
b=x[1],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_mathematical_functions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_mathematical_functions.py",
"repo_id": "ivy",
"token_count": 40460
} | 46 |
# Testing Function
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_manipulation import ( # noqa: E501
put_along_axis_helper,
)
@handle_frontend_test(
fn_tree="numpy.compress",
dtype_arr_ax=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=10,
max_dim_size=100,
valid_axis=True,
force_int_axis=True,
),
condition=helpers.array_values(
dtype=helpers.get_dtypes("bool"),
shape=helpers.get_shape(
min_num_dims=1, max_num_dims=1, min_dim_size=1, max_dim_size=5
),
),
)
def test_numpy_compress(
dtype_arr_ax,
condition,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
):
dtype, arr, ax = dtype_arr_ax
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
condition=condition,
a=arr[0],
axis=ax,
)
@handle_frontend_test(
fn_tree="numpy.diag",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=2,
min_dim_size=2,
),
k=st.integers(min_value=-1, max_value=1),
test_with_out=st.just(False),
)
def test_numpy_diag(
dtype_and_x,
k,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
on_device=on_device,
frontend=frontend,
fn_tree=fn_tree,
v=x[0],
k=k,
)
@handle_frontend_test(
fn_tree="numpy.diagonal",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
min_axes_size=2,
max_axes_size=2,
valid_axis=True,
),
offset=st.integers(min_value=-1, max_value=1),
test_with_out=st.just(False),
)
def test_numpy_diagonal(
dtype_x_axis,
offset,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtype,
on_device=on_device,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
a=x[0],
offset=offset,
axis1=axis[0],
axis2=axis[1],
)
@handle_frontend_test(
fn_tree="numpy.put_along_axis",
args=put_along_axis_helper(),
test_with_out=st.just(False),
)
def test_numpy_put_along_axis(
*,
args,
test_flags,
frontend,
fn_tree,
on_device,
backend_fw,
):
dtypes, x, indices, values, axis = args
helpers.test_frontend_function(
input_dtypes=dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
arr=x,
indices=indices,
values=values,
axis=axis,
)
@handle_frontend_test(
fn_tree="numpy.take",
dtype_x_indices_axis=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("valid"),
indices_dtypes=["int32", "int64"],
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=5,
indices_same_dims=True,
valid_bounds=False,
),
mode=st.sampled_from(["clip", "wrap"]),
)
def test_numpy_take(
*,
dtype_x_indices_axis,
mode,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtypes, x, indices, axis, _ = dtype_x_indices_axis
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
a=x,
indices=indices,
axis=axis,
mode=mode,
)
@handle_frontend_test(
fn_tree="numpy.take_along_axis",
dtype_x_indices_axis=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("numeric"),
indices_dtypes=["int32", "int64"],
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
indices_same_dims=True,
),
test_with_out=st.just(False),
)
def test_numpy_take_along_axis(
*,
dtype_x_indices_axis,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtypes, x, indices, axis, _ = dtype_x_indices_axis
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
arr=x,
indices=indices,
axis=axis,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_indexing_routines/test_indexing_like_operations.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_indexing_routines/test_indexing_like_operations.py",
"repo_id": "ivy",
"token_count": 2742
} | 47 |
# global
from hypothesis import strategies as st, assume
import ivy
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# all
@handle_frontend_test(
fn_tree="numpy.all",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
valid_axis=True,
max_axes_size=1,
force_int_axis=True,
),
keepdims=st.booleans(),
where=np_frontend_helpers.where(),
)
def test_numpy_all(
*,
dtype_x_axis,
keepdims,
where,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
axis = axis if axis is None or isinstance(axis, int) else axis[0]
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
keepdims=keepdims,
where=where,
)
# any
@handle_frontend_test(
fn_tree="numpy.any",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
valid_axis=True,
max_axes_size=1,
force_int_axis=True,
),
keepdims=st.booleans(),
where=np_frontend_helpers.where(),
)
def test_numpy_any(
*,
dtype_x_axis,
keepdims,
where,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
axis = axis if axis is None or isinstance(axis, int) else axis[0]
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
keepdims=keepdims,
where=where,
)
@handle_frontend_test(
fn_tree="numpy.iscomplex",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("real_and_complex"), min_num_dims=1
),
test_with_out=st.just(False),
)
def test_numpy_iscomplex(
*,
dtype_and_x,
frontend,
on_device,
fn_tree,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="numpy.iscomplexobj",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("real_and_complex"),
),
test_with_out=st.just(False),
)
def test_numpy_iscomplexobj(
*,
dtype_and_x,
frontend,
on_device,
fn_tree,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
if ivy.current_backend_str() == "paddle":
# mostly paddle doesn't support unsigned int
assume(input_dtype[0] not in ["int8", "uint8", "int16"])
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="numpy.isfortran",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), min_num_dims=1
),
test_with_out=st.just(False),
)
def test_numpy_isfortran(
*,
dtype_and_x,
frontend,
on_device,
fn_tree,
test_flags,
backend_fw,
):
if ivy.current_backend() != "numpy":
return
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
)
@handle_frontend_test(
fn_tree="numpy.isreal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex")
),
)
def test_numpy_isreal(
*,
dtype_and_x,
frontend,
on_device,
fn_tree,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="numpy.isrealobj",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("real_and_complex"), min_num_dims=1
),
test_with_out=st.just(False),
)
def test_numpy_isrealobj(
*,
dtype_and_x,
frontend,
on_device,
fn_tree,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="numpy.isscalar",
element=st.booleans() | st.floats() | st.integers() | st.complex_numbers(),
test_with_out=st.just(False),
)
def test_numpy_isscalar(
*,
element,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=ivy.all_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
element=element,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_logic/test_truth_value_testing.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_logic/test_truth_value_testing.py",
"repo_id": "ivy",
"token_count": 3124
} | 48 |
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _dtype_x_bounded_axis(draw, **kwargs):
dtype, x, shape = draw(helpers.dtype_and_values(**kwargs, ret_shape=True))
axis = draw(helpers.ints(min_value=0, max_value=len(shape) - 1))
return dtype, x, axis
@st.composite
def _get_dtype_values_k_axes_for_rot90(
draw,
available_dtypes,
min_value=None,
max_value=None,
allow_inf=False,
exclude_min=False,
exclude_max=False,
min_num_dims=1,
max_num_dims=10,
min_dim_size=1,
max_dim_size=10,
):
shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
k = draw(helpers.ints(min_value=-4, max_value=4))
axes = tuple(
draw(
st.lists(
helpers.ints(min_value=-(len(shape) - 1), max_value=len(shape) - 2),
min_size=2,
max_size=2,
unique=True,
).filter(lambda axes: abs(axes[0] - axes[1]) != len(shape) - 1)
)
)
dtype = draw(st.sampled_from(draw(available_dtypes)))
values = draw(
helpers.array_values(
dtype=dtype,
shape=shape,
min_value=min_value,
max_value=max_value,
allow_inf=allow_inf,
exclude_min=exclude_min,
exclude_max=exclude_max,
large_abs_safety_factor=72,
small_abs_safety_factor=72,
safety_factor_scale="log",
)
)
return [dtype], values, k, axes
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="numpy.flip",
dtype_x_axis=_dtype_x_bounded_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=1,
),
test_with_out=st.just(False),
)
def test_numpy_flip(
*,
dtype_x_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=x[0],
axis=axis,
)
# fliplr
@handle_frontend_test(
fn_tree="numpy.fliplr",
dtype_and_m=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
),
test_with_out=st.just(False),
)
def test_numpy_fliplr(
*,
dtype_and_m,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, m = dtype_and_m
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=m[0],
)
# flipud
@handle_frontend_test(
fn_tree="numpy.flipud",
dtype_and_m=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
test_with_out=st.just(False),
)
def test_numpy_flipud(
*,
dtype_and_m,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, m = dtype_and_m
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=m[0],
)
# roll
@handle_frontend_test(
fn_tree="numpy.roll",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
min_dim_size=2,
),
shift=helpers.ints(min_value=1, max_value=10),
axis=helpers.ints(min_value=-1, max_value=1),
test_with_out=st.just(False),
)
def test_numpy_roll(
*,
dtype_and_x,
shift,
axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
shift=shift,
axis=axis,
)
# rot90
@handle_frontend_test(
fn_tree="numpy.rot90",
dtype_m_k_axes=_get_dtype_values_k_axes_for_rot90(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=3,
max_num_dims=6,
min_dim_size=1,
max_dim_size=10,
),
test_with_out=st.just(False),
)
def test_numpy_rot90(
*,
dtype_m_k_axes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, m, k, axes = dtype_m_k_axes
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=m[0],
k=k,
axes=axes,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_rearranging_elements.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_rearranging_elements.py",
"repo_id": "ivy",
"token_count": 2906
} | 49 |
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# arccos
@handle_frontend_test(
fn_tree="numpy.arccos",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="arccos"
),
)
def test_numpy_arccos(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# arccosh
@handle_frontend_test(
fn_tree="numpy.arccosh",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="arccosh"
),
)
def test_numpy_arccosh(
dtypes_values_casting,
where,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# arcsin
@handle_frontend_test(
fn_tree="numpy.arcsin",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="arcsin"
),
)
def test_numpy_arcsin(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# arctan
@handle_frontend_test(
fn_tree="numpy.arctan",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="arctan"
),
)
def test_numpy_arctan(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-3,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# Test for arctan2
@handle_frontend_test(
fn_tree="numpy.arctan2",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="arctan2"
),
)
def test_numpy_arctan2(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting # Unpack x
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-3,
x1=x[0], # Input x1
x2=x[1], # Input x2
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# cos
@handle_frontend_test(
fn_tree="numpy.cos",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="cos"
),
)
def test_numpy_cos(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-03,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# deg2rad
@handle_frontend_test(
fn_tree="numpy.deg2rad",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="deg2rad"
),
)
def test_numpy_deg2rad(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# degrees
@handle_frontend_test(
fn_tree="numpy.degrees",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="degrees"
),
)
def test_numpy_degrees(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# rad2deg
@handle_frontend_test(
fn_tree="numpy.rad2deg",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="rad2deg"
),
)
def test_numpy_rad2deg(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# tan
@handle_frontend_test(
fn_tree="numpy.tan",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="tan"
),
)
def test_numpy_tan(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
atol=1e-02,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_trigonometric_functions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_trigonometric_functions.py",
"repo_id": "ivy",
"token_count": 6453
} | 50 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# bincount
@handle_frontend_test(
fn_tree="numpy.bincount",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_value=1,
max_value=2,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=1,
),
key="a_s_d",
),
),
test_with_out=st.just(False),
)
def test_numpy_bincount(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
weights=None,
minlength=0,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_statistics/test_histograms.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_statistics/test_histograms.py",
"repo_id": "ivy",
"token_count": 542
} | 51 |
# global
from hypothesis import strategies as st, assume
import hypothesis.extra.numpy as nph
import numpy as np
import sys
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_frontends.test_torch.test_blas_and_lapack_ops import (
_get_dtype_input_and_matrices,
_get_dtype_and_3dbatch_matrices,
)
# --- Helpers --- #
# --------------- #
@st.composite
def _draw_paddle_diagonal(draw):
_dtype, _x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=10,
min_dim_size=1,
max_dim_size=50,
)
)
offset = (draw(helpers.ints(min_value=-10, max_value=50)),)
axes = (
draw(
st.lists(
helpers.ints(min_value=-(len(_x)), max_value=len(_x)),
min_size=len(_x) + 1,
max_size=len(_x) + 1,
unique=True,
).filter(lambda axes: axes[0] % 2 != axes[1] % 2)
),
)
return _dtype, _x[0], offset[0], axes[0]
@st.composite
def _test_paddle_take_helper(draw):
mode = draw(st.sampled_from(["raise", "clip", "wrap"]))
safe_bounds = mode == "raise"
dtypes, xs, indices, _, _ = draw(
helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("float_and_integer"),
indices_dtypes=["int32", "int64"],
valid_bounds=safe_bounds,
)
)
return dtypes, xs, indices, mode
# --- Main --- #
# ------------ #
# abs
@handle_frontend_test(
fn_tree="paddle.abs",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
)
def test_paddle_abs(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# acos
@handle_frontend_test(
fn_tree="paddle.acos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_acos(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
x=x[0],
)
# acosh
@handle_frontend_test(
fn_tree="paddle.acosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_acosh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
x=x[0],
)
# add
@handle_frontend_test(
fn_tree="paddle.add",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_add(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
)
# addmm
@handle_frontend_test(
fn_tree="paddle.addmm",
dtype_input_xy=_get_dtype_and_3dbatch_matrices(with_input=True, input_3d=True),
beta=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
alpha=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
)
def test_paddle_addmm(
*,
dtype_input_xy,
beta,
alpha,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input, x, y = dtype_input_xy
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
x=x[0],
y=y[0],
beta=beta,
alpha=alpha,
)
# all
@handle_frontend_test(
fn_tree="paddle.all",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=["bool"],
valid_axis=True,
allow_neg_axes=True,
force_int_axis=True,
min_num_dims=1,
),
keepdim=st.booleans(),
)
def test_paddle_all(
*,
dtype_and_x,
keepdim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
backend_to_test=backend_fw,
x=x[0],
axis=axis,
keepdim=keepdim,
)
# amax
@handle_frontend_test(
fn_tree="paddle.amax",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
allow_inf=False,
shared_dtype=True,
),
)
def test_paddle_amax(
*,
dtype_and_x,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
)
# amin
@handle_frontend_test(
fn_tree="paddle.amin",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
valid_axis=True,
),
keepdim=st.booleans(),
)
def test_paddle_amin(
*,
dtype_and_x,
keepdim,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
axis=axis,
keepdim=keepdim,
)
@handle_frontend_test(
fn_tree="paddle.angle",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float64", "complex64", "complex128"],
),
)
def test_paddle_angle(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# any
@handle_frontend_test(
fn_tree="paddle.any",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=["bool"],
valid_axis=True,
allow_neg_axes=True,
force_int_axis=True,
min_num_dims=1,
),
)
def test_paddle_any(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
backend_to_test=backend_fw,
x=x[0],
axis=axis,
keepdim=False,
)
# asin
@handle_frontend_test(
fn_tree="paddle.asin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_asin(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# asinh
@handle_frontend_test(
fn_tree="paddle.asinh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_asinh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
x=x[0],
)
# atan
@handle_frontend_test(
fn_tree="paddle.atan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_atan(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# atan2
@handle_frontend_test(
fn_tree="paddle.atan2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_atan2(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
)
# atanh
@handle_frontend_test(
fn_tree="paddle.atanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_atanh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# broadcast_shape
@handle_frontend_test(
fn_tree="paddle.broadcast_shape",
input_shapes_x=nph.mutually_broadcastable_shapes(
num_shapes=2, min_dims=1, max_dims=5, min_side=1, max_side=5
),
)
def test_paddle_broadcast_shape(
*,
input_shapes_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
helpers.test_frontend_function(
input_dtypes=["int32", "int64"],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x_shape=input_shapes_x[0][0],
y_shape=input_shapes_x[0][1],
)
# ceil
@handle_frontend_test(
fn_tree="paddle.ceil",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_ceil(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# conj
@handle_frontend_test(
fn_tree="paddle.conj",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_paddle_conj(
*,
dtype_and_input,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# cos
@handle_frontend_test(
fn_tree="paddle.cos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_cos(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# cosh
@handle_frontend_test(
fn_tree="paddle.cosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_cosh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
x=x[0],
)
# count_nonzero
@handle_frontend_test(
fn_tree="paddle.count_nonzero",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes(kind="integer"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
),
)
def test_paddle_count_nonzero(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
on_device=on_device,
fn_tree=fn_tree,
test_flags=test_flags,
frontend=frontend,
x=x[0],
axis=axis,
)
# cumprod
@handle_frontend_test(
fn_tree="paddle.cumprod",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
),
)
def test_paddle_cumprod(
*,
dtype_x_axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
dim=axis,
)
@handle_frontend_test(
fn_tree="paddle.cumsum",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
),
)
def test_paddle_cumsum(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
# rtol=1e-04,
# atol=1e-04,
)
# deg2rad
@handle_frontend_test(
fn_tree="paddle.deg2rad",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_deg2rad(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# diagonal
@handle_frontend_test(fn_tree="paddle.diagonal", data=_draw_paddle_diagonal())
def test_paddle_diagonal(
*,
data,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
_dtype, _x, offset, axes = data
helpers.test_frontend_function(
input_dtypes=_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=_x,
offset=offset,
axis1=axes[0],
axis2=axes[1],
)
# diff
@handle_frontend_test(
fn_tree="paddle.diff",
dtype_n_x_n_axis=helpers.dtype_values_axis(
available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
),
n=st.integers(min_value=1, max_value=1),
dtype_prepend=helpers.dtype_and_values(
available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"),
min_num_dims=1,
max_num_dims=1,
),
dtype_append=helpers.dtype_and_values(
available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"),
min_num_dims=1,
max_num_dims=1,
),
)
def test_paddle_diff(
*,
dtype_n_x_n_axis,
n,
dtype_prepend,
dtype_append,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x, axis = dtype_n_x_n_axis
_, prepend = dtype_prepend
_, append = dtype_append
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
n=n,
axis=axis,
prepend=prepend[0],
append=append[0],
)
# digamma
@handle_frontend_test(
fn_tree="paddle.digamma",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
safety_factor_scale="log",
),
)
def test_paddle_digamma(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-4,
x=x[0],
)
# divide
@handle_frontend_test(
fn_tree="paddle.divide",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_divide(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# erf
@handle_frontend_test(
fn_tree="paddle.erf",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_erf(
*,
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# exp
@handle_frontend_test(
fn_tree="paddle.exp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_exp(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# expm1
@handle_frontend_test(
fn_tree="paddle.expm1",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_expm1(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# floor
@handle_frontend_test(
fn_tree="paddle.floor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_floor(
*,
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# floor_divide
@handle_frontend_test(
fn_tree="paddle.floor_divide",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=-10,
max_value=10,
num_arrays=2,
allow_inf=False,
shared_dtype=True,
),
)
def test_paddle_floor_divide(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
atol=1e-5,
)
# floor_mod
@handle_frontend_test(
fn_tree="paddle.floor_mod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
min_value=1,
),
)
def test_paddle_floor_mod(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="paddle.fmax",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_fmax(
*,
dtypes_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="paddle.fmin",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_fmin(
*,
dtypes_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# frac
@handle_frontend_test(
fn_tree="paddle.frac",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
max_value=1e6,
min_value=-1e6,
),
)
def test_paddle_frac(
*,
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# gcd
@handle_frontend_test(
fn_tree="paddle.gcd",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=-100,
max_value=100,
min_num_dims=1,
min_dim_size=1,
num_arrays=2,
shared_dtype=True,
),
)
def test_paddle_gcd(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
)
# heaviside
@handle_frontend_test(
fn_tree="paddle.heaviside",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_heaviside(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
)
# inner
@handle_frontend_test(
fn_tree="paddle.inner",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=-10,
max_value=10,
num_arrays=2,
shared_dtype=True,
),
)
def test_paddle_inner(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
)
# inverse
@handle_frontend_test(
fn_tree="paddle.inverse",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-100.0,
max_value=100.0,
shape=helpers.ints(min_value=2, max_value=10).map(lambda x: (x, x)),
).filter(
lambda x: "float16" not in x[0]
and "bfloat16" not in x[0]
and np.linalg.det(np.asarray(x[1][0])) != 0
and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon
),
test_with_out=st.just(False),
)
def test_paddle_inverse(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
rtol=1e-01,
atol=1e-01,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# isfinite
@handle_frontend_test(
fn_tree="paddle.isfinite",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_isfinite(
*,
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# isinf
@handle_frontend_test(
fn_tree="paddle.isinf",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_isinf(
*,
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# isnan
@handle_frontend_test(
fn_tree="paddle.isnan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_isnan(
*,
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# kron
@handle_frontend_test(
fn_tree="paddle.kron",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
shared_dtype=True,
),
)
def test_paddle_kron(
*,
dtype_and_x,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
)
# lcm
@handle_frontend_test(
fn_tree="paddle.lcm",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
min_num_dims=1,
safety_factor_scale="log",
large_abs_safety_factor=2,
shared_dtype=True,
),
)
def test_paddle_lcm(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
)
# lerp
@handle_frontend_test(
fn_tree="paddle.lerp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=3,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_lerp(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
weight=x[2],
)
# lgamma
@handle_frontend_test(
fn_tree="paddle.lgamma",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
safety_factor_scale="log",
),
)
def test_paddle_lgamma(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-4,
x=x[0],
)
# log
@handle_frontend_test(
fn_tree="paddle.log",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_log(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="paddle.log10",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_log10(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# log1p
@handle_frontend_test(
fn_tree="paddle.log1p",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
max_value=1e5,
),
)
def test_paddle_log1p(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# log2
@handle_frontend_test(
fn_tree="paddle.log2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_log2(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# logit
@handle_frontend_test(
fn_tree="paddle.logit",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_logit(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
eps=1e-2,
)
# logsumexp
@handle_frontend_test(
fn_tree="paddle.tensor.math.logsumexp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
max_num_dims=4,
num_arrays=2,
allow_inf=False,
shared_dtype=True,
),
)
def test_paddle_logsumexp(
*,
dtype_and_x,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
axis=None,
)
# max
@handle_frontend_test(
fn_tree="paddle.max",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=False,
),
)
def test_paddle_max(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
keepdim=False,
)
# maximum
@handle_frontend_test(
fn_tree="paddle.maximum",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
)
def test_paddle_maximum(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# min
@handle_frontend_test(
fn_tree="paddle.min",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=False,
),
)
def test_paddle_min(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
keepdim=False,
)
@handle_frontend_test(
fn_tree="paddle.minimum",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
)
def test_paddle_minimum(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
)
# mm
@handle_frontend_test(
fn_tree="paddle.mm",
dtype_xy=_get_dtype_input_and_matrices(),
)
def test_paddle_mm(
*,
dtype_xy,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, y = dtype_xy
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
mat2=y,
)
# mod
@handle_frontend_test(
fn_tree="paddle.mod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_mod(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[0], 0)))
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# multiply
@handle_frontend_test(
fn_tree="paddle.multiply",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_multiply(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="paddle.nanmean",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
allow_nan=True,
),
)
def test_paddle_nanmean(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
rtol=1e-04,
atol=1e-04,
)
# nansum
@handle_frontend_test(
fn_tree="paddle.nansum",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
allow_nan=True,
),
)
def test_paddle_nansum(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
rtol=1e-04,
atol=1e-04,
)
# neg
@handle_frontend_test(
fn_tree="paddle.neg",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float32", "float64", "int8", "int16", "int32", "int64"],
),
)
def test_paddle_neg(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# outer
@handle_frontend_test(
fn_tree="paddle.outer",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
min_num_dims=1,
max_num_dims=1,
shared_dtype=True,
),
)
def test_paddle_outer(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
)
# pow
@handle_frontend_test(
fn_tree="paddle.pow",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
allow_inf=False,
shared_dtype=True,
),
)
def test_paddle_pow(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# prod
@handle_frontend_test(
fn_tree="paddle.prod",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
min_value=-10,
max_value=10,
force_int_axis=False,
allow_nan=False,
),
)
def test_paddle_prod(
*,
dtype_and_x,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
keepdim=False,
backend_to_test=backend_fw,
)
# rad2deg
@handle_frontend_test(
fn_tree="paddle.rad2deg",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_rad2deg(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# reciprocal
@handle_frontend_test(
fn_tree="paddle.reciprocal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_reciprocal(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# remainder
@handle_frontend_test(
fn_tree="paddle.remainder",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_remainder(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# round
@handle_frontend_test(
fn_tree="paddle.round",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=1,
),
)
def test_paddle_round(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# rsqrt
@handle_frontend_test(
fn_tree="paddle.rsqrt",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_rsqrt(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="paddle.sgn",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_num_dims=1,
max_num_dims=1,
min_dim_size=1,
max_dim_size=1,
abs_smallest_val=1e-10,
min_value=-10,
max_value=10,
),
)
def test_paddle_sgn(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# sign
@handle_frontend_test(
fn_tree="paddle.sign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_sign(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# sin
@handle_frontend_test(
fn_tree="paddle.sin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_sin(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# diff
# sinh
@handle_frontend_test(
fn_tree="paddle.sinh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_sinh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# sqrt
@handle_frontend_test(
fn_tree="paddle.sqrt",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_sqrt(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# square
@handle_frontend_test(
fn_tree="paddle.square",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_square(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# stanh
@handle_frontend_test(
fn_tree="paddle.stanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
scale_a=st.floats(1e-5, 1e5),
scale_b=st.floats(1e-5, 1e5),
)
def test_paddle_stanh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
scale_a,
scale_b,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
scale_a=scale_a,
scale_b=scale_b,
)
# subtract
@handle_frontend_test(
fn_tree="paddle.subtract",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_subtract(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="paddle.sum",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_sum(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
)
# take
@handle_frontend_test(
fn_tree="paddle.take", dtype_and_values=_test_paddle_take_helper()
)
def test_paddle_take(
*,
dtype_and_values,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
dtypes, xs, indices, modes = dtype_and_values
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs,
index=indices,
mode=modes,
)
# tan
@handle_frontend_test(
fn_tree="paddle.tan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_tan(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
x=x[0],
)
# tanh
@handle_frontend_test(
fn_tree="paddle.tanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_tanh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
x=x[0],
)
# trace
@handle_frontend_test(
fn_tree="paddle.trace",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
min_num_dims=2,
min_value=-1e04,
max_value=1e04,
allow_inf=False,
),
offset=st.integers(min_value=-1e04, max_value=1e04),
axis1=st.integers(min_value=0, max_value=0),
axis2=st.integers(min_value=1, max_value=1),
)
def test_paddle_trace(
*,
dtype_and_x,
offset,
axis1,
axis2,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
offset=offset,
axis1=axis1,
axis2=axis2,
)
# trunc
@handle_frontend_test(
fn_tree="paddle.trunc",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", "int"),
),
)
def test_paddle_trunc(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_math.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_math.py",
"repo_id": "ivy",
"token_count": 30734
} | 52 |
# TODO: uncomment after frontend is not required
# global
import sys
from hypothesis import strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test, BackendHandler
# --- Helpers --- #
# --------------- #
@st.composite
def _generate_eigh_tridiagonal_args(draw):
dtype, alpha = draw(
helpers.dtype_and_values(
min_dim_size=2,
min_num_dims=1,
max_num_dims=1,
min_value=2.0,
max_value=5,
available_dtypes=helpers.get_dtypes("float"),
)
)
beta_shape = len(alpha[0]) - 1
dtype, beta = draw(
helpers.dtype_and_values(
available_dtypes=dtype,
shape=(beta_shape,),
min_value=2.0,
max_value=5,
)
)
select = draw(st.sampled_from(("a", "i", "v")))
if select == "a":
select_range = None
elif select == "i":
range_slice = draw(
st.slices(beta_shape).filter(
lambda x: x.start
and x.stop
and x.step
and x.start >= 0
and x.stop >= 0
and x.step >= 0
and x.start < x.stop
)
)
select_range = [range_slice.start, range_slice.stop]
else:
select_range = [-100, 100]
eigvals_only = draw(st.booleans())
tol = draw(st.floats(1e-5, 1e-3))
return dtype, alpha, beta, eigvals_only, select, select_range, tol
@st.composite
def _norm_helper(draw):
def _matrix_norm_example():
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=helpers.get_shape(min_num_dims=2, max_num_dims=2),
min_num_dims=2,
max_num_dims=2,
min_dim_size=1,
max_dim_size=10,
min_value=-1e4,
max_value=1e4,
large_abs_safety_factor=10,
small_abs_safety_factor=10,
safety_factor_scale="log",
),
)
ord = draw(st.sampled_from(["fro", "nuc"]))
axis = (-2, -1)
check_stable = True
return x_dtype, x, axis, ord, check_stable
def _vector_norm_example():
x_dtype, x, axis = draw(
helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
valid_axis=True,
force_int_axis=True,
min_value=-1e04,
max_value=1e04,
large_abs_safety_factor=10,
small_abs_safety_factor=10,
safety_factor_scale="log",
)
)
ints = draw(helpers.ints(min_value=1, max_value=2))
floats = draw(helpers.floats(min_value=1, max_value=2))
ord = draw(st.sampled_from([ints, floats, float("inf"), float("-inf")]))
check_stable = False
return x_dtype, x, axis, ord, check_stable
is_vec_norm = draw(st.booleans())
if is_vec_norm:
return _vector_norm_example()
return _matrix_norm_example()
# --- Main --- #
# ------------ #
# eigh_tridiagonal
@handle_frontend_test(
fn_tree="scipy.linalg.eigh_tridiagonal",
all_args=_generate_eigh_tridiagonal_args(),
test_with_out=st.just(False),
)
def test_scipy_eigh_tridiagonal(
all_args,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
dtype, alpha, beta, eigvals_only, select, select_range, tol = all_args
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
alpha=alpha[0],
beta=beta[0],
eigvals_only=eigvals_only,
select=select,
select_range=select_range,
tol=tol,
)
# inv
@handle_frontend_test(
fn_tree="scipy.linalg.inv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
small_abs_safety_factor=2,
safety_factor_scale="log",
shape=helpers.ints(min_value=2, max_value=20).map(lambda x: (x, x)),
).filter(lambda x: np.linalg.cond(x[1][0].tolist()) < 1 / sys.float_info.epsilon),
test_with_out=st.just(False),
)
def test_scipy_inv(
dtype_and_x,
test_flags,
frontend,
fn_tree,
on_device,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
)
# kron
@handle_frontend_test(
fn_tree="scipy.linalg.kron",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=1,
max_dim_size=10,
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_scipy_kron(dtype_and_x, frontend, test_flags, fn_tree, on_device, backend_fw):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
b=x[1],
)
# lu_factor
@handle_frontend_test(
fn_tree="scipy.linalg.lu_factor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=50,
min_num_dims=2,
),
overwrite_a=st.booleans(),
check_finite=st.booleans(),
test_with_out=st.just(False),
)
def test_scipy_lu_factor(
dtype_and_x,
overwrite_a,
check_finite,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
test_values=False,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
overwrite_a=overwrite_a,
check_finite=check_finite,
)
# norm
@handle_frontend_test(
fn_tree="scipy.linalg.norm",
dtype_values=_norm_helper(),
keepdims=st.booleans(),
test_with_out=st.just(False),
)
def test_scipy_norm(
dtype_values,
keepdims,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
dtype, x, axis, ord, _ = dtype_values
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
ord=ord,
axis=axis,
keepdims=keepdims,
)
# pinv
@handle_frontend_test(
fn_tree="scipy.linalg.pinv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=2,
),
test_with_out=st.just(False),
)
def test_scipy_pinv(
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
)
# svd
@handle_frontend_test(
fn_tree="scipy.linalg.svd",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0.1,
max_value=10,
shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
),
full_matrices=st.booleans(),
compute_uv=st.booleans(),
test_with_out=st.just(False),
)
def test_scipy_svd(
dtype_and_x,
full_matrices,
compute_uv,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
x = x[0]
x = (
np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
) # make symmetric positive-definite
ret, ret_gt = helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
test_values=False,
fn_tree=fn_tree,
on_device=on_device,
a=x,
full_matrices=full_matrices,
compute_uv=compute_uv,
)
with BackendHandler.update_backend(backend_fw) as ivy_backend:
for u, v in zip(ret, ret_gt):
u = ivy_backend.to_numpy(ivy_backend.abs(u))
v = ivy_backend.to_numpy(ivy_backend.abs(v))
helpers.value_test(
ret_np_flat=u,
ret_np_from_gt_flat=v,
rtol=1e-04,
atol=1e-04,
backend=backend_fw,
ground_truth_backend=frontend,
)
# svdvals
@handle_frontend_test(
fn_tree="scipy.linalg.svdvals",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0.1,
max_value=50,
shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
),
check_finite=st.booleans(),
test_with_out=st.just(False),
)
def test_scipy_svdvals(
dtype_and_x,
check_finite,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
x = x[0]
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
test_values=False,
fn_tree=fn_tree,
on_device=on_device,
a=x,
check_finite=check_finite,
)
# Tests #
# ----- #
# tril
@handle_frontend_test(
fn_tree="scipy.linalg.tril",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
k=helpers.ints(min_value=-10, max_value=10),
test_with_out=st.just(False),
)
def test_scipy_tril(
dtype_and_x,
k,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=x[0],
k=k,
)
# triu
@handle_frontend_test(
fn_tree="scipy.linalg.triu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
k=helpers.ints(min_value=-10, max_value=10),
test_with_out=st.just(False),
)
def test_scipy_triu(
dtype_and_x,
k,
test_flags,
frontend,
fn_tree,
on_device,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=x[0],
k=k,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_scipy/test_linalg/test_linalg.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_scipy/test_linalg/test_linalg.py",
"repo_id": "ivy",
"token_count": 6302
} | 53 |
from hypothesis import strategies as st
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
@handle_frontend_test(
fn_tree="sklearn.utils.as_float_array",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
test_with_copy=st.just(True),
)
def test_sklearn_as_float_array(
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
frontend=frontend,
on_device=on_device,
X=x[0],
)
@handle_frontend_test(
fn_tree="sklearn.utils.column_or_1d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.one_of(
st.tuples(st.integers(1, 10), st.just(1)), st.tuples(st.integers(1, 10))
),
),
)
def test_sklearn_column_or_1d(
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
frontend=frontend,
on_device=on_device,
y=x[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_sklearn/test_utils/test_validation.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_sklearn/test_utils/test_validation.py",
"repo_id": "ivy",
"token_count": 711
} | 54 |
# import torch
from ivy_tests.test_ivy.test_frontends import NativeClass
torch_classes_to_ivy_classes = {}
def convtorch(argument):
"""Convert NativeClass in argument to ivy frontend counterpart for
torch."""
if isinstance(argument, NativeClass):
return torch_classes_to_ivy_classes.get(argument._native_class)
return argument
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/__init__.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/__init__.py",
"repo_id": "ivy",
"token_count": 119
} | 55 |
# global
from hypothesis import assume, strategies as st
import numpy as np
# local
import ivy
from ivy.functional.ivy.layers import _get_embed_dim, _pack_padded_sequence
from ivy_tests.test_ivy import helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_nn.test_layers import _mha_helper
# --- Helpers --- #
# --------------- #
@st.composite
def _lstm_helper(draw):
dtype = draw(helpers.get_dtypes("valid", full=False))
has_biases = draw(st.booleans())
bidirectional = draw(st.booleans())
dropout = draw(st.floats(min_value=0, max_value=0.99))
train = (
draw(st.booleans()) and not dropout
) # not yet supported by original function
packed = draw(st.booleans())
batch_first = draw(st.booleans()) and not packed
num_batches = draw(st.integers(min_value=1, max_value=5))
num_layers = draw(st.integers(min_value=1, max_value=3))
num_directions = 2 if bidirectional else 1
seq_size = draw(st.integers(min_value=1, max_value=5))
in_size = draw(st.integers(min_value=1, max_value=3))
hidden_size = draw(st.integers(min_value=1, max_value=3))
input = draw(
helpers.array_values(
dtype=dtype[0],
shape=(
(num_batches, seq_size, in_size)
if batch_first
else (seq_size, num_batches, in_size)
),
min_value=0,
max_value=1,
)
)
init_h = draw(
helpers.array_values(
dtype=dtype[0],
shape=(num_directions * num_layers, num_batches, hidden_size),
min_value=0,
max_value=1,
)
)
init_c = draw(
helpers.array_values(
dtype=dtype[0],
shape=(num_directions * num_layers, num_batches, hidden_size),
min_value=0,
max_value=1,
)
)
all_weights = []
for k in range(num_layers):
for _ in range(num_directions):
weight_ih = draw(
helpers.array_values(
dtype=dtype[0],
shape=(
(4 * hidden_size, in_size)
if k == 0
else (4 * hidden_size, num_directions * hidden_size)
),
min_value=0,
max_value=1,
)
)
weight_hh = draw(
helpers.array_values(
dtype=dtype[0],
shape=(4 * hidden_size, hidden_size),
min_value=0,
max_value=1,
)
)
all_weights += [weight_ih, weight_hh]
if has_biases:
bias_ih = draw(
helpers.array_values(
dtype=dtype[0],
shape=(4 * hidden_size,),
min_value=0,
max_value=1,
)
)
bias_hh = draw(
helpers.array_values(
dtype=dtype[0],
shape=(4 * hidden_size,),
min_value=0,
max_value=1,
)
)
all_weights += [bias_ih, bias_hh]
if packed:
batch_sizes = [seq_size]
batch_sizes += draw(
st.lists(
st.integers(min_value=1, max_value=seq_size),
min_size=num_batches - 1,
max_size=num_batches - 1,
)
)
batch_sizes = np.array(draw(st.permutations(batch_sizes)))
input, batch_sizes = (
ivy.to_numpy(p) for p in _pack_padded_sequence(input, batch_sizes)
)
else:
batch_sizes = None
initial_states = init_h, init_c
all_weights = tuple(all_weights)
if batch_sizes is not None:
dtypes = dtype + ["int64"]
kwargs = {
"data": input,
"batch_sizes": batch_sizes,
"hx": initial_states,
"params": all_weights,
"has_biases": has_biases,
"num_layers": num_layers,
"dropout": dropout,
"train": train,
"bidirectional": bidirectional,
}
else:
dtypes = dtype
kwargs = {
"input": input,
"hx": initial_states,
"params": all_weights,
"has_biases": has_biases,
"num_layers": num_layers,
"dropout": dropout,
"train": train,
"bidirectional": bidirectional,
"batch_first": batch_first,
}
return dtypes, kwargs
# --- Main --- #
# ------------ #
# lstm
@handle_frontend_test(
fn_tree="torch.lstm",
dtypes_kwargs=_lstm_helper(),
test_with_out=st.just(False),
)
def test_torch_lstm(
*,
dtypes_kwargs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, kwargs = dtypes_kwargs
# Todo: Debug the function to have this case passing as well
assume("batch_sizes" not in kwargs)
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**kwargs,
)
# multi_head_attention_forward
@handle_frontend_test(
fn_tree="torch.nn.functional.multi_head_attention_forward",
dtype_mha_args=_mha_helper(same_pre_embed_dim=True, batch_second=True).filter(
lambda args: args[10] is not None
and (not args[22] or args[5] is not None)
and len(set(_get_embed_dim(*args[6:10], args[1]))) == 1
),
test_with_out=st.just(False),
)
def test_torch_multi_head_attention_forward(
*,
on_device,
fn_tree,
frontend,
test_flags,
dtype_mha_args,
backend_fw,
):
(
dtype,
q,
k,
v,
heads,
attn_mask,
in_proj_weight,
q_proj_weight,
k_proj_weight,
v_proj_weight,
out_proj_weight,
in_proj_bias,
out_proj_bias,
key_padding_mask,
bias_k,
bias_v,
static_k,
static_v,
_,
add_zero_attn,
dropout_p,
training,
is_causal,
need_weights,
average_attn_weights,
batch_first,
) = dtype_mha_args
if k is None and v is None:
k = v = q
# re-order the dtypes to match the order of the frontend arguments, not the order
# of ivy.multi_head_attention's arguments given by _mha_helper
kwargs = {
"query": q,
"key": k,
"value": v,
"embed_dim_to_check": q.shape[-1],
"num_heads": heads,
"in_proj_weight": in_proj_weight,
"in_proj_bias": in_proj_bias,
"bias_k": bias_k,
"bias_v": bias_v,
"add_zero_attn": add_zero_attn,
"dropout_p": dropout_p,
"out_proj_weight": out_proj_weight,
"out_proj_bias": out_proj_bias,
"training": training,
"key_padding_mask": key_padding_mask,
"need_weights": need_weights,
"attn_mask": attn_mask,
"use_separate_proj_weight": in_proj_weight is None,
"q_proj_weight": q_proj_weight,
"k_proj_weight": k_proj_weight,
"v_proj_weight": v_proj_weight,
"static_k": static_k,
"static_v": static_v,
"average_attn_weights": average_attn_weights,
"is_causal": is_causal,
}
helpers.test_frontend_function(
input_dtypes=[str(r.dtype) for r in kwargs.values() if ivy.is_array(r)],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
atol=1e-03,
on_device=on_device,
test_values=not training or dropout_p == 0.0,
**kwargs,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_layer_functions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_layer_functions.py",
"repo_id": "ivy",
"token_count": 4410
} | 56 |
"""Collection of tests for unified meta functions."""
# global
import pytest
import numpy as np
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
from ivy_tests.test_ivy.helpers.pipeline_helper import BackendHandler
# fomaml step overlapping vars
@handle_test(
fn_tree="functional.ivy.fomaml_step",
inner_grad_steps=helpers.ints(min_value=1, max_value=3),
with_outer_cost_fn=st.booleans(),
average_across_steps=st.booleans(),
batched=st.booleans(),
stop_gradients=st.booleans(),
num_tasks=helpers.ints(min_value=1, max_value=2),
return_inner_v=st.sampled_from(["first", "all", False]),
)
def test_fomaml_step_overlapping_vars(
on_device,
inner_grad_steps,
with_outer_cost_fn,
average_across_steps,
batched,
stop_gradients,
num_tasks,
return_inner_v,
backend_fw,
):
# Numpy does not support gradients, jax does not support gradients on custom
# nested classes
if backend_fw == "numpy":
pytest.skip()
with BackendHandler.update_backend(backend_fw) as ivy_backend:
# config
inner_learning_rate = 1e-2
variable_fn = ivy_backend.functional.ivy._variable
# create variables
if batched:
variables = ivy_backend.Container(
{
"latent": variable_fn(
ivy_backend.repeat(
ivy_backend.array([[0.0]], device=on_device),
num_tasks,
axis=0,
)
),
"weight": variable_fn(
ivy_backend.repeat(
ivy_backend.array([[1.0]], device=on_device),
num_tasks,
axis=0,
)
),
}
)
else:
variables = ivy_backend.Container(
{
"latent": variable_fn(ivy_backend.array([0.0], device=on_device)),
"weight": variable_fn(ivy_backend.array([1.0], device=on_device)),
}
)
# batch
batch = ivy_backend.Container(
{"x": ivy_backend.arange(1, num_tasks + 1, dtype="float32")}
)
# inner cost function
def inner_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost - (sub_batch_in["x"] * sub_v["latent"] * sub_v["weight"])[0]
return cost / batch_size
# outer cost function
def outer_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost + (sub_batch_in["x"] * sub_v["latent"] * sub_v["weight"])[0]
return cost / batch_size
# numpy
latent_np = ivy_backend.to_numpy(variables.latent[0:1])
weight_np = ivy_backend.to_numpy(variables.weight[0:1])
batch_np = batch.cont_map(lambda x, kc: ivy_backend.to_numpy(x))
# true gradient
all_outer_grads = []
for sub_batch in batch_np.cont_unstack_conts(0, True, num_tasks):
all_outer_grads.append(
[
(
-i * inner_learning_rate * weight_np * sub_batch["x"][0] ** 2
- sub_batch["x"][0] * latent_np
)
* (-1 if with_outer_cost_fn else 1)
for i in range(inner_grad_steps + 1)
]
)
if average_across_steps:
true_weight_grad = (
sum(sum(og) / len(og) for og in all_outer_grads) / num_tasks
)
else:
true_weight_grad = sum(og[-1] for og in all_outer_grads) / num_tasks
# true latent gradient
true_latent_grad = np.array(
[(-1 - (num_tasks - 1) / 2) * (-1 if with_outer_cost_fn else 1)]
)
# true cost
true_cost_dict = {
1: {
True: {True: {1: 0.005, 2: 0.0125}, False: {1: 0.01, 2: 0.025}},
False: {True: {1: -0.005, 2: -0.0125}, False: {1: -0.01, 2: -0.025}},
},
2: {
True: {True: {1: 0.01, 2: 0.025}, False: {1: 0.02, 2: 0.05}},
False: {True: {1: -0.01, 2: -0.025}, False: {1: -0.02, 2: -0.05}},
},
3: {
True: {True: {1: 0.015, 2: 0.0375}, False: {1: 0.03, 2: 0.075}},
False: {True: {1: -0.015, 2: -0.0375}, False: {1: -0.03, 2: -0.075}},
},
}
true_cost = true_cost_dict[inner_grad_steps][with_outer_cost_fn][
average_across_steps
][num_tasks]
# meta update
rets = ivy_backend.fomaml_step(
batch,
inner_cost_fn,
outer_cost_fn if with_outer_cost_fn else None,
variables,
inner_grad_steps,
inner_learning_rate,
average_across_steps=average_across_steps,
batched=batched,
inner_v="latent",
return_inner_v=return_inner_v,
stop_gradients=stop_gradients,
)
calc_cost = rets[0]
if stop_gradients:
assert ivy_backend.equal(
ivy_backend.functional.ivy._is_variable(calc_cost, exclusive=True),
False,
)
assert np.allclose(ivy_backend.to_scalar(calc_cost), true_cost)
outer_grads = rets[1]
assert np.allclose(
ivy_backend.to_numpy(outer_grads.weight[0]), np.array(true_weight_grad)
)
assert np.allclose(
ivy_backend.to_numpy(outer_grads.latent[0]), np.array(true_latent_grad)
)
if return_inner_v:
inner_v_rets = rets[2]
assert isinstance(inner_v_rets, ivy_backend.Container)
if return_inner_v == "all":
assert list(inner_v_rets.cont_shape) == [num_tasks, 1]
elif return_inner_v == "first":
assert list(inner_v_rets.cont_shape) == [1, 1]
# fomaml step shared vars
@handle_test(
fn_tree="functional.ivy.fomaml_step",
inner_grad_steps=helpers.ints(min_value=1, max_value=3),
with_outer_cost_fn=st.booleans(),
average_across_steps=st.booleans(),
batched=st.booleans(),
stop_gradients=st.booleans(),
num_tasks=helpers.ints(min_value=1, max_value=2),
return_inner_v=st.sampled_from(["first", "all", False]),
)
def test_fomaml_step_shared_vars(
on_device,
inner_grad_steps,
with_outer_cost_fn,
average_across_steps,
batched,
stop_gradients,
num_tasks,
return_inner_v,
backend_fw,
):
# Numpy does not support gradients, jax does not support gradients on custom
# nested classes
if backend_fw == "numpy":
return
with BackendHandler.update_backend(backend_fw) as ivy_backend:
# config
inner_learning_rate = 1e-2
variable_fn = ivy_backend.functional.ivy._variable
# create variable
if batched:
variables = ivy_backend.Container(
{
"latent": variable_fn(
ivy_backend.repeat(
ivy_backend.array([[1.0]], device=on_device),
num_tasks,
axis=0,
)
)
}
)
else:
variables = ivy_backend.Container(
{"latent": variable_fn(ivy_backend.array([1.0], device=on_device))}
)
# batch
batch = ivy_backend.Container(
{"x": ivy_backend.arange(1, num_tasks + 1, dtype="float32")}
)
# inner cost function
def inner_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost - (sub_batch_in["x"] * sub_v["latent"] ** 2)[0]
return cost / batch_size
# outer cost function
def outer_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost + (sub_batch_in["x"] * sub_v["latent"] ** 2)[0]
return cost / batch_size
# numpy
latent_np = ivy_backend.to_numpy(variables.latent[0:1])
batch_np = batch.cont_map(lambda x, kc: ivy_backend.to_numpy(x))
# loss grad function
def loss_grad_fn(sub_batch_in, w_in, outer=False):
return (
(1 if (with_outer_cost_fn and outer) else -1)
* 2
* sub_batch_in["x"][0]
* w_in
)
# true gradient
true_outer_grads = []
for sub_batch in batch_np.cont_unstack_conts(0, True, num_tasks):
ws = []
grads = []
ws.append(latent_np)
for step in range(inner_grad_steps):
update_grad = loss_grad_fn(sub_batch, ws[-1])
w = ws[-1] - inner_learning_rate * update_grad
if with_outer_cost_fn:
grads.append(loss_grad_fn(sub_batch, ws[-1], outer=True))
else:
grads.append(update_grad)
ws.append(w)
if with_outer_cost_fn:
grads.append(loss_grad_fn(sub_batch, ws[-1], outer=True))
else:
grads.append(loss_grad_fn(sub_batch, ws[-1]))
# true outer grad
if average_across_steps:
true_outer_grad = sum(grads) / len(grads)
else:
true_outer_grad = grads[-1]
true_outer_grads.append(true_outer_grad)
true_outer_grad = sum(true_outer_grads) / len(true_outer_grads)
# true cost
true_cost_dict = {
1: {
True: {True: {1: 1.0202, 2: 1.5509}, False: {1: 1.0404, 2: 1.6018}},
False: {
True: {1: -1.0202, 2: -1.5509},
False: {1: -1.0404, 2: -1.6018},
},
},
2: {
True: {
True: {1: 1.0409441, 2: 1.6042916},
False: {1: 1.0824323, 2: 1.7110746},
},
False: {
True: {1: -1.0409441, 2: -1.6042916},
False: {1: -1.0824323, 2: -1.7110746},
},
},
3: {
True: {
True: {1: 1.0622487, 2: 1.6603187},
False: {1: 1.1261624, 2: 1.8284001},
},
False: {
True: {1: -1.0622487, 2: -1.6603187},
False: {1: -1.1261624, 2: -1.8284001},
},
},
}
true_cost = true_cost_dict[inner_grad_steps][with_outer_cost_fn][
average_across_steps
][num_tasks]
# meta update
rets = ivy_backend.fomaml_step(
batch,
inner_cost_fn,
outer_cost_fn if with_outer_cost_fn else None,
variables,
inner_grad_steps,
inner_learning_rate,
average_across_steps=average_across_steps,
batched=batched,
return_inner_v=return_inner_v,
stop_gradients=stop_gradients,
)
calc_cost = rets[0]
if stop_gradients:
assert ivy_backend.equal(
ivy_backend.functional.ivy._is_variable(calc_cost, exclusive=True),
False,
)
assert np.allclose(ivy_backend.to_scalar(calc_cost), true_cost)
outer_grads = rets[1]
assert np.allclose(
ivy_backend.to_numpy(outer_grads.latent[0]), np.array(true_outer_grad)
)
if return_inner_v:
inner_v_rets = rets[2]
assert isinstance(inner_v_rets, ivy_backend.Container)
if return_inner_v == "all":
assert list(inner_v_rets.cont_shape) == [num_tasks, 1]
elif return_inner_v == "first":
assert list(inner_v_rets.cont_shape) == [1, 1]
# ToDo: replace dict checks for verifying costs with analytic calculations
# First Order #
# ------------#
# fomaml step unique vars
@handle_test(
fn_tree="functional.ivy.fomaml_step",
inner_grad_steps=helpers.ints(min_value=1, max_value=3),
with_outer_cost_fn=st.booleans(),
average_across_steps=st.booleans(),
batched=st.booleans(),
stop_gradients=st.booleans(),
num_tasks=helpers.ints(min_value=1, max_value=2),
return_inner_v=st.sampled_from(["first", "all", False]),
)
def test_fomaml_step_unique_vars(
on_device,
inner_grad_steps,
with_outer_cost_fn,
average_across_steps,
batched,
stop_gradients,
num_tasks,
return_inner_v,
backend_fw,
):
# Numpy does not support gradients, and jax does not support gradients on
# custom nested classes
if backend_fw == "numpy":
return
with BackendHandler.update_backend(backend_fw) as ivy_backend:
# config
inner_learning_rate = 1e-2
variable_fn = ivy_backend.functional.ivy._variable
# create variables
if batched:
variables = ivy_backend.Container(
{
"latent": variable_fn(
ivy_backend.repeat(
ivy_backend.array([[0.0]], device=on_device),
num_tasks,
axis=0,
)
),
"weight": variable_fn(
ivy_backend.repeat(
ivy_backend.array([[1.0]], device=on_device),
num_tasks,
axis=0,
)
),
}
)
else:
variables = ivy_backend.Container(
{
"latent": variable_fn(ivy_backend.array([0.0], device=on_device)),
"weight": variable_fn(ivy_backend.array([1.0], device=on_device)),
}
)
# batch
batch = ivy_backend.Container(
{"x": ivy_backend.arange(1, num_tasks + 1, dtype="float32")}
)
# inner cost function
def inner_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost - (sub_v["latent"] * sub_batch_in["x"] * sub_v["weight"])[0]
return cost / batch_size
# outer cost function
def outer_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost + (sub_v["latent"] * sub_batch_in["x"] * sub_v["weight"])[0]
return cost / batch_size
# numpy
weight_np = ivy_backend.to_numpy(variables.weight[0:1])
latent_np = ivy_backend.to_numpy(variables.latent[0:1])
batch_np = batch.cont_map(lambda x, kc: ivy_backend.to_numpy(x))
# true gradient
all_outer_grads = []
for sub_batch in batch_np.cont_unstack_conts(0, True, num_tasks):
all_outer_grads.append(
[
(
-i * inner_learning_rate * weight_np * sub_batch["x"][0] ** 2
- sub_batch["x"][0] * latent_np
)
* (-1 if with_outer_cost_fn else 1)
for i in range(inner_grad_steps + 1)
]
)
if average_across_steps:
true_weight_grad = (
sum(sum(og) / len(og) for og in all_outer_grads) / num_tasks
)
else:
true_weight_grad = sum(og[-1] for og in all_outer_grads) / num_tasks
# true cost
true_cost_dict = {
1: {
True: {True: {1: 0.005, 2: 0.0125}, False: {1: 0.01, 2: 0.025}},
False: {True: {1: -0.005, 2: -0.0125}, False: {1: -0.01, 2: -0.025}},
},
2: {
True: {True: {1: 0.01, 2: 0.025}, False: {1: 0.02, 2: 0.05}},
False: {True: {1: -0.01, 2: -0.025}, False: {1: -0.02, 2: -0.05}},
},
3: {
True: {True: {1: 0.015, 2: 0.0375}, False: {1: 0.03, 2: 0.075}},
False: {True: {1: -0.015, 2: -0.0375}, False: {1: -0.03, 2: -0.075}},
},
}
true_cost = true_cost_dict[inner_grad_steps][with_outer_cost_fn][
average_across_steps
][num_tasks]
# meta update
rets = ivy_backend.fomaml_step(
batch,
inner_cost_fn,
outer_cost_fn if with_outer_cost_fn else None,
variables,
inner_grad_steps,
inner_learning_rate,
average_across_steps=average_across_steps,
batched=batched,
inner_v="latent",
outer_v="weight",
return_inner_v=return_inner_v,
stop_gradients=stop_gradients,
)
calc_cost = rets[0]
if stop_gradients:
assert ivy_backend.equal(
ivy_backend.functional.ivy._is_variable(calc_cost, exclusive=True),
False,
)
assert np.allclose(ivy_backend.to_scalar(calc_cost), true_cost)
outer_grads = rets[1]
assert np.allclose(
ivy_backend.to_numpy(outer_grads.weight[0]), np.array(true_weight_grad)
)
if return_inner_v:
inner_v_rets = rets[2]
assert isinstance(inner_v_rets, ivy_backend.Container)
if return_inner_v == "all":
assert list(inner_v_rets.cont_shape) == [num_tasks, 1]
elif return_inner_v == "first":
assert list(inner_v_rets.cont_shape) == [1, 1]
# maml step overlapping vars
@pytest.mark.parametrize("inner_grad_steps", [1, 2, 3])
@pytest.mark.parametrize("with_outer_cost_fn", [True, False])
@pytest.mark.parametrize("average_across_steps", [True, False])
@pytest.mark.parametrize("batched", [True, False])
@pytest.mark.parametrize("stop_gradients", [True, False])
@pytest.mark.parametrize("num_tasks", [1, 2])
@pytest.mark.parametrize("return_inner_v", ["first", "all", False])
def test_maml_step_overlapping_vars(
on_device,
inner_grad_steps,
with_outer_cost_fn,
average_across_steps,
batched,
stop_gradients,
num_tasks,
return_inner_v,
backend_fw,
):
if backend_fw in ["numpy", "tensorflow"]:
# ToDo: work out why MAML does not work for tensorflow
# Numpy does not support gradients, jax does not support gradients on custom
# nested classes
pytest.skip()
with BackendHandler.update_backend(backend_fw) as ivy_backend:
# config
inner_learning_rate = 1e-2
variable_fn = ivy_backend.functional.ivy._variable
# create variables
if batched:
variables = ivy_backend.Container(
{
"latent": variable_fn(
ivy_backend.repeat(
ivy_backend.array([[0.0]], device=on_device),
num_tasks,
axis=0,
)
),
"weight": variable_fn(
ivy_backend.repeat(
ivy_backend.array([[1.0]], device=on_device),
num_tasks,
axis=0,
)
),
}
)
else:
variables = ivy_backend.Container(
{
"latent": variable_fn(ivy_backend.array([0.0], device=on_device)),
"weight": variable_fn(ivy_backend.array([1.0], device=on_device)),
}
)
# batch
batch = ivy_backend.Container(
{"x": ivy_backend.arange(1, num_tasks + 1, dtype="float32")}
)
# inner cost function
def inner_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost - (sub_batch_in["x"] * sub_v["latent"] * sub_v["weight"])[0]
return cost / batch_size
# outer cost function
def outer_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost + (sub_batch_in["x"] * sub_v["latent"] * sub_v["weight"])[0]
return cost / batch_size
# numpy
latent_np = ivy_backend.to_numpy(variables.latent)
weight_np = ivy_backend.to_numpy(variables.weight)
batch_np = batch.cont_map(lambda x, kc: ivy_backend.to_numpy(x))
# true weight gradient
all_outer_grads = []
for sub_batch in batch_np.cont_unstack_conts(0, True, num_tasks):
all_outer_grads.append(
[
(
-2
* i
* inner_learning_rate
* weight_np
* sub_batch["x"][0] ** 2
- sub_batch["x"][0] * latent_np
)
* (-1 if with_outer_cost_fn else 1)
for i in range(inner_grad_steps + 1)
]
)
if average_across_steps:
true_weight_grad = (
sum(sum(og) / len(og) for og in all_outer_grads) / num_tasks
)
else:
true_weight_grad = sum(og[-1] for og in all_outer_grads) / num_tasks
# true latent gradient
true_latent_grad = np.array(
[(-1 - (num_tasks - 1) / 2) * (-1 if with_outer_cost_fn else 1)]
)
# true cost
true_cost_dict = {
1: {
True: {True: {1: 0.005, 2: 0.0125}, False: {1: 0.01, 2: 0.025}},
False: {True: {1: -0.005, 2: -0.0125}, False: {1: -0.01, 2: -0.025}},
},
2: {
True: {True: {1: 0.01, 2: 0.025}, False: {1: 0.02, 2: 0.05}},
False: {True: {1: -0.01, 2: -0.025}, False: {1: -0.02, 2: -0.05}},
},
3: {
True: {True: {1: 0.015, 2: 0.0375}, False: {1: 0.03, 2: 0.075}},
False: {True: {1: -0.015, 2: -0.0375}, False: {1: -0.03, 2: -0.075}},
},
}
true_cost = true_cost_dict[inner_grad_steps][with_outer_cost_fn][
average_across_steps
][num_tasks]
# meta update
rets = ivy_backend.maml_step(
batch,
inner_cost_fn,
outer_cost_fn if with_outer_cost_fn else None,
variables,
inner_grad_steps,
inner_learning_rate,
average_across_steps=average_across_steps,
batched=batched,
inner_v="latent",
return_inner_v=return_inner_v,
stop_gradients=stop_gradients,
)
calc_cost = rets[0]
if stop_gradients:
assert ivy_backend.equal(
ivy_backend.functional.ivy._is_variable(calc_cost, exclusive=True),
False,
)
assert np.allclose(ivy_backend.to_scalar(calc_cost), true_cost)
outer_grads = rets[1]
assert np.allclose(
ivy_backend.to_numpy(outer_grads.weight), np.array(true_weight_grad)
)
assert np.allclose(
ivy_backend.to_numpy(outer_grads.latent), np.array(true_latent_grad)
)
if return_inner_v:
inner_v_rets = rets[2]
assert isinstance(inner_v_rets, ivy_backend.Container)
if return_inner_v == "all":
assert list(inner_v_rets.cont_shape) == [num_tasks, 1]
elif return_inner_v == "first":
assert list(inner_v_rets.cont_shape) == [1, 1]
# maml step shared vars
@pytest.mark.parametrize("inner_grad_steps", [1, 2, 3])
@pytest.mark.parametrize("with_outer_cost_fn", [True, False])
@pytest.mark.parametrize("average_across_steps", [True, False])
@pytest.mark.parametrize("batched", [True, False])
@pytest.mark.parametrize("stop_gradients", [True, False])
@pytest.mark.parametrize("num_tasks", [1, 2])
@pytest.mark.parametrize("return_inner_v", ["first", "all", False])
def test_maml_step_shared_vars(
on_device,
inner_grad_steps,
with_outer_cost_fn,
average_across_steps,
batched,
stop_gradients,
num_tasks,
return_inner_v,
backend_fw,
):
if backend_fw in ["numpy", "tensorflow"]:
# ToDo: work out why MAML does not work for tensorflow
# Numpy does not support gradients, jax does not support gradients on custom
# nested classes
pytest.skip()
with BackendHandler.update_backend(backend_fw) as ivy_backend:
# config
inner_learning_rate = 1e-2
variable_fn = ivy_backend.functional.ivy._variable
# create variable
if batched:
variables = ivy_backend.Container(
{
"latent": variable_fn(
ivy_backend.repeat(
ivy_backend.array([[1.0]], device=on_device),
num_tasks,
axis=0,
)
)
}
)
else:
variables = ivy_backend.Container(
{"latent": variable_fn(ivy_backend.array([1.0], device=on_device))}
)
# batch
batch = ivy_backend.Container(
{"x": ivy_backend.arange(1, num_tasks + 1, dtype="float32")}
)
# inner cost function
def inner_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost - (sub_batch_in["x"] * sub_v["latent"] ** 2)[0]
return cost / batch_size
# outer cost function
def outer_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost + (sub_batch_in["x"] * sub_v["latent"] ** 2)[0]
return cost / batch_size
# numpy
variables_np = variables.cont_map(lambda x, kc: ivy_backend.to_ivy(x))
batch_np = batch.cont_map(lambda x, kc: ivy_backend.to_ivy(x))
# loss grad function
def loss_grad_fn(sub_batch_in, w_in, outer=False):
return (
(1 if (with_outer_cost_fn and outer) else -1)
* 2
* sub_batch_in["x"][0]
* w_in
)
# update grad function
def update_grad_fn(w_init, sub_batch_in, num_steps, average=False):
terms = [0] * num_steps + [1]
collection_of_terms = [terms]
for s in range(num_steps):
rhs = [t * 2 * sub_batch_in["x"][0] for t in terms]
rhs.pop(0)
rhs.append(0)
terms = [t + rh for t, rh in zip(terms, rhs)]
collection_of_terms.append([t for t in terms])
if average:
return [
(
sum(
t * inner_learning_rate ** (num_steps - i)
for i, t in enumerate(tms)
)
* w_init.latent
)
for tms in collection_of_terms
]
return (
sum(
t * inner_learning_rate ** (num_steps - i)
for i, t in enumerate(terms)
)
* w_init.latent
)
# true gradient
true_outer_grads = []
for sub_batch in batch_np.cont_unstack_conts(0, True, num_tasks):
ws = []
grads = []
ws.append(variables_np)
for step in range(inner_grad_steps):
update_grad = loss_grad_fn(sub_batch, ws[-1])
w = ws[-1] - inner_learning_rate * update_grad
if with_outer_cost_fn:
grads.append(loss_grad_fn(sub_batch, ws[-1], outer=True))
else:
grads.append(update_grad)
ws.append(w)
if with_outer_cost_fn:
grads.append(loss_grad_fn(sub_batch, ws[-1], outer=True))
else:
grads.append(loss_grad_fn(sub_batch, ws[-1]))
# true outer grad
if average_across_steps:
true_outer_grad = sum(
ig.latent * ug
for ig, ug in zip(
grads,
update_grad_fn(
variables_np,
sub_batch,
inner_grad_steps,
average=True,
),
)
) / len(grads)
else:
true_outer_grad = ivy_backend.multiply(
update_grad_fn(variables_np, sub_batch, inner_grad_steps),
grads[-1].latent,
)
true_outer_grads.append(true_outer_grad)
true_outer_grad = sum(true_outer_grads) / len(true_outer_grads)
# true cost
true_cost_dict = {
1: {
True: {True: {1: 1.0202, 2: 1.5509}, False: {1: 1.0404, 2: 1.6018}},
False: {
True: {1: -1.0202, 2: -1.5509},
False: {1: -1.0404, 2: -1.6018},
},
},
2: {
True: {
True: {1: 1.0409441, 2: 1.6042916},
False: {1: 1.0824323, 2: 1.7110746},
},
False: {
True: {1: -1.0409441, 2: -1.6042916},
False: {1: -1.0824323, 2: -1.7110746},
},
},
3: {
True: {
True: {1: 1.0622487, 2: 1.6603187},
False: {1: 1.1261624, 2: 1.8284001},
},
False: {
True: {1: -1.0622487, 2: -1.6603187},
False: {1: -1.1261624, 2: -1.8284001},
},
},
}
true_cost = true_cost_dict[inner_grad_steps][with_outer_cost_fn][
average_across_steps
][num_tasks]
# meta update
rets = ivy_backend.maml_step(
batch,
inner_cost_fn,
outer_cost_fn if with_outer_cost_fn else None,
variables,
inner_grad_steps,
inner_learning_rate,
average_across_steps=average_across_steps,
batched=batched,
return_inner_v=return_inner_v,
stop_gradients=stop_gradients,
)
calc_cost = rets[0]
if stop_gradients:
assert ivy_backend.equal(
ivy_backend.functional.ivy._is_variable(calc_cost, exclusive=True),
False,
)
assert np.allclose(ivy_backend.to_scalar(calc_cost), true_cost)
outer_grads = rets[1]
assert np.allclose(
ivy_backend.to_numpy(outer_grads.latent),
ivy_backend.to_numpy(true_outer_grad[0]),
rtol=1e-1,
atol=1e-1,
)
if return_inner_v:
inner_v_rets = rets[2]
assert isinstance(inner_v_rets, ivy_backend.Container)
if return_inner_v == "all":
assert list(inner_v_rets.cont_shape) == [num_tasks, 1]
elif return_inner_v == "first":
assert list(inner_v_rets.cont_shape) == [1, 1]
# Second Order #
# -------------#
# maml step unique vars
@pytest.mark.parametrize("inner_grad_steps", [1, 2, 3])
@pytest.mark.parametrize("with_outer_cost_fn", [True, False])
@pytest.mark.parametrize("average_across_steps", [True, False])
@pytest.mark.parametrize("batched", [True, False])
@pytest.mark.parametrize("stop_gradients", [True, False])
@pytest.mark.parametrize("num_tasks", [1, 2])
@pytest.mark.parametrize("return_inner_v", ["first", "all", False])
def test_maml_step_unique_vars(
on_device,
inner_grad_steps,
with_outer_cost_fn,
average_across_steps,
batched,
stop_gradients,
num_tasks,
return_inner_v,
backend_fw,
):
if backend_fw in ["numpy", "tensorflow"]:
# ToDo: work out why MAML does not work for tensorflow
# Numpy does not support gradients, jax does not support gradients on custom
# nested classes
pytest.skip()
with BackendHandler.update_backend(backend_fw) as ivy_backend:
# config
inner_learning_rate = 1e-2
variable_fn = ivy_backend.functional.ivy._variable
# create variables
if batched:
variables = ivy_backend.Container(
{
"latent": variable_fn(
ivy_backend.repeat(
ivy_backend.array([[0.0]], device=on_device),
num_tasks,
axis=0,
)
),
"weight": variable_fn(
ivy_backend.repeat(
ivy_backend.array([[1.0]], device=on_device),
num_tasks,
axis=0,
)
),
}
)
else:
variables = ivy_backend.Container(
{
"latent": variable_fn(ivy_backend.array([0.0], device=on_device)),
"weight": variable_fn(ivy_backend.array([1.0], device=on_device)),
}
)
# batch
batch = ivy_backend.Container(
{"x": ivy_backend.arange(1, num_tasks + 1, dtype="float32")}
)
# inner cost function
def inner_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost - (sub_batch_in["x"] * sub_v["latent"] * sub_v["weight"])[0]
return cost / batch_size
# outer cost function
def outer_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost + (sub_batch_in["x"] * sub_v["latent"] * sub_v["weight"])[0]
return cost / batch_size
# numpy
weight_np = ivy_backend.to_numpy(variables.weight[0:1])
latent_np = ivy_backend.to_numpy(variables.latent[0:1])
batch_np = batch.cont_map(lambda x, kc: ivy_backend.to_numpy(x))
# true gradient
all_outer_grads = []
for sub_batch in batch_np.cont_unstack_conts(0, True, num_tasks):
all_outer_grads.append(
[
(
-2
* i
* inner_learning_rate
* weight_np
* sub_batch["x"][0] ** 2
- sub_batch["x"][0] * latent_np
)
* (-1 if with_outer_cost_fn else 1)
for i in range(inner_grad_steps + 1)
]
)
if average_across_steps:
true_outer_grad = (
sum(sum(og) / len(og) for og in all_outer_grads) / num_tasks
)
else:
true_outer_grad = sum(og[-1] for og in all_outer_grads) / num_tasks
# true cost
true_cost_dict = {
1: {
True: {True: {1: 0.005, 2: 0.0125}, False: {1: 0.01, 2: 0.025}},
False: {True: {1: -0.005, 2: -0.0125}, False: {1: -0.01, 2: -0.025}},
},
2: {
True: {True: {1: 0.01, 2: 0.025}, False: {1: 0.02, 2: 0.05}},
False: {True: {1: -0.01, 2: -0.025}, False: {1: -0.02, 2: -0.05}},
},
3: {
True: {True: {1: 0.015, 2: 0.0375}, False: {1: 0.03, 2: 0.075}},
False: {True: {1: -0.015, 2: -0.0375}, False: {1: -0.03, 2: -0.075}},
},
}
true_cost = true_cost_dict[inner_grad_steps][with_outer_cost_fn][
average_across_steps
][num_tasks]
# meta update
rets = ivy_backend.maml_step(
batch,
inner_cost_fn,
outer_cost_fn if with_outer_cost_fn else None,
variables,
inner_grad_steps,
inner_learning_rate,
average_across_steps=average_across_steps,
batched=batched,
inner_v="latent",
outer_v="weight",
return_inner_v=return_inner_v,
stop_gradients=stop_gradients,
)
calc_cost = rets[0]
if stop_gradients:
assert ivy_backend.equal(
ivy_backend.functional.ivy._is_variable(calc_cost, exclusive=True),
False,
)
assert np.allclose(ivy_backend.to_scalar(calc_cost), true_cost)
outer_grads = rets[1]
assert np.allclose(
ivy_backend.to_numpy(outer_grads.weight), np.array(true_outer_grad)
)
if return_inner_v:
inner_v_rets = rets[2]
assert isinstance(inner_v_rets, ivy_backend.Container)
if return_inner_v == "all":
assert list(inner_v_rets.cont_shape) == [num_tasks, 1]
elif return_inner_v == "first":
assert list(inner_v_rets.cont_shape) == [1, 1]
# reptile step
@pytest.mark.parametrize("inner_grad_steps", [1, 2, 3])
@pytest.mark.parametrize("batched", [True, False])
@pytest.mark.parametrize("stop_gradients", [True, False])
@pytest.mark.parametrize("num_tasks", [1, 2])
@pytest.mark.parametrize("return_inner_v", ["first", "all", False])
def test_reptile_step(
on_device,
inner_grad_steps,
batched,
stop_gradients,
num_tasks,
return_inner_v,
backend_fw,
):
if backend_fw == "numpy":
# Numpy does not support gradients, jax does not support gradients on custom
# nested classes,
pytest.skip()
with BackendHandler.update_backend(backend_fw) as ivy_backend:
# config
inner_learning_rate = 1e-2
variable_fn = ivy_backend.functional.ivy._variable
# create variable
if batched:
variables = ivy_backend.Container(
{
"latent": variable_fn(
ivy_backend.repeat(
ivy_backend.array([[1.0]], device=on_device),
num_tasks,
axis=0,
)
)
}
)
else:
variables = ivy_backend.Container(
{"latent": variable_fn(ivy_backend.array([1.0], device=on_device))}
)
# batch
batch = ivy_backend.Container(
{"x": ivy_backend.arange(1, num_tasks + 1, dtype="float32")}
)
# inner cost function
def inner_cost_fn(batch_in, v):
cost = 0
batch_size = batch_in.cont_shape[0]
for sub_batch_in, sub_v in zip(
batch_in.cont_unstack_conts(0, keepdims=True),
v.cont_unstack_conts(0, keepdims=True),
):
cost = cost - (sub_batch_in["x"] * sub_v["latent"] ** 2)[0]
return cost / batch_size
# numpy
latent_np = ivy_backend.to_numpy(variables.latent[0:1])
batch_np = batch.cont_map(lambda x, kc: ivy_backend.to_numpy(x))
# loss grad function
def loss_grad_fn(sub_batch_in, w_in):
return -2 * sub_batch_in["x"][0] * w_in
# true gradient
true_outer_grads = []
for sub_batch in batch_np.cont_unstack_conts(0, True, num_tasks):
ws = []
grads = []
ws.append(latent_np)
for step in range(inner_grad_steps):
update_grad = loss_grad_fn(sub_batch, ws[-1])
w = ws[-1] - inner_learning_rate * update_grad
grads.append(update_grad)
ws.append(w)
grads.append(loss_grad_fn(sub_batch, ws[-1]))
# true outer grad
true_outer_grad = sum(grads) / len(grads)
true_outer_grads.append(true_outer_grad)
true_outer_grad = (
sum(true_outer_grads) / len(true_outer_grads)
) / inner_learning_rate
# true cost
true_cost_dict = {
1: {1: -1.0202, 2: -1.5509},
2: {1: -1.0409441, 2: -1.6042916},
3: {1: -1.0622487, 2: -1.6603187},
}
true_cost = true_cost_dict[inner_grad_steps][num_tasks]
# meta update
rets = ivy_backend.reptile_step(
batch,
inner_cost_fn,
variables,
inner_grad_steps,
inner_learning_rate,
batched=batched,
return_inner_v=return_inner_v,
stop_gradients=stop_gradients,
)
calc_cost = rets[0]
if stop_gradients:
assert ivy_backend.equal(
ivy_backend.functional.ivy._is_variable(calc_cost, exclusive=True),
False,
)
assert np.allclose(ivy_backend.to_scalar(calc_cost), true_cost)
outer_grads = rets[1]
assert np.allclose(
ivy_backend.to_numpy(outer_grads.latent[0]), np.array(true_outer_grad)
)
if return_inner_v:
inner_v_rets = rets[2]
assert isinstance(inner_v_rets, ivy_backend.Container)
if return_inner_v == "all":
assert list(inner_v_rets.cont_shape) == [num_tasks, 1]
elif return_inner_v == "first":
assert list(inner_v_rets.cont_shape) == [1, 1]
| ivy/ivy_tests/test_ivy/test_functional/test_core/test_meta.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_meta.py",
"repo_id": "ivy",
"token_count": 25578
} | 57 |
# global
from hypothesis import strategies as st, assume
import hypothesis.extra.numpy as nph
import numpy as np
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers.hypothesis_helpers.general_helpers import sizes_
from ivy_tests.test_ivy.helpers import handle_test, create_concatenable_arrays_dtypes
from ivy.functional.ivy.experimental.manipulation import _check_bounds
from ivy_tests.test_ivy.test_functional.test_core.test_manipulation import _get_splits
# --- Helpers --- #
# --------------- #
@st.composite
def _as_strided_helper(draw):
dtype, x = draw(helpers.dtype_and_values(min_num_dims=1, max_num_dims=5))
x = x[0]
itemsize = x.itemsize
shape = draw(helpers.get_shape(min_num_dims=1, max_num_dims=5))
new_ndim = len(shape)
strides = draw(
st.lists(
st.integers(min_value=1, max_value=16),
min_size=new_ndim,
max_size=new_ndim,
).filter(lambda x: all(x[i] % itemsize == 0 for i in range(new_ndim)))
)
assume(_check_bounds(x.shape, shape, strides, itemsize))
return dtype, x, shape, strides
@st.composite
def _associative_scan_helper(draw):
input_dtype = draw(
st.shared(
st.sampled_from(draw(helpers.get_dtypes("float"))),
key="shared_dtype",
).filter(lambda _x: "float16" not in _x)
)
random_size = draw(
st.shared(helpers.ints(min_value=1, max_value=5), key="shared_size")
)
shared_size = draw(
st.shared(helpers.ints(min_value=1, max_value=5), key="shared_size")
)
shape = (random_size, shared_size, shared_size)
matrix = draw(
helpers.array_values(
dtype=input_dtype,
shape=shape,
min_value=1,
max_value=10,
)
)
axis = draw(
helpers.get_axis(
shape=shape,
allow_neg=False,
force_int=True,
).filter(lambda _x: _x < len(shape) - 2)
)
return [input_dtype], matrix, axis
@st.composite
def _concat_from_sequence_helper(draw):
dtypes, arrays, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=helpers.ints(min_value=1, max_value=6),
ret_shape=True,
min_num_dims=2,
min_dim_size=2,
shared_dtype=True,
)
)
axis = draw(
helpers.get_axis(
shape=shape,
force_int=True,
)
)
return dtypes, arrays, axis
@st.composite
def _flatten_data_helper(draw):
mixed_fn_compos = draw(st.booleans())
is_torch_backend = ivy.current_backend_str() == "torch"
dtype_and_x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"valid", mixed_fn_compos=mixed_fn_compos
),
shape=st.shared(helpers.get_shape(), key="flatten_shape"),
)
)
axes = draw(
helpers.get_axis(
shape=st.shared(helpers.get_shape(), key="flatten_shape"),
min_size=2,
max_size=2,
unique=False,
force_tuple=True,
)
)
order = draw(st.sampled_from(["C", "F"]))
if not mixed_fn_compos and is_torch_backend:
order = "C"
return dtype_and_x, axes, order
@st.composite
def _fold_data(draw):
shape = draw(
helpers.get_shape(
min_num_dims=2, max_num_dims=5, min_dim_size=2, max_dim_size=3
)
)
mode = draw(helpers.ints(min_value=0, max_value=len(shape) - 1))
reduced_dims = int(ivy.prod(shape[0:mode]) * ivy.prod(shape[mode + 1 :]))
unfolded_shape = (shape[mode], reduced_dims)
dtype, input = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), shape=unfolded_shape
)
)
return dtype, input, shape, mode
@st.composite
def _get_dtype_values_k_axes_for_rot90(
draw,
available_dtypes,
min_value=None,
max_value=None,
allow_inf=False,
exclude_min=False,
exclude_max=False,
min_num_dims=1,
max_num_dims=10,
min_dim_size=1,
max_dim_size=10,
):
shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
k = draw(helpers.ints(min_value=-4, max_value=4))
axes = draw(
st.lists(
helpers.ints(min_value=-len(shape), max_value=len(shape) - 1),
min_size=2,
max_size=2,
unique=True,
).filter(lambda axes: abs(axes[0] - axes[1]) != len(shape))
)
dtype = draw(st.sampled_from(draw(available_dtypes)))
values = draw(
helpers.array_values(
dtype=dtype,
shape=shape,
min_value=min_value,
max_value=max_value,
allow_inf=allow_inf,
exclude_min=exclude_min,
exclude_max=exclude_max,
large_abs_safety_factor=72,
small_abs_safety_factor=72,
safety_factor_scale="log",
)
)
return [dtype], values, k, axes
@st.composite
def _matricize_data(draw):
input_dtype, input, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
ret_shape=True,
min_num_dims=2,
max_num_dims=5,
)
)
ndims = len(shape)
dims = {*range(ndims)}
row_modes = set(
draw(st.lists(helpers.ints(min_value=0, max_value=ndims - 1), min_size=1))
)
col_modes = dims - row_modes
return input_dtype, input, row_modes, col_modes
@st.composite
def _pad_helper(draw):
mode = draw(
st.sampled_from(
[
"constant",
"dilated",
"edge",
"linear_ramp",
"maximum",
"mean",
"median",
"minimum",
"reflect",
"symmetric",
"wrap",
]
)
)
if mode in ["median", "minimum", "maximum", "linear_ramp"]:
dtypes = "float"
else:
dtypes = "numeric"
dtype, input, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(dtypes),
ret_shape=True,
min_num_dims=1,
min_value=-100,
max_value=100,
).filter(lambda x: x[0][0] not in ["float16", "bfloat16"])
)
ndim = len(shape)
min_dim = min(shape)
if mode == "dilated":
pad_width = draw(
st.lists(
st.tuples(
st.integers(min_value=-min_dim, max_value=min_dim),
st.integers(min_value=-min_dim, max_value=min_dim),
st.integers(min_value=0, max_value=min_dim),
),
min_size=ndim,
max_size=ndim,
).filter(
lambda x: all(shape[i] + x[i][0] + x[i][1] >= 0 for i in range(ndim))
)
)
constant_values = draw(helpers.number(min_value=0, max_value=100))
else:
pad_width = draw(_st_tuples_or_int(ndim))
constant_values = draw(_st_tuples_or_int(ndim))
stat_length = draw(_st_tuples_or_int(ndim, min_val=2))
end_values = draw(_st_tuples_or_int(ndim))
return dtype, input[0], pad_width, stat_length, constant_values, end_values, mode
@st.composite
def _partial_fold_data(draw):
shape = draw(
helpers.get_shape(
min_num_dims=2, max_num_dims=5, min_dim_size=2, max_dim_size=3
)
)
ndims = len(shape)
mode_and_skip_begin = draw(
st.lists(
helpers.ints(min_value=0, max_value=ndims - 1), min_size=2, max_size=2
).filter(lambda nums: np.sum(nums) <= ndims - 1)
)
skip_begin, mode = sorted(mode_and_skip_begin)
skip_end = draw(
helpers.ints(min_value=0, max_value=ndims - (skip_begin + mode) - 1)
)
if skip_end != 0:
reduced_dims = int(
ivy.prod(shape[skip_begin : skip_begin + mode])
* ivy.prod(shape[skip_begin + mode + 1 : -skip_end])
)
unfolded_shape = (
*shape[:skip_begin],
shape[skip_begin + mode],
reduced_dims,
*shape[-skip_end:],
)
else:
reduced_dims = int(
ivy.prod(shape[skip_begin : skip_begin + mode])
* ivy.prod(shape[skip_begin + mode + 1 :])
)
unfolded_shape = (*shape[:skip_begin], shape[skip_begin + mode], reduced_dims)
dtype, input = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), shape=unfolded_shape
)
)
return dtype, input, skip_begin, shape, mode
@st.composite
def _partial_tensor_to_vec_data(draw):
input_dtype, input, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, ret_shape=True
)
)
ndims = len(shape)
skip_begin = draw(helpers.ints(min_value=0, max_value=ndims - 1))
skip_end = draw(helpers.ints(min_value=0, max_value=ndims - 1 - skip_begin))
return input_dtype, input, skip_begin, skip_end
@st.composite
def _partial_unfold_data(draw):
dtype, input = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
)
)
ndims = len(input[0].shape)
mode_and_skip_begin = draw(
st.lists(
helpers.ints(min_value=0, max_value=ndims - 1), min_size=2, max_size=2
).filter(lambda nums: np.sum(nums) <= ndims - 1)
)
skip_begin, mode = sorted(mode_and_skip_begin)
skip_end = draw(
helpers.ints(min_value=0, max_value=ndims - (skip_begin + mode) - 1)
)
ravel_tensors = draw(st.booleans())
return dtype, input, mode, skip_begin, skip_end, ravel_tensors
@st.composite
def _partial_vec_to_tensor(draw):
shape = draw(helpers.get_shape(min_num_dims=1, max_num_dims=5))
numel = int(ivy.prod(shape))
input_dtype, input = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), shape=(numel,)
)
)
ndims = len(shape)
skip_begin = draw(helpers.ints(min_value=0, max_value=ndims - 1))
return input_dtype, input, shape, skip_begin
@st.composite
def _soft_thresholding_data(draw):
x_min, x_max = 1e-2, 1e2
x_dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
ret_shape=True,
min_value=x_min,
max_value=x_max,
)
)
threshold_choice_1 = draw(helpers.floats(min_value=x_min, max_value=x_max))
t_dtype, threshold_choice_2 = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=shape,
min_value=x_min,
max_value=x_max,
)
)
threshold = draw(st.sampled_from([threshold_choice_1, threshold_choice_2]))
return x_dtype + t_dtype, x, threshold
@st.composite
def _st_col_row_stack_arrays(draw, stack_dim):
ndim = draw(st.integers(min_value=2, max_value=5))
dtype = draw(st.sampled_from(draw(helpers.get_dtypes("valid"))))
arrays, dtypes = draw(
create_concatenable_arrays_dtypes(
min_num_dims=ndim,
max_num_dims=ndim,
min_num_arrays=1,
max_num_arrays=3,
concat_dim=stack_dim,
dtypes=[dtype],
)
)
if ndim == 2:
non_stack_dim_len = arrays[0].shape[1 - stack_dim]
add_1D = draw(st.booleans())
if add_1D:
arrays_1D, dtypes_1D = draw(
create_concatenable_arrays_dtypes(
min_num_dims=None,
max_num_dims=None,
min_num_arrays=1,
max_num_arrays=2,
concat_dim=None,
dtypes=[dtype],
common_shape=[non_stack_dim_len],
)
)
arrays += arrays_1D
dtypes += dtypes_1D
if non_stack_dim_len == 1:
add_0D = draw(st.booleans())
if add_0D:
arrays_0D, dtypes_0D = draw(
create_concatenable_arrays_dtypes(
min_num_dims=0,
max_num_dims=0,
min_num_arrays=1,
max_num_arrays=2,
concat_dim=None,
dtypes=[dtype],
)
)
arrays += arrays_0D
dtypes += dtypes_0D
arrays_dtypes = draw(st.permutations(list(zip(arrays, dtypes))))
arrays, dtypes = list(zip(*arrays_dtypes))
return list(arrays), list(dtypes)
def _st_tuples_or_int(n_pairs, min_val=0):
return st.one_of(
st_tuples(
st.tuples(
st.integers(min_value=min_val, max_value=4),
st.integers(min_value=min_val, max_value=4),
),
min_size=n_pairs,
max_size=n_pairs,
),
helpers.ints(min_value=min_val, max_value=4),
)
@st.composite
def put_along_axis_helper(draw):
input_dtype, x, axis, shape = draw(
helpers.dtype_values_axis(
# does not work for bool yet because scatter_nd doesn't
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=3,
min_dim_size=2,
max_dim_size=5,
min_value=-1e2,
max_value=1e2,
valid_axis=True,
force_int_axis=True,
ret_shape=True,
)
)
idx_shape = list(shape)
idx_shape[axis] = 1
ind_dtype, indices = draw(
helpers.dtype_and_values(
available_dtypes=["int64"],
shape=idx_shape,
min_value=0,
max_value=len(idx_shape) - 2,
)
)
_, values = draw(
helpers.dtype_and_values(
available_dtypes=input_dtype,
shape=idx_shape,
min_value=0,
max_value=100,
)
)
return input_dtype + ind_dtype + input_dtype, x[0], indices[0], values[0], axis
# --- Main --- #
# ------------ #
def st_tuples(elements, *, min_size=0, max_size=None, unique_by=None, unique=False):
return st.lists(
elements,
min_size=min_size,
max_size=max_size,
unique_by=unique_by,
unique=unique,
).map(tuple)
@handle_test(
fn_tree="as_strided",
all_args=_as_strided_helper(),
test_with_out=st.just(False),
test_gradients=st.just(False),
ground_truth_backend="numpy",
test_with_copy=st.just(True),
)
def test_as_strided(*, all_args, test_flags, backend_fw, fn_name, on_device):
dtype, x, shape, strides = all_args
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x,
shape=shape,
strides=strides,
)
# associative_scan
@handle_test(
fn_tree="functional.ivy.experimental.associative_scan",
dtype_elems_axis=_associative_scan_helper(),
fn=st.sampled_from([ivy.matmul, ivy.multiply, ivy.add]),
reverse=st.booleans(),
test_with_out=st.just(False),
ground_truth_backend="jax",
)
def test_associative_scan(
*, dtype_elems_axis, fn, reverse, fn_name, test_flags, backend_fw, on_device
):
dtype, elems, axis = dtype_elems_axis
helpers.test_function(
fn_name=fn_name,
test_flags=test_flags,
backend_to_test=backend_fw,
on_device=on_device,
input_dtypes=dtype,
elems=elems,
fn=fn,
reverse=reverse,
axis=axis,
)
# atleast_1d
@handle_test(
fn_tree="functional.ivy.experimental.atleast_1d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=helpers.ints(min_value=1, max_value=5),
),
test_with_out=st.just(False),
test_gradients=st.just(False),
test_with_copy=st.just(True),
)
def test_atleast_1d(dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtypes, arrays = dtype_and_x
kw = {}
for i, (array, idtype) in enumerate(zip(arrays, input_dtypes)):
kw[f"x{i}"] = np.asarray(array, dtype=idtype)
test_flags.num_positional_args = len(kw)
helpers.test_function(
input_dtypes=input_dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
**kw,
)
# atleast_2d
@handle_test(
fn_tree="functional.ivy.experimental.atleast_2d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=helpers.ints(min_value=1, max_value=5),
),
test_with_out=st.just(False),
test_gradients=st.just(False),
test_with_copy=st.just(True),
)
def test_atleast_2d(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtypes, arrays = dtype_and_x
kw = {}
for i, (array, idtype) in enumerate(zip(arrays, input_dtypes)):
kw[f"x{i}"] = np.asarray(array, dtype=idtype)
test_flags.num_positional_args = len(kw)
helpers.test_function(
input_dtypes=input_dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
**kw,
)
@handle_test(
fn_tree="functional.ivy.experimental.atleast_3d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=helpers.ints(min_value=1, max_value=5),
shared_dtype=True,
),
test_with_out=st.just(False),
test_gradients=st.just(False),
test_with_copy=st.just(True),
)
def test_atleast_3d(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtypes, arrays = dtype_and_x
arrys = {}
for i, (array, idtype) in enumerate(zip(arrays, input_dtypes)):
arrys[f"x{i}"] = np.asarray(array, dtype=idtype)
test_flags.num_positional_args = len(arrys)
helpers.test_function(
input_dtypes=input_dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
**arrys,
)
# broadcast_shapes
@handle_test(
fn_tree="functional.ivy.experimental.broadcast_shapes",
shapes=nph.mutually_broadcastable_shapes(
num_shapes=4, min_dims=1, max_dims=5, min_side=1, max_side=5
),
test_instance_method=st.just(False),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_broadcast_shapes(*, shapes, test_flags, backend_fw, fn_name, on_device):
shape, _ = shapes
shapes = {f"shape{i}": shape[i] for i in range(len(shape))}
test_flags.num_positional_args = len(shapes)
helpers.test_function(
input_dtypes=["int64"],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
**shapes,
)
# column_stack
@handle_test(
fn_tree="functional.ivy.experimental.column_stack",
arrays_dtypes=_st_col_row_stack_arrays(stack_dim=1),
test_gradients=st.just(False),
)
def test_column_stack(*, arrays_dtypes, test_flags, backend_fw, fn_name, on_device):
arrays, dtypes = arrays_dtypes
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
arrays=arrays,
)
# concat_from_sequence
@handle_test(
fn_tree="functional.ivy.experimental.concat_from_sequence",
dtypes_arrays_axis=_concat_from_sequence_helper(),
new_axis=st.integers(min_value=0, max_value=1),
container_flags=st.just([False]),
test_instance_method=st.just(False),
)
def test_concat_from_sequence(
*, dtypes_arrays_axis, new_axis, test_flags, backend_fw, fn_name, on_device: str
):
dtypes, arrays, axis = dtypes_arrays_axis
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
*arrays,
new_axis=new_axis,
axis=axis,
)
# dsplit
@handle_test(
fn_tree="functional.ivy.experimental.dsplit",
test_with_copy=st.just(True),
)
def test_dsplit(
dtype_and_x, indices_or_sections, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
on_device=on_device,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
indices_or_sections=indices_or_sections,
)
# dstack
@handle_test(
fn_tree="functional.ivy.experimental.dstack",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shared_dtype=True,
num_arrays=helpers.ints(min_value=1, max_value=10),
shape=helpers.get_shape(
min_num_dims=1,
),
),
test_gradients=st.just(False),
)
def test_dstack(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
arrays=x,
)
@handle_test(
fn_tree="expand",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric", full=False),
shape=st.shared(
helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
key="value_shape",
),
),
shape=st.shared(
helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
key="value_shape",
),
container_flags=st.just([False]),
test_instance_method=st.just(False),
test_gradients=st.just(False),
test_with_copy=st.just(True),
)
def test_expand(*, dtype_and_x, shape, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
shape=shape,
)
# fill_diag
@handle_test(
fn_tree="fill_diagonal",
dt_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
max_num_dims=4,
min_dim_size=3,
max_dim_size=3,
num_arrays=2,
),
v=st.sampled_from([1, 2, 3, 10]),
v_is_array_like=st.booleans(),
wrap=st.booleans(),
test_with_out=st.just(False),
test_gradients=st.just(False),
ground_truth_backend="numpy",
)
def test_fill_diagonal(
*,
dt_a,
v,
v_is_array_like,
wrap,
test_flags,
backend_fw,
fn_name,
on_device,
):
dt, a = dt_a
if v_is_array_like:
v = a[1]
helpers.test_function(
input_dtypes=dt,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
a=a[0],
v=v,
wrap=wrap,
)
@handle_test(
fn_tree="functional.ivy.experimental.flatten",
data=_flatten_data_helper(),
test_with_copy=st.just(True),
)
def test_flatten(
*,
data,
test_flags,
backend_fw,
fn_name,
on_device,
):
(input_dtypes, x), axes, order = data
helpers.test_function(
input_dtypes=input_dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
start_dim=axes[0],
end_dim=axes[1],
order=order,
)
# fliplr
@handle_test(
fn_tree="functional.ivy.experimental.fliplr",
dtype_and_m=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
),
test_gradients=st.just(False),
test_with_copy=st.just(True),
)
def test_fliplr(*, dtype_and_m, test_flags, backend_fw, fn_name, on_device):
input_dtype, m = dtype_and_m
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
m=m[0],
)
# flipud
@handle_test(
fn_tree="functional.ivy.experimental.flipud",
dtype_and_m=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
test_gradients=st.just(False),
test_with_copy=st.just(True),
)
def test_flipud(*, dtype_and_m, test_flags, backend_fw, fn_name, on_device):
input_dtype, m = dtype_and_m
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
m=m[0],
)
@handle_test(
fn_tree="functional.ivy.experimental.fold",
data=_fold_data(),
)
def test_fold(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtype, input, shape, mode = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=input_dtype,
x=input[0],
mode=mode,
shape=shape,
)
# heaviside
@handle_test(
fn_tree="functional.ivy.experimental.heaviside",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
num_arrays=2,
shared_dtype=True,
),
test_gradients=st.just(False),
)
def test_heaviside(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x1=x[0],
x2=x[0],
)
# hsplit
# TODO: there is a failure with paddle (dtype('int32')) caused by the `_get_splits`
# method which returns a numpy array with a numpy dtype
@handle_test(
fn_tree="functional.ivy.experimental.hsplit",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="value_shape"),
),
indices_or_sections=_get_splits(allow_none=False, min_num_dims=2, axis=1),
test_gradients=st.just(False),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_hsplit(
dtype_and_x, indices_or_sections, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x = dtype_and_x
if (
not isinstance(indices_or_sections, int)
and not isinstance(indices_or_sections, list)
and indices_or_sections is not None
):
input_dtype = [*input_dtype, indices_or_sections.dtype]
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
indices_or_sections=indices_or_sections,
)
# hstack
@handle_test(
fn_tree="functional.ivy.experimental.hstack",
dtype_and_m=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shared_dtype=True,
num_arrays=helpers.ints(min_value=2, max_value=10),
shape=helpers.get_shape(
min_num_dims=1,
),
),
test_gradients=st.just(False),
)
def test_hstack(dtype_and_m, test_flags, backend_fw, fn_name, on_device):
input_dtype, m = dtype_and_m
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
arrays=m,
)
# i0
@handle_test(
fn_tree="functional.ivy.experimental.i0",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-10,
max_value=10,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
test_gradients=st.just(False),
)
def test_i0(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
@handle_test(
fn_tree="functional.ivy.experimental.matricize",
data=_matricize_data(),
)
def test_matricize(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtype, input, row_modes, column_modes = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=input_dtype,
x=input[0],
row_modes=row_modes,
column_modes=column_modes,
)
# moveaxis
@handle_test(
fn_tree="functional.ivy.experimental.moveaxis",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=-100,
max_value=100,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
),
source=helpers.get_axis(
allow_none=False,
unique=True,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
min_size=1,
force_int=True,
),
destination=helpers.get_axis(
allow_none=False,
unique=True,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
min_size=1,
force_int=True,
),
test_gradients=st.just(False),
test_with_copy=st.just(True),
)
def test_moveaxis(
*, dtype_and_a, source, destination, test_flags, backend_fw, fn_name, on_device
):
input_dtype, a = dtype_and_a
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
a=a[0],
source=source,
destination=destination,
)
@handle_test(
fn_tree="functional.ivy.experimental.pad",
ground_truth_backend="numpy",
dtype_and_input_and_other=_pad_helper(),
reflect_type=st.sampled_from(["even", "odd"]),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_pad(
*,
dtype_and_input_and_other,
reflect_type,
test_flags,
backend_fw,
fn_name,
on_device,
):
(
dtype,
input,
pad_width,
stat_length,
constant_values,
end_values,
mode,
) = dtype_and_input_and_other
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
input=input,
pad_width=pad_width,
mode=mode,
stat_length=stat_length,
constant_values=constant_values,
end_values=end_values,
reflect_type=reflect_type,
)
@handle_test(
fn_tree="functional.ivy.experimental.partial_fold",
data=_partial_fold_data(),
)
def test_partial_fold(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtype, input, skip_begin, shape, mode = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=input_dtype,
x=input[0],
mode=mode,
shape=shape,
skip_begin=skip_begin,
)
@handle_test(
fn_tree="functional.ivy.experimental.partial_tensor_to_vec",
data=_partial_tensor_to_vec_data(),
)
def test_partial_tensor_to_vec(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtype, input, skip_begin, skip_end = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=input_dtype,
x=input[0],
skip_begin=skip_begin,
skip_end=skip_end,
)
@handle_test(
fn_tree="functional.ivy.experimental.partial_unfold",
data=_partial_unfold_data(),
)
def test_partial_unfold(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtype, input, axis, skip_begin, skip_end, ravel_tensors = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=input_dtype,
input=input[0],
mode=axis,
skip_begin=skip_begin,
skip_end=skip_end,
ravel_tensors=ravel_tensors,
)
@handle_test(
fn_tree="functional.ivy.experimental.partial_vec_to_tensor",
data=_partial_vec_to_tensor(),
)
def test_partial_vec_to_tensor(*, data, test_flags, backend_fw, fn_name, on_device):
input_dtype, input, shape, skip_begin = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=input_dtype,
input=input[0],
shape=shape,
skip_begin=skip_begin,
)
# put_along_axis
@handle_test(
fn_tree="functional.ivy.experimental.put_along_axis",
args=put_along_axis_helper(),
# ToDo: test for "mean" when support has been added
mode=st.sampled_from(["sum", "min", "max", "mul", "replace"]),
test_with_out=st.just(False),
test_gradients=st.just(False),
ground_truth_backend="torch",
)
def test_put_along_axis(
*,
args,
mode,
test_flags,
backend_fw,
fn_name,
on_device,
):
dtypes, x, indices, values, axis = args
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
arr=x,
indices=indices,
values=values,
axis=axis,
mode=mode,
)
# rot90
@handle_test(
fn_tree="functional.ivy.experimental.rot90",
dtype_m_k_axes=_get_dtype_values_k_axes_for_rot90(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
test_gradients=st.just(False),
test_with_copy=st.just(True),
)
def test_rot90(dtype_m_k_axes, test_flags, backend_fw, fn_name, on_device):
input_dtype, m, k, axes = dtype_m_k_axes
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
m=m,
k=k,
axes=tuple(axes),
)
@handle_test(
fn_tree="functional.ivy.experimental.soft_thresholding",
data=_soft_thresholding_data(),
)
def test_soft_thresholding(*, data, test_flags, backend_fw, fn_name, on_device):
x_dtype, x, threshold = data
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=x_dtype,
x=x[0],
threshold=threshold,
)
@handle_test(
fn_tree="functional.ivy.experimental.take",
dtype_x_indices_axis=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("valid"),
indices_dtypes=["int32", "int64"],
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=5,
indices_same_dims=False,
valid_bounds=False,
),
mode=st.sampled_from(["clip", "wrap", "fill"]),
ground_truth_backend="jax",
)
def test_take(
*,
dtype_x_indices_axis,
mode,
test_flags,
backend_fw,
fn_name,
on_device,
):
dtypes, x, indices, axis, _ = dtype_x_indices_axis
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x,
indices=indices,
axis=axis,
mode=mode,
)
# take_along_axis
@handle_test(
fn_tree="functional.ivy.experimental.take_along_axis",
dtype_x_indices_axis=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("valid"),
indices_dtypes=["int32", "int64"],
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
indices_same_dims=True,
valid_bounds=False,
),
mode=st.sampled_from(["clip", "fill", "drop"]),
ground_truth_backend="jax",
test_gradients=st.just(False),
)
def test_take_along_axis(
*,
dtype_x_indices_axis,
mode,
test_flags,
backend_fw,
fn_name,
on_device,
):
dtypes, x, indices, axis, _ = dtype_x_indices_axis
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
arr=x,
indices=indices,
axis=axis,
mode=mode,
)
# top_k
@handle_test(
fn_tree="functional.ivy.experimental.top_k",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
force_int_axis=True,
valid_axis=True,
),
k=helpers.ints(min_value=1, max_value=4),
largest=st.booleans(),
sorted=st.booleans(),
test_gradients=st.just(False),
)
def test_top_k(
*, dtype_x_axis, k, largest, sorted, test_flags, backend_fw, fn_name, on_device
):
dtype, x, axis = dtype_x_axis
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
k=k,
axis=axis,
largest=largest,
sorted=sorted,
)
@handle_test(
fn_tree="trim_zeros",
dt_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_num_dims=1,
max_num_dims=1,
min_value=-100,
max_value=100,
),
test_with_out=st.just(False),
)
def test_trim_zeros(
*,
dt_a,
test_flags,
backend_fw,
fn_name,
on_device,
):
dt, a = dt_a
helpers.test_function(
input_dtypes=dt,
test_flags=test_flags,
on_device=on_device,
fw=backend_fw,
fn_name=fn_name,
a=a[0],
)
# unflatten
@handle_test(
fn_tree="functional.ivy.experimental.unflatten",
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
shape_key="shape",
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
force_int=True,
),
)
def test_unflatten(
*,
dtype_and_values,
on_device,
fn_name,
test_flags,
backend_fw,
shape,
axis,
):
shape_ = sizes_(shape, axis)
dtype, x = dtype_and_values
helpers.test_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
test_values=False,
x=x[0],
shape=shape_,
dim=axis,
)
@handle_test(
fn_tree="functional.ivy.experimental.unfold",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
valid_axis=True,
allow_neg_axes=False,
force_int_axis=True,
),
)
def test_unfold(*, dtype_values_axis, test_flags, backend_fw, fn_name, on_device):
input_dtype, input, axis = dtype_values_axis
if axis is None:
axis = 0
helpers.test_function(
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
input_dtypes=input_dtype,
x=input[0],
mode=axis,
)
# unique_consecutive
@handle_test(
fn_tree="unique_consecutive",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=2,
force_int_axis=True,
valid_axis=True,
),
none_axis=st.booleans(),
test_with_out=st.just(False),
test_gradients=st.just(False),
ground_truth_backend="torch",
)
def test_unique_consecutive(
*, dtype_x_axis, none_axis, test_flags, backend_fw, fn_name, on_device
):
dtype, x, axis = dtype_x_axis
if none_axis:
axis = None
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
axis=axis,
)
# vsplit
@handle_test(
fn_tree="functional.ivy.experimental.vsplit",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="value_shape"),
),
indices_or_sections=_get_splits(allow_none=False, min_num_dims=2, axis=0),
test_gradients=st.just(False),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_vsplit(
dtype_and_x, indices_or_sections, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
on_device=on_device,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
indices_or_sections=indices_or_sections,
)
# vstack
@handle_test(
fn_tree="functional.ivy.experimental.vstack",
arrays_dtypes=_st_col_row_stack_arrays(stack_dim=0),
test_gradients=st.just(False),
)
def test_vstack(*, arrays_dtypes, test_flags, backend_fw, fn_name, on_device):
arrays, dtypes = arrays_dtypes
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
arrays=arrays,
)
| ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_manipulation.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_manipulation.py",
"repo_id": "ivy",
"token_count": 22804
} | 58 |
import ivy
import numpy as np
import pytest
@pytest.mark.parametrize(
("shape1", "shape2", "shape3"),
[
(
(2, 4, 3),
(3, 5, 2),
(2, 6, 2),
)
],
)
def test_tr_to_tensor(shape1, shape2, shape3):
# Create ground truth TR factors
factors = [
ivy.random_uniform(shape=shape1),
ivy.random_uniform(shape=shape2),
ivy.random_uniform(shape=shape3),
]
# Create tensor
tensor = ivy.einsum("iaj,jbk,kci->abc", *factors)
# Check that TR factors re-assemble to the original tensor
assert np.allclose(tensor, ivy.TRTensor.tr_to_tensor(factors), atol=1e-6, rtol=1e-6)
@pytest.mark.parametrize(
("rank1", "rank2"),
[((2, 3, 4, 2), (2, 3, 4, 2, 3))],
)
def test_validate_tr_rank(rank1, rank2):
tensor_shape = tuple(np.random.randint(1, 100, size=4))
n_param_tensor = np.prod(tensor_shape)
# Rounding = floor
rank = ivy.TRTensor.validate_tr_rank(tensor_shape, rank="same", rounding="floor")
n_param = ivy.TRTensor.tr_n_param(tensor_shape, rank)
assert n_param <= n_param_tensor
# Rounding = ceil
rank = ivy.TRTensor.validate_tr_rank(tensor_shape, rank="same", rounding="ceil")
n_param = ivy.TRTensor.tr_n_param(tensor_shape, rank)
assert n_param >= n_param_tensor
# Integer rank
with np.testing.assert_raises(ValueError):
ivy.TRTensor.validate_tr_rank(tensor_shape, rank=rank1)
with np.testing.assert_raises(ValueError):
ivy.TRTensor.validate_tr_rank(tensor_shape, rank=rank2)
# These tests have been adapted from Tensorly
# https://github.com/tensorly/tensorly/blob/main/tensorly/tests/test_tr_tensor.py
@pytest.mark.parametrize(
("true_shape", "true_rank"),
[
(
(6, 4, 5),
(3, 2, 2, 3),
)
],
)
def test_validate_tr_tensor(true_shape, true_rank):
factors = ivy.random_tr(true_shape, true_rank).factors
# Check correct rank and shapes are returned
shape, rank = ivy.TRTensor.validate_tr_tensor(factors)
np.testing.assert_equal(
shape,
true_shape,
err_msg=f"Returned incorrect shape (got {shape}, expected {true_shape})",
)
np.testing.assert_equal(
rank,
true_rank,
err_msg=f"Returned incorrect rank (got {rank}, expected {true_rank})",
)
# One of the factors has the wrong ndim
factors[0] = ivy.random_uniform(shape=(4, 4))
with np.testing.assert_raises(ValueError):
ivy.TRTensor.validate_tr_tensor(factors)
# Consecutive factors ranks don't match
factors[0] = ivy.random_uniform(shape=(3, 6, 4))
with np.testing.assert_raises(ValueError):
ivy.TRTensor.validate_tr_tensor(factors)
# Boundary conditions not respected
factors[0] = ivy.random_uniform(shape=(2, 6, 2))
with np.testing.assert_raises(ValueError):
ivy.TRTensor.validate_tr_tensor(factors)
| ivy/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tr_tensor.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tr_tensor.py",
"repo_id": "ivy",
"token_count": 1335
} | 59 |
"""Collection of tests for Ivy modules."""
# global
import os
from hypothesis import given, strategies as st
import numpy as np
# local
import ivy
from ivy.functional.ivy.gradients import _variable
import ivy_tests.test_ivy.helpers as helpers
class TrainableModule(ivy.Module):
def __init__(
self,
in_size,
out_size,
device=None,
hidden_size=64,
v=None,
with_partial_v=False,
):
self._linear0 = ivy.Linear(in_size, hidden_size, device=device)
self._linear1 = ivy.Linear(hidden_size, hidden_size, device=device)
self._linear2 = ivy.Linear(hidden_size, out_size, device=device)
ivy.Module.__init__(self, device=device, v=v, with_partial_v=with_partial_v)
def _forward(self, x):
x = ivy.expand_dims(x, axis=0)
x = ivy.tanh(self._linear0(x))
x = ivy.tanh(self._linear1(x))
return ivy.tanh(self._linear2(x))[0]
class TrainableModuleWithList(ivy.Module):
def __init__(self, in_size, out_size, device=None, hidden_size=64):
linear0 = ivy.Linear(in_size, hidden_size, device=device)
linear1 = ivy.Linear(hidden_size, hidden_size, device=device)
linear2 = ivy.Linear(hidden_size, out_size, device=device)
self._layers = [linear0, linear1, linear2]
ivy.Module.__init__(self, device=device)
def _forward(self, x):
x = ivy.expand_dims(x, axis=0)
x = ivy.tanh(self._layers[0](x))
x = ivy.tanh(self._layers[1](x))
return ivy.tanh(self._layers[2](x))[0]
class ModuleWithNoneAttribute(ivy.Module):
def __init__(self, device=None, hidden_size=64):
self.some_attribute = None
ivy.Module.__init__(self, device=device)
def _forward(self, x):
return x
class TrainableModuleWithDuplicate(ivy.Module):
def __init__(self, channels, same_layer, device=None):
if same_layer:
linear = ivy.Linear(channels, channels, device=device)
self._linear0 = linear
self._linear1 = linear
else:
w = _variable(ivy.ones((channels, channels)))
b0 = _variable(ivy.ones((channels,)))
b1 = _variable(ivy.ones((channels,)))
v0 = ivy.Container({"w": w, "b": b0})
v1 = ivy.Container({"w": w, "b": b1})
self._linear0 = ivy.Linear(channels, channels, device=device, v=v0)
self._linear1 = ivy.Linear(channels, channels, device=device, v=v1)
ivy.Module.__init__(self)
def _forward(self, x):
x = self._linear0(x)
return self._linear1(x)
class TrainableModuleWithDict(ivy.Module):
def __init__(self, in_size, out_size, device=None, hidden_size=64):
linear0 = ivy.Linear(in_size, hidden_size, device=device)
linear1 = ivy.Linear(hidden_size, hidden_size, device=device)
linear2 = ivy.Linear(hidden_size, out_size, device=device)
self._layers = {"linear0": linear0, "linear1": linear1, "linear2": linear2}
ivy.Module.__init__(self, device=device)
def _forward(self, x):
x = ivy.expand_dims(x, axis=0)
x = ivy.tanh(self._layers["linear0"](x))
x = ivy.tanh(self._layers["linear1"](x))
return ivy.tanh(self._layers["linear2"](x))[0]
class WithCustomVarStructure(ivy.Module):
def __init__(self, in_size, out_size, device=None, hidden_size=64):
self._linear0 = ivy.Linear(in_size, hidden_size, device=device)
self._linear1 = ivy.Linear(hidden_size, hidden_size, device=device)
self._linear2 = ivy.Linear(hidden_size, out_size, device=device)
ivy.Module.__init__(self, device=device)
def _create_variables(self, device, dtype):
return ivy.Container(x=self._linear0.v, y=self._linear1.v, z=self._linear2.v)
def _forward(self, x):
pass
class DoubleLinear(ivy.Module):
def __init__(self, in_size, out_size, device=None, hidden_size=64):
self._l0 = ivy.Linear(in_size, hidden_size, device=device)
self._l1 = ivy.Linear(hidden_size, out_size, device=device)
ivy.Module.__init__(self, device=device)
def _forward(self, x):
x = self._l0(x)
x = self._l1(x)
return x
class WithNestedModules(ivy.Module):
def __init__(self, in_size, out_size, device=None, hidden_size=64):
self._dl0 = DoubleLinear(in_size, hidden_size, device=device)
self._dl1 = DoubleLinear(hidden_size, hidden_size, device=device)
ivy.Module.__init__(self, device=device)
def _forward(self, x):
x = self._dl0(x)
x = self._dl1(x)
x = self._dl1(x)
return x
class ModuleWithBuffer(ivy.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def _forward(self, *args, **kwargs):
pass
class ModuleWithTrainEval(ivy.Module):
def __init__(self):
super().__init__()
def _forward():
pass
@given(buffer=st.just({"var1": np.ones((1, 2))}))
def test_get_buffers(buffer, backend_fw):
with ivy.utils.backend.ContextManager(backend_fw):
module = ModuleWithBuffer()
buffers = ivy.Container()
for name, value in buffer.items():
value = ivy.array(value)
buffers[name] = value
module.register_buffer(name, value)
assert module.buffers == buffers
@given(
batch_shape=helpers.get_shape(
min_num_dims=2, max_num_dims=2, min_dim_size=1, max_dim_size=2
),
input_channels=st.integers(min_value=2, max_value=5),
output_channels=st.integers(min_value=2, max_value=5),
)
def test_module_save_and_load_as_pickled(
batch_shape, input_channels, output_channels, on_device, backend_fw
):
save_filepath = "module.pickled"
# smoke test
if backend_fw == "numpy":
# NumPy does not support gradients
return
with ivy.utils.backend.ContextManager(backend_fw):
x = ivy.astype(
ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels),
"float32",
)
module = TrainableModule(input_channels, output_channels, device=on_device)
def loss_fn(v_):
out = module(x, v=v_)
return ivy.mean(out)
module.save(save_filepath)
assert os.path.exists(save_filepath)
loaded_module = ivy.Module.load(save_filepath)
# train
loss, grads = ivy.execute_with_gradients(loss_fn, module.v)
module.v = ivy.gradient_descent_update(module.v, grads, 1e-3)
loaded_loss, loaded_grads = ivy.execute_with_gradients(loss_fn, loaded_module.v)
loaded_module.v = ivy.gradient_descent_update(
loaded_module.v, loaded_grads, 1e-3
)
# type test
assert ivy.is_array(loaded_loss)
assert isinstance(loaded_grads, ivy.Container)
# cardinality test
assert loaded_loss.shape == ()
# value test
assert ivy.all_equal(loaded_loss == loss)
assert ivy.Container.all(loaded_module.v == module.v).cont_all_true()
os.remove(save_filepath)
@given(dummy=st.booleans())
def test_module_to_device(dummy, on_device, backend_fw):
with ivy.utils.backend.ContextManager(backend_fw):
model = TrainableModule(5, 5)
model.to_device(on_device)
def assertion(x, on_device):
if x != on_device:
print(f"{x} is not equal to {on_device}")
raise AssertionError
def model_assert(mod, on_device):
for obj in mod.v.values():
if isinstance(obj, ivy.Module):
return model_assert(obj, on_device)
if isinstance(obj, (ivy.Container, dict)):
for item2 in obj.values():
assertion(item2.device, on_device)
else:
assertion(obj.device, on_device)
if getattr(mod, "buffers", None):
for obj in mod.buffers.values():
if isinstance(obj, (ivy.Container, dict)):
ivy.nested_map(lambda x: assertion(x.device, on_device), obj)
else:
assertion(obj.device, on_device)
model_assert(model, on_device)
# module training
@given(
batch_shape=helpers.get_shape(
min_num_dims=2, max_num_dims=2, min_dim_size=1, max_dim_size=2
),
input_channels=st.integers(min_value=2, max_value=5),
output_channels=st.integers(min_value=2, max_value=5),
)
def test_module_training(
batch_shape, input_channels, output_channels, on_device, backend_fw
):
# smoke test
if backend_fw == "numpy":
# NumPy does not support gradients
return
with ivy.utils.backend.ContextManager(backend_fw):
x = ivy.astype(
ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels),
"float32",
)
module = TrainableModule(input_channels, output_channels, device=on_device)
def loss_fn(v_):
out = module(x, v=v_)
return ivy.mean(out)
# train
loss_tm1 = 1e12
loss = None
grads = None
for i in range(10):
loss, grads = ivy.execute_with_gradients(loss_fn, module.v)
module.v = ivy.gradient_descent_update(module.v, grads, 1e-3)
assert loss < loss_tm1
loss_tm1 = loss
# type test
assert ivy.is_array(loss)
assert isinstance(grads, ivy.Container)
# cardinality test
assert loss.shape == ()
# value test
assert ivy.max(ivy.abs(grads.linear0.b)) > 0
assert ivy.max(ivy.abs(grads.linear0.w)) > 0
assert ivy.max(ivy.abs(grads.linear1.b)) > 0
assert ivy.max(ivy.abs(grads.linear1.w)) > 0
assert ivy.max(ivy.abs(grads.linear2.b)) > 0
assert ivy.max(ivy.abs(grads.linear2.w)) > 0
# tracing test
if backend_fw == "torch":
# pytest scripting does not support **kwargs
return
# module training with duplicate
@given(
batch_shape=helpers.get_shape(
min_num_dims=2, max_num_dims=2, min_dim_size=1, max_dim_size=2
),
channels=st.integers(min_value=1, max_value=64),
same_layer=st.booleans(),
)
def test_module_training_with_duplicate(
batch_shape, channels, same_layer, on_device, backend_fw
):
# smoke test
if backend_fw == "numpy":
# NumPy does not support gradients
return
with ivy.utils.backend.ContextManager(backend_fw):
x = ivy.astype(
ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), channels),
"float32",
)
module = TrainableModuleWithDuplicate(channels, same_layer, device=on_device)
def loss_fn(v_):
out = module(x, v=v_)
return ivy.mean(out)
# train
loss_tm1 = 1e12
loss = None
grads = None
for i in range(10):
loss, grads = ivy.execute_with_gradients(loss_fn, module.v)
module.v = ivy.gradient_descent_update(module.v, grads, 1e-3)
assert loss < loss_tm1
loss_tm1 = loss
# type test
assert ivy.is_array(loss)
assert isinstance(grads, ivy.Container)
# cardinality test
assert loss.shape == ()
# value test
assert ivy.max(ivy.abs(grads.linear0.b)) > 0
assert ivy.max(ivy.abs(grads.linear0.w)) > 0
if not same_layer:
assert ivy.max(ivy.abs(grads.linear1.b)) > 0
# tracing test
if backend_fw == "torch":
# pytest scripting does not support **kwargs
return
# module with dict training
@given(
batch_shape=helpers.get_shape(
min_num_dims=2, max_num_dims=2, min_dim_size=1, max_dim_size=2
),
input_channels=st.integers(min_value=2, max_value=5),
output_channels=st.integers(min_value=2, max_value=5),
)
def test_module_w_dict_training(
batch_shape, input_channels, output_channels, on_device, backend_fw
):
# smoke test
if backend_fw == "numpy":
# NumPy does not support gradients
return
with ivy.utils.backend.ContextManager(backend_fw):
x = ivy.astype(
ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels),
"float32",
)
module = TrainableModuleWithDict(
input_channels, output_channels, device=on_device
)
def loss_fn(v_):
out = module(x, v=v_)
return ivy.mean(out)
# train
loss_tm1 = 1e12
loss = None
grads = None
for i in range(10):
loss, grads = ivy.execute_with_gradients(loss_fn, module.v)
module.v = ivy.gradient_descent_update(module.v, grads, 1e-3)
assert loss < loss_tm1
loss_tm1 = loss
# type test
assert ivy.is_array(loss)
assert isinstance(grads, ivy.Container)
# cardinality test
assert loss.shape == ()
# value test
assert ivy.max(ivy.abs(grads.layers.linear0.b)) > 0
assert ivy.max(ivy.abs(grads.layers.linear0.w)) > 0
assert ivy.max(ivy.abs(grads.layers.linear1.b)) > 0
assert ivy.max(ivy.abs(grads.layers.linear1.w)) > 0
assert ivy.max(ivy.abs(grads.layers.linear2.b)) > 0
assert ivy.max(ivy.abs(grads.layers.linear2.w)) > 0
# tracing test
if backend_fw == "torch":
# pytest scripting does not support **kwargs
return
# module with list training
@given(
batch_shape=helpers.get_shape(
min_num_dims=2, max_num_dims=2, min_dim_size=1, max_dim_size=2
),
input_channels=st.integers(min_value=2, max_value=5),
output_channels=st.integers(min_value=2, max_value=5),
)
def test_module_w_list_training(
batch_shape, input_channels, output_channels, on_device, backend_fw
):
# smoke test
if backend_fw == "numpy":
# NumPy does not support gradients
return
with ivy.utils.backend.ContextManager(backend_fw):
x = ivy.astype(
ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels),
"float32",
)
module = TrainableModuleWithList(
input_channels, output_channels, device=on_device
)
def loss_fn(v_):
out = module(x, v=v_)
return ivy.mean(out)
# train
loss_tm1 = 1e12
loss = None
grads = None
for i in range(10):
loss, grads = ivy.execute_with_gradients(loss_fn, module.v)
module.v = ivy.gradient_descent_update(module.v, grads, 1e-3)
assert loss < loss_tm1
loss_tm1 = loss
# type test
assert ivy.is_array(loss)
assert isinstance(grads, ivy.Container)
# cardinality test
assert loss.shape == ()
# value test
assert ivy.max(ivy.abs(grads.layers.v0.b)) > 0
assert ivy.max(ivy.abs(grads.layers.v0.w)) > 0
assert ivy.max(ivy.abs(grads.layers.v1.b)) > 0
assert ivy.max(ivy.abs(grads.layers.v1.w)) > 0
assert ivy.max(ivy.abs(grads.layers.v2.b)) > 0
assert ivy.max(ivy.abs(grads.layers.v2.w)) > 0
# tracing test
if backend_fw == "torch":
# pytest scripting does not support **kwargs
return
# module with none attribute
@given(
batch_shape=helpers.get_shape(
min_num_dims=2, max_num_dims=2, min_dim_size=1, max_dim_size=2
),
input_channels=st.integers(min_value=2, max_value=5),
output_channels=st.integers(min_value=2, max_value=5),
)
def test_module_w_none_attribute(
batch_shape, input_channels, output_channels, on_device, backend_fw
):
# smoke test
if backend_fw == "numpy":
# NumPy does not support gradients
return
with ivy.utils.backend.ContextManager(backend_fw):
x = ivy.astype(
ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels),
"float32",
)
module = ModuleWithNoneAttribute(device=on_device)
module(x)
# module with partial v
@given(
batch_shape=helpers.get_shape(
min_num_dims=2, max_num_dims=2, min_dim_size=1, max_dim_size=2
),
input_channels=st.integers(min_value=2, max_value=5),
output_channels=st.integers(min_value=2, max_value=5),
)
def test_module_w_partial_v(
batch_shape, input_channels, output_channels, on_device, backend_fw
):
# smoke test
if backend_fw == "numpy":
# NumPy does not support gradients
return
with ivy.utils.backend.ContextManager(backend_fw):
x = ivy.astype(
ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels),
"float32",
)
v = ivy.Container(
{
"linear0": {
"b": _variable(ivy.random_uniform(shape=[64])),
"w": _variable(ivy.random_uniform(shape=[64, 4])),
},
"linear1": {
"b": _variable(ivy.random_uniform(shape=[64])),
"w": _variable(ivy.random_uniform(shape=[64, 64])),
"extra": _variable(ivy.random_uniform(shape=[64, 64])),
},
"linear2": {
"b": _variable(ivy.random_uniform(shape=[5])),
"w": _variable(ivy.random_uniform(shape=[5, 64])),
},
}
)
try:
TrainableModule(
input_channels,
output_channels,
device=on_device,
v=v,
with_partial_v=True,
)
raise Exception(
"TrainableModule did not raise exception despite being passed "
"with wrongly shaped variables."
)
except ivy.utils.exceptions.IvyException:
pass
v = ivy.Container(
{
"linear0": {
"b": _variable(ivy.random_uniform(shape=[64])),
},
"linear1": {"w": _variable(ivy.random_uniform(shape=[64, 64]))},
"linear2": {
"b": _variable(ivy.random_uniform(shape=[output_channels]))
},
}
)
try:
TrainableModule(input_channels, output_channels, device=on_device, v=v)
raise Exception(
"TrainableModule did not raise exception despite being passed "
"with wrongly shaped variables."
)
except ivy.utils.exceptions.IvyException:
pass
module = TrainableModule(
input_channels, output_channels, device=on_device, v=v, with_partial_v=True
)
module(x)
@given(mode=st.booleans())
def test_train_eval(mode, backend_fw):
with ivy.utils.backend.ContextManager(backend_fw):
cls = ModuleWithTrainEval()
cls.train(mode)
assert mode == cls.training
cls.eval()
assert not cls.training
# with custom var structure
@given(
batch_shape=helpers.get_shape(
min_num_dims=2, max_num_dims=2, min_dim_size=1, max_dim_size=2
),
input_channels=st.integers(min_value=2, max_value=5),
output_channels=st.integers(min_value=2, max_value=5),
)
def test_with_custom_var_structure(
batch_shape, input_channels, output_channels, on_device, backend_fw
):
# smoke test
if backend_fw == "numpy":
# NumPy does not support gradients
return
with ivy.utils.backend.ContextManager(backend_fw):
module = WithCustomVarStructure(
input_channels, output_channels, device=on_device
)
assert "x" in module.v
assert "y" in module.v
assert "z" in module.v
| ivy/ivy_tests/test_ivy/test_stateful/test_modules.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_stateful/test_modules.py",
"repo_id": "ivy",
"token_count": 9779
} | 60 |
import os
import sys
import subprocess
from pydriller import Repository
from tqdm import tqdm
import bz2
import _pickle as cPickle
def main():
BACKENDS = ["numpy", "jax", "tensorflow", "torch"]
N = 4
run_iter = int(sys.argv[1]) - 1
test_names = []
func_folder = "ivy_tests/array_api_testing/array_api_methods_to_test"
func_fnames = os.listdir(func_folder)
func_fnames.sort()
framework_tests_to_run = {
"jax": [],
"numpy": [],
"torch": [],
"tensorflow": [],
}
# add from each filepath
for fname in func_fnames:
fpath = os.path.join(func_folder, fname)
with open(fpath, "r") as file:
contents = file.read()
contents = [line.replace("__", "") for line in contents.split("\n")]
for framework in framework_tests_to_run:
tests_to_run = []
for s in contents:
if s == "":
continue
if ("#" not in s) or (
"#" in s
and (framework not in s.lower())
and any(f in s.lower() for f in framework_tests_to_run)
):
submod = f"ivy_tests/array_api_testing/test_array_api/array_api_tests/test_{fname.replace('.txt', '.py')}" # noqa
test_name = (
submod
+ "::test_"
+ (s if ("#" not in s) else s.split("#")[1].split(" ")[0])
)
tests_to_run += [test_name]
framework_tests_to_run[framework] += tests_to_run
for backend, tests in framework_tests_to_run.items():
test_names += [test + "," + backend for test in set(tests)]
# Create a Dictionary of Test Names to Index
tests = {"index_mapping": test_names, "tests_mapping": {}}
for i in range(len(test_names)):
tests["tests_mapping"][test_names[i]] = i
# Create k flag files for each backend:
k_flag = {}
subprocess.run(
["python3", "ivy_tests/array_api_testing/write_array_api_tests_k_flag.py"],
check=True,
)
for backend in BACKENDS:
k_flag_file = f"ivy_tests/array_api_testing/.array_api_tests_k_flag_{backend}"
with open(k_flag_file, "r") as f:
array_api_tests_k_flag = f.read().strip()
if backend == "torch":
array_api_tests_k_flag += " and not (uint16 or uint32 or uint64)"
k_flag[backend] = array_api_tests_k_flag
directories = (
[x[0] for x in os.walk("ivy")]
+ [x[0] for x in os.walk("ivy_tests/array_api_testing")]
+ ["ivy_tests"]
)
directories_filtered = [
x for x in directories if not (x.endswith("__pycache__") or "hypothesis" in x)
]
directories = set(directories_filtered)
num_tests = len(test_names)
tests_per_run = num_tests // N
start = run_iter * tests_per_run
end = num_tests if run_iter == N - 1 else (run_iter + 1) * tests_per_run
for test_backend in tqdm(test_names[start:end]):
test_name, backend = test_backend.split(",")
command = (
f"docker run --rm --env IVY_BACKEND={backend} --env "
'ARRAY_API_TESTS_MODULE="ivy" -v "$(pwd)":/ivy unifyai/ivy:latest '
'timeout 30m /bin/bash -c "coverage run --source=ivy,ivy_tests -m pytest '
f'{test_name} -k \\"{k_flag[backend]}\\" --disable-warnings --tb=short '
"--hypothesis-max-examples 5 -vv > coverage_output;coverage annotate > "
'coverage_output"'
)
os.system(command)
for directory in directories:
for file_name in os.listdir(directory):
if file_name.endswith("cover"):
file_name = directory + "/" + file_name
if file_name not in tests:
tests[file_name] = []
with open(file_name) as f:
for line in f:
tests[file_name].append(set())
with open(file_name) as f:
i = 0
for line in f:
if line[0] == ">":
tests[file_name][i].add(
tests["tests_mapping"][test_backend]
)
i += 1
os.system("find . -name \\*cover -type f -delete")
commit_hash = ""
for commit in Repository(".", order="reverse").traverse_commits():
commit_hash = commit.hash
break
tests["commit"] = commit_hash
with bz2.BZ2File("tests.pbz2", "w") as f:
cPickle.dump(tests, f)
if __name__ == "__main__":
main()
| ivy/scripts/determine_tests/array_api_det_coverage.py/0 | {
"file_path": "ivy/scripts/determine_tests/array_api_det_coverage.py",
"repo_id": "ivy",
"token_count": 2549
} | 61 |
# Run Tests
import os
import sys
if __name__ == "__main__":
failed = False
with open(sys.argv[1], "w") as f_write:
with open("tests_to_run", "r") as f:
for line in f:
test_path, backend = line.strip().split(",")
print(f"\n{'*' * 100}")
print(f"{line[:-1]}")
print(f"{'*' * 100}\n")
sys.stdout.flush()
ret = os.system(
f'docker run --rm -v "$(pwd)":/ivy -v "$(pwd)"/.hypothesis:/.hypothesis unifyai/ivy:latest python3 -m pytest --tb=short {test_path} --skip-trace-testing --backend {backend}' # noqa
)
if ret != 0:
failed = True
f_write.write(line)
if failed:
sys.exit(1)
| ivy/scripts/run_tests/run_tests_pr.py/0 | {
"file_path": "ivy/scripts/run_tests/run_tests_pr.py",
"repo_id": "ivy",
"token_count": 457
} | 62 |
#!/bin/bash -e
docker run --rm -it -v "$(pwd)":/ivy unifyai/ivy:latest python3 -m pytest ivy_tests/
| ivy/scripts/shell/run_tests.sh/0 | {
"file_path": "ivy/scripts/shell/run_tests.sh",
"repo_id": "ivy",
"token_count": 45
} | 63 |
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.236.0/containers/docker-existing-dockerfile
{
"name": "Ivy Development Environment (build)",
"build": {
"dockerfile": "../../docker/Dockerfile",
"context": "../..",
"args": {
"pycon": ["3.10"]
}
},
"customizations": {
"vscode": {
"extensions": [
"ms-python.python"
],
"settings": {
"python.defaultInterpreterPath": "/opt/miniconda/envs/multienv/bin/python3"
}
}
},
"postCreateCommand": {
"post_create": "bash .devcontainer/post_create_commands.sh",
"bashrc": "echo \"alias python=python3\" >> ~/.bashrc"
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Uncomment the next line to run commands after the container is created - for example installing curl.
// Uncomment when using a ptrace-based debugger like C++, Go, and Rust
// "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
// Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
// "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
// "remoteUser": "vscode",
"features": {
"ghcr.io/devcontainers/features/common-utils:2": {
"installZsh": true,
"configureZshAsDefaultShell": true,
"installOhMyZsh": true,
"upgradePackages": false
},
"ghcr.io/devcontainers/features/docker-outside-of-docker:1": {
"moby": true,
"installDockerBuildx": true,
"version": "20.10",
"dockerDashComposeVersion": "v2"
},
"ghcr.io/devcontainers/features/github-cli:1": {
"installDirectlyFromGitHubRelease": true,
"version": "latest"
}
}
}
| ivy/.devcontainer/build/devcontainer.json/0 | {
"file_path": "ivy/.devcontainer/build/devcontainer.json",
"repo_id": "ivy",
"token_count": 765
} | 0 |
<component name="ProjectRunConfigurationManager">
<configuration default="true" type="tests" factoryName="py.test">
<module name="ivy" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
<env name="PYTHONPATH" value="/opt/fw/numpy:/opt/fw/jax:/opt/fw/tensorflow:/opt/fw/torch:/opt/fw/paddle:/opt/fw/mxnet" />
</envs>
<option name="SDK_HOME" value="python" />
<option name="SDK_NAME" value="Remote Python 3.10.0 Docker (unifyai/ivy:latest)" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="DockerContainerSettingsRunConfigurationExtension">
<option name="envVars">
<list />
</option>
<option name="extraHosts">
<list />
</option>
<option name="links">
<list />
</option>
<option name="networkDisabled" value="false" />
<option name="networkMode" value="bridge" />
<option name="portBindings">
<list />
</option>
<option name="publishAllPorts" value="false" />
<option name="runCliOptions" value="--entrypoint= --rm" />
<option name="version" value="2" />
<option name="volumeBindings">
<list>
<DockerVolumeBindingImpl>
<option name="containerPath" value="/opt/project" />
<option name="editable" value="true" />
<option name="hostPath" value="$PROJECT_DIR$" />
<option name="readOnly" value="false" />
</DockerVolumeBindingImpl>
</list>
</option>
</EXTENSION>
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="_new_keywords" value="""" />
<option name="_new_parameters" value="""" />
<option name="_new_additionalArguments" value="""" />
<option name="_new_target" value="""" />
<option name="_new_targetType" value=""PYTHON"" />
<method v="2" />
</configuration>
</component>
| ivy/.idea/runConfigurations/_template__of_py_test.xml/0 | {
"file_path": "ivy/.idea/runConfigurations/_template__of_py_test.xml",
"repo_id": "ivy",
"token_count": 937
} | 1 |
FROM debian:buster
WORKDIR /ivy
ARG CLI
# python version for conda
ARG pycon=3.10
ENV DEBIAN_FRONTEND=noninteractive
# Install miniconda
ENV CONDA_DIR /opt/miniconda/
RUN apt clean && \
rm -rf /var/lib/apt/lists/* && \
apt-get update && \
apt-get install -y wget \
git -y && \
wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
/bin/bash ~/miniconda.sh -b -p /opt/miniconda
ENV PATH=$CONDA_DIR/bin:$PATH
RUN conda create --name multienv python==$pycon
# to fix protobuf conflicts
ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION python
ENV PATH=/opt/miniconda/envs/multienv/bin:$PATH
RUN apt-get update && \
apt-get install -y python3-pip python3-tk && \
apt-get install -y libsm6 libxext6 libxrender-dev libgl1-mesa-glx && \
apt-get install -y python-opengl && \
apt-get install -y git && \
apt-get install -y rsync && \
apt-get install -y libusb-1.0-0 && \
apt-get install -y libglib2.0-0 && \
apt-get install -y jq && \
pip3 install --upgrade pip && \
pip3 install pip-autoremove &&\
pip3 install setuptools==58.5.3
# Install Ivy Upstream
RUN git clone --progress --recurse-submodules https://github.com/unifyai/ivy --depth 1 && \
cd ivy && \
cd ivy_tests/array_api_testing/test_array_api && \
pip3 install --no-cache-dir -r requirements.txt
# Install local optional
COPY requirements/optional.txt .
COPY requirements/requirements.txt .
#setting torch path early on because torch-scatter needs it
ENV PYTHONPATH "/opt/fw/torch:/opt/miniconda/envs/multienv/bin"
#torch and torch scatter separate installation because they cause issues
RUN pip3 install --no-cache-dir torch --target '/opt/fw/torch' --extra-index-url https://download.pytorch.org/whl/cpu
RUN export ver=$(pip show torch --path '/opt/fw/torch' | grep Version | cut -d ' ' -f2) && \
pip3 install --target '/opt/fw/torch' --no-cache-dir --upgrade torch-scatter -f https://data.pyg.org/whl/torch-$ver.html
# requirement mappings directs which dependency to be installed and where
COPY /docker/requirement_mappings.json .
SHELL ["/bin/bash", "-c"]
# installing requirements based on mappings in location /opt/fw/$framework
RUN jq -r 'to_entries[] | select(.value != [""]) | .key as $dir | .value[] | @sh "/opt/fw/\($dir) \(.)"' requirement_mappings.json | xargs -I {} sh -c 'printf "Installing %s\n" $2 && pip install --ignore-installed --target $1 $2 --extra-index-url https://download.pytorch.org/whl/cpu --no-cache-dir' sh {}
# install the requirements.txt, optional.txt with the mapped dependencies filtered out
RUN pip install --upgrade -r requirements.txt &&\
cp ./optional.txt tmp.txt &&\
jq -r 'to_entries[] | [.key] + .value | select(length > 0 or (. == "")) | .[]' requirement_mappings.json | sort -u | xargs -I {} sed -i '/{}/d;/torch/d;/torch-scatter/d;/jax\[.*\]/d' tmp.txt && pip install -r tmp.txt
# add all the directories to environment path so that python knows where to find them
ENV PYTHONPATH "/opt/fw/mxnet:/opt/fw/numpy:/opt/fw/tensorflow:/opt/fw/jax:/opt/fw/torch:/opt/fw/paddle:/opt/miniconda/envs/multienv/bin"
COPY scripts/test_dependencies.py .
RUN python3 test_dependencies.py -fp requirements.txt,optional.txt && \
rm -rf requirements.txt && \
rm -rf optional.txt && \
rm -rf tmp.txt && \
rm -rf test_dependencies.py
| ivy/docker/Dockerfile/0 | {
"file_path": "ivy/docker/Dockerfile",
"repo_id": "ivy",
"token_count": 1282
} | 2 |
{% extends "top_level_module.rst" %}
{%- block options -%}
{{super()}} :private-members:
{%- endblock -%}
.. Experimental modules are added here
{% block custom_content %}
{% for submodule in modules %}
.. automodule:: {{submodule}}
:members:
:special-members: __init__
:undoc-members:
:private-members:
{% endfor %}
{% endblock %}
| ivy/docs/_templates/data_module.rst/0 | {
"file_path": "ivy/docs/_templates/data_module.rst",
"repo_id": "ivy",
"token_count": 139
} | 3 |
The Basics
==========
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`todo list issues thread`: https://discord.com/channels/799879767196958751/1189903501011202128
.. _`Atlassian tutorial`: https://www.atlassian.com/git/tutorials/saving-changes/git-stash
.. _`fork management thread`: https://discord.com/channels/799879767196958751/1189903708465672272
.. _`pull requests channel`: https://discord.com/channels/799879767196958751/982728733859414056
.. _`PyCharm blog`: https://www.jetbrains.com/help/pycharm/finding-and-replacing-text-in-file.html
.. _`Debugging`: https://www.jetbrains.com/help/pycharm/debugging-code.html
Getting Help
------------
There are a few different communication channels that you can make use of in order to ask for help:
#. `Discord server <https://discord.gg/sXyFF8tDtm>`_
#. `Issues <https://github.com/unifyai/ivy/issues>`_
We'll quickly outline how each of these should be used, and also which question is most appropriate for which context.
**Discord Server**
The `discord server <https://discord.gg/sXyFF8tDtm>`_ is most suitable for very quick and simple questions.
These questions should **always** be asked in the correct channel.
There is a tendency to use the *general* landing channel for everything.
This isn't the end of the world, but if many unrelated messages come flying into the *general* channel, then it does make it very hard to keep track of the different discussions, and it makes it less likely that you will receive a response.
For example, if you are applying for an internship, then you should make use of the **internship** channels, and **not** the general channel for your questions.
**Issues**
As the name suggests, the `issues <https://github.com/unifyai/ivy/issues>`_ section on GitHub is the best place to raise issues or general bugs that you find with the project.
It can also serve as a useful place to ask questions, but only if you suspect that the behaviour you are observing *might* be a bug.
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/T5vQP1pCXS8" class="video" allowfullscreen="true">
</iframe>
|
ToDo List Issues
----------------
We make extensive use of `ToDo list issues <https://github.com/unifyai/ivy/issues?q=is%3Aopen+is%3Aissue+label%3AToDo>`_, which act as placeholders for tracking many related sub-tasks in a ToDo list.
We have a clear process for contributors to engage with such ToDo lists:
a. Find a task to work on which (i) is not marked as completed with a tick (ii) does not have an issue created and (iii) is not mentioned in the comments. Currently, there are three open tasks: :ref:`overview/contributing/open_tasks:Fixing Failing Tests`, :ref:`overview/contributing/open_tasks:Function Formatting`, :ref:`overview/contributing/open_tasks:Frontend APIs` and :ref:`overview/contributing/open_tasks:Ivy Experimental API`.
b. Create a new issue with the title being just the name of the sub-task you would like to work on.
c. Comment on the ToDo list issue with a reference to your new issue like so:
:code:`- [ ] #Issue_number`
For example, if your issue number is 12345, then the text of your comment should be :code:`- [ ] #12345`. You could also use just the issue number (:code:`#12345`), or a link to the issue itself (:code:`https://github.com/unifyai/ivy/issues/12345`).
At some point after your comment is made, your issue will automatically be added to the ToDo list and the comment will be deleted.
No need to wait for this to happen before progressing to the next stage. Don’t comment anything else on these ToDo issues, which should be kept clean with comments only as described above.
d. Start working on the task, and open a PR as soon as you have a full or partial solution, when you open the PR make sure to follow the `conventional commits format <https://www.conventionalcommits.org/en/v1.0.0/>`_, and then directly reference the issue in the pull request by adding the following content to the description of the PR:
:code:`Close #Issue_number`
This is important, so that the merging of your PR will automatically close the associated issue. Make sure this is in the
description of the PR, otherwise it might not link correctly. If you have a partial solution, the Ivy team can help to guide you through the process of getting it working 🙂
Also, remember to make the PR name well described and if there are some details that can support your changes add them to the description of the PR.
e. Wait for us to review your PR.
Once we have reviewed your PR we will either merge or request changes.
Every time you respond to our requested changes you must re-request a review in order for us to re-engage with the PR.
f. Once the PR is in good shape, we will merge into main, and then you become an Ivy contributor!
In order to keep our ToDo lists moving quickly, if your PR is not created within 7 days of creating the issue, then a warning message will appear on the issue.
If another 7 days pass without any changes, the issue will be closed and the task will be made free for others in the community.
Likewise, if we have requested changes on your PR, and you do not respond and request a new code review within 7 days, then a warning message will appear on the PR.
If another 7 days pass without any changes, then the PR and the associated issue will be closed, and the task will be freed for others in the community.
Even if you do not make code changes, you should request a new code review to flag to us that our attention is again needed to further the discussion.
The purpose of this is to ensure our ToDo lists remain accessible for all in the community to engage with, where priority is given to those who can engage on a more short-term basis.
We want to avoid the situation where tasks are allocated but then are not acted upon for long periods of time, while preventing others in the community from working on these instead.
Starting an issue and then being unable to complete it is not a problem from our side at all, we automatically close these just so we can keep our community engaged with these tasks 🙂
Our automatic closing is obviously never a reflection on the quality of the PR or the developer who made it, or any reflection of hypothetical frustration we have for more delayed response times etc.
Developers are of course very busy people, and sometimes there is not as much free time available as initially thought.
That's totally fine.
Please don't take it personally if your issue or PR gets closed because of this 7-day inactivity time limit.
Reach out to me on discord if at any point you believe this happened to you unfairly, and we will definitely investigate!
Finally, we limit the maximum number of *open* and *incomplete* sub-task issues to *three* per person.
This is to prevent anyone from self-allocating many sub-tasks, preventing others in the community from engaging, and then not being able to complete them.
Even though the limit is three, sub-tasks should only be self-assigned using **one comment per sub-task**.
For example, a sequence of comments like this :code:`- [ ] #Issue_number` will register correctly whereas a single comment like this :code:`- [ ] #Issue_number, - [ ] #Issue_number, - [ ] #Issue_number` or this :code:`- [ ] #Issue_number #Issue_number #Issue_number` etc. will not.
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/wBKTOGmwfbo" class="video" allowfullscreen="true">
</iframe>
|
For questions, please reach out on `discord`_ in the `todo list issues thread`_!
Managing Your Fork
------------------
When contributing to Ivy, the first step is to create a fork of the repository.
Then, it's best practice to create a separate branch for each new pull request (PR) you create.
This can be done using:
.. code-block:: bash
git checkout -b name_of_your_branch
The main branch then simply has the role of being kept up to date with upstream.
You *can* create PRs based on the main branch of your fork, but this will make things more complicated if you would then like to create additional PRs in the future.
For keeping any branch on your fork up to date, there is a script in the root folder of the repo `scripts/shell/merge_with_upstream.sh <https://github.com/unifyai/ivy/blob/bcddc79978afe447958dfa3ea660716845c85846/scripts/shell/merge_with_upstream.sh>`_.
To update your fork's branch to the upstream main branch, simply run :code:`./scripts/shell/merge_with_upstream.sh name_of_your_branch`.
To update the main branch, this would then be: :code:`./scripts/shell/merge_with_upstream.sh main`.
When making a PR (explained in the next sub-section), sometimes you will see that changes to upstream have caused conflicts with your PR.
In this case, you will need to either resolve these conflicts in the browser, or clone your fork and make changes locally in the terminal and push once resolved.
Both of these cases are explained in the following video.
You may find that once you have made changes locally and try pulling from main, the pull request is aborted as there are merge conflicts.
In order to avoid tedious merge conflict resolution, you can try 'stashing' your local changes, then pulling from main.
Once your branch is up-to-date with main, you can reinstate the most recently stashed changes, commit and push to main with no conflicts.
The corresponding commands are :code:`git stash` -> :code:`git fetch` -> :code:`git pull` -> :code:`git stash apply stash@{0}`.
Note that this only works for uncommitted changes (staged and unstaged) and untracked files won't be stashed.
For a comprehensive explanation of git stashing, check out this `Atlassian tutorial`_.
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/TFMPihytg9U" class="video" allowfullscreen="true">
</iframe>
|
For questions, please reach out on `discord`_ in the `fork management thread`_!
Who To Ask
----------
When raising issues on the Ivy repo, it can be useful to know who in the team wrote which piece of code.
Armed with this information, you can then for example directly tag (using @) the member of the team who worked on a particular piece of code, which you are trying to understand, or you would like to ask questions about.
Here we describe a workflow to help navigate this question of "who to ask".
With Command Line:
******************
**git blame** - Show what revision and author last modified each line of a file
**git log** - Show commit logs
.. code-block:: none
# Eg: From line 16 to next 5 lines since past 2 weeks
git blame --since=2.weeks -L 16,+5 <filepath> | grep -v "^\^"
# Deeper look at what each author changed in files retrieved from the above step
git log <commit_id> -p
With Browser:
*************
**Git Blame View** is a handy tool to view the line-by-line revision history for an entire file, or view the revision history of a single line within a file.
.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/contributing/the_basics/git_blame/git_blame_1.png?raw=true
:width: 420
This view can be toggled from the option in left vertical pane, or from the "blame" icon in top-right, as highlighted above.
.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/contributing/the_basics/git_blame/git_blame_2.png?raw=true
:width: 420
Each time you click the highlighted icon, the previous revision information for that line is shown, including who committed the change and when this happened.
.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/contributing/the_basics/git_blame/git_blame_3.png?raw=true
:width: 420
Whenever starting a discussion or creating an issue, you are very welcome to tag members of the Ivy team using "@", selecting the person you think would be most suitable to interact with, based on the information gained from the above steps.
Pull Requests
-------------
Our process for responding to pull requests is quite simple.
All newly created PRs will be reviewed by a member of the team, and then the PR will either be merged or changes will be requested.
In order for us to look at the changes you have made, you will then need to request a code review once you have addressed our requested changes.
We will then take another look, and either merge the PR or request further changes.
This process then will repeat until either the PR is closed by us or yourself, or the PR is merged.
If we request changes, you make those changes, but you do not request a code review, then we will likely not check the changes.
This is the case even if you comment on the PR.
This simple process makes it much simpler for us to track where and when our attention is needed.
Note that you cannot request a code review until you have already received at least one review from us.
Therefore, all new PRs will receive a code review, so please just wait and we will check out and review your newly created PR as soon as possible!
Your PR will never be closed until we have provided at least a code review on it.
After a new PR is made, for the tests to run, it needs an approval of someone from the ivy team for the workflows to start running.
Once approved, you can see the failing and passing checks for a commit relevant to your PR by clicking on the ❌ or ✔️ or 🟤 (each for: one or more tests are failing, all tests are passing, the check has just started, respectively) icon next to the commit hash.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/pull_requests/PR_checks.png?raw=true
:width: 420
Further, if you click on the details next to a check then you can see the logs for that particular test.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/pull_requests/pr_logs.png?raw=true
:width: 420
Also, if you have pushed multiple commits to a PR in a relatively short time, you may want to cancel the checks for a previous commit to speedup the process, you can do that by going to the log page as described above and clicking on the `Cancel Workflow` button.
Note that this option might be unavailable depending on the level of access that you have.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/pull_requests/cancel_workflow.png?raw=true
:width: 420
Finally, all PRs must give write access to Ivy maintainers of the branch.
This can be done by checking a tickbox in the lower right corner of the PR.
This will enable us to quickly fix conflicts, merge with upstream, and get things moving much more quickly without us needing to request very simple fixes from yourself.
The code review process is explained in more detail in the following video.
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/9G4d-CvlT2g" class="video" allowfullscreen="true">
</iframe>
|
For questions, please reach out on `discord`_ in the `pull requests thread`_!
Small Commits Often
-------------------
Sometimes, you might want to try any make substantial improvements that span many files, with the intention of then creating one very large PR at the end in order to merge all of your changes.
While this is generally an acceptable approach when working on software projects, we discourage this approach for contributions to Ivy.
We adopt a philosophy where small, incremental, frequent commits are **much** more valuable to us and the entire Ivy developer community, than infrequent large commits.
This is for a few reasons:
#. It keeps everyone up to date and on the same page as early as possible.
#. It avoids the case where multiple people waste time fixing the same problem.
#. It enables others to spot mistakes or conflicts in proposals much earlier.
#. It means you avoid the mountain of conflicts to resolve when you do get around to merging.
This is also why we advocate using individual pull-requests per issue in the ToDo list issues.
This keeps each of the commits on main very contained and incremental, which is the style we're going for.
Sometimes, you've already dived very deep into some substantial changes in your fork, and it might be that only some of the problems you were trying to fix are actually fixed by your local changes.
In this hypothetical situation, you should aim to get the working parts merged into main **as soon as possible**.
Adding subsections of your local changes with :code:`git` is easy.
You can add individual files using:
.. code-block:: none
git add filepath
You can also enter an interactive session for adding individual lines of code:
.. code-block:: none
git add -p filepath # choose lines to add from the file
get add -p # choose lines to add from all changes
When in the interactive session, you can split code blocks into smaller code blocks using :code:`s`.
You can also manually edit the exact lines added if further splitting is not possible, using :code:`e`.
Check the `git documentation <https://git-scm.com/doc>`_ for more details.
As a final note, a beautiful commit history is not something we particularly care about.
We're much more concerned that the code itself is good, that things are updated as quickly as possible, and that all developers are able to work efficiently.
If a mistake is committed into the history, it's generally not too difficult to simply undo this in future commits, so don't stress about this too much 🙂
For questions, please reach out on the on `discord`_ in the `commit frequency thread`_!
Interactive Ivy Docker Container
--------------------------------
The advantage of Docker interactive mode is that it allows us to execute commands at the time of running the container.
It's quite a nifty tool which can be used to reassure that the functions are working as expected in an isolated environment.
An interactive bash shell in ivy's docker container can be created by using the following command,
.. code-block:: none
docker run --rm -it unifyai/ivy bash
The project structure and file-system can be explored.
This can be very useful when you want to test out the bash scripts in ivy, run the tests from the command line etc,.
In fact, if you only want to quickly test things in an interactive python shell run the following command,
.. code-block:: none
docker run --rm -it unifyai/ivy python3
In both cases, the ivy version at the time when the container was built will be used.
If you want to try out your local version of ivy, with all of the local changes you have made, you should add the following mount:
.. code-block:: none
docker run --rm -it -v /local_path_to_ivy/ivy/ivy:/ivy/ivy unifyai/ivy bash
* This will overwrite the *ivy* subfolder inside the ivy repo in the container with the *ivy* subfolder inside your local ivy repo.
* Ivy is installed system-wide inside the container via the command :code:`python3 setup.py develop --no-deps`
* The :code:`develop` command means that the system-wide installation will still depend on the original source files, rather than creating a fresh copy.
* Therefore, ivy can be imported into an interactive python shell from any directory inside the container, and it will still use the latest updates made to the source code.
Clearly, running a container in interactive mode can be a helpful tool in a developer’s arsenal.
Running Tests Locally
---------------------
With Docker
***********
#. With PyCharm (With or without docker):
1. PyCharm enables users to run pytest using the green button present near every function declaration inside the :code:`ivy_tests` folder.
.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/contributing/the_basics/pytest_with_pycharm/pytest_button_pycharm.png?raw=true
:width: 420
2. Testing can be done for the entire project, individual submodules, individual files, and individual tests.
This can be done by selecting the appropriate configuration from the top pane in PyCharm.
.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/contributing/the_basics/pytest_with_pycharm/pytest_with_pycharm.png?raw=true
:width: 420
#. Through the command line (With docker):
1. We need to replace the folder inside the container with the current local ivy directory to run tests on the current local code.
.. code-block:: none
docker exec <container-name> rm -rf ivy
docker cp ivy <container-name>:/
2. We need to then enter inside the docker container and change into the :code:`ivy` directory using the following command.
.. code-block:: none
docker exec -it ivy_container bash
cd ivy
3. Run the test using the pytest command.
1. Ivy Tests:
1. For a single function:
.. code-block:: none
pytest ivy_tests/test_ivy/test_functional/test_core/test_image.py::test_random_crop --no-header --no-summary -q
2. For a single file:
.. code-block:: none
pytest ivy_tests/test_ivy/test_functional/test_core/test_image.py --no-header --no-summary -q
3. For all tests:
.. code-block:: none
pytest ivy_tests/test_ivy/ --no-header --no-summary -q
2. Array API Tests:
1. For a single function:
.. code-block:: none
pytest ivy_tests/array_api_testing/test_array_api/array_api_tests/test_creation_functions.py::test_arange --no-header --no-summary -q
2. For a single file:
.. code-block:: none
pytest ivy_tests/array_api_testing/test_array_api/array_api_tests/test_creation_functions.py --no-header --no-summary -q
3. For all tests:
.. code-block:: none
pytest ivy_tests/array_api_testing/test_array_api/ --no-header --no-summary -q
3. For the entire project:
.. code-block:: none
pytest ivy_tests/ --no-header --no-summary -q
#. Through the command line (Without docker):
1. We need to first enter inside the virtual environment.
.. code-block:: none
ivy_dev\Scripts\activate.bat
(on Windows)
OR
.. code-block:: none
source ivy_dev/bin/activate
(on Mac/Linux)
2. Run the test using the pytest command.
1. Ivy Tests:
1. For a single function:
.. code-block:: none
python -m pytest ivy_tests/test_ivy/test_functional/test_core/test_image.py::test_random_crop --no-header --no-summary -q
2. For a single file:
.. code-block:: none
python -m pytest ivy_tests/test_ivy/test_functional/test_core/test_image.py --no-header --no-summary -q
3. For all tests:
.. code-block:: none
python -m pytest ivy_tests/test_ivy/ --no-header --no-summary -q
2. Array API Tests
1. For a single function:
.. code-block:: none
python -m pytest ivy_tests/array_api_testing/test_array_api/array_api_tests/test_creation_functions.py::test_arange --no-header --no-summary -q
2. For a single file:
.. code-block:: none
python -m pytest ivy_tests/array_api_testing/test_array_api/array_api_tests/test_creation_functions.py --no-header --no-summary -q
3. For all tests:
.. code-block:: none
python -m pytest ivy_tests/array_api_testing/test_array_api/ --no-header --no-summary -q
3. For the entire project
.. code-block:: none
python -m pytest ivy_tests/ --no-header --no-summary -q
#. Optional Flags: Various optional flags are available for running the tests such as :code:`device`, :code:`backend`, etc.
1. :code:`device`:
1. This flag enables the setting of the device where the tests would be run.
2. Possible values being :code:`cpu` and :code:`gpu`.
3. Default value is :code:`cpu`
2. :code:`backend`:
1. This flag enables running the tests for particular backends.
2. The values of this flag could be any possible combination of JAX, numpy, tensorflow, and torch.
3. Default value is :code:`jax,numpy,tensorflow,torch`.
3. :code:`num-examples`:
1. Set the maximum number of examples to be generated by Hypothesis.
2. The value of this flag could be any positive integer value that is greater than 1.
3. Default value is :code:`5`.
Getting the most out of IDE
---------------------------
with PyCharm
************
#. Find a text:
1. :code:`Ctrl+F` will prompt you to type in the text to be found, if not already selected, and then find all the instances of text within the current file.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/find_file.png?raw=true
:align: center
2. :code:`Ctrl+Shift+F` will find all the instances of text within the project.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/find_project_wide.png?raw=true
:align: center
#. Find+Replace a text:
1. :code:`Ctrl+R` will prompt you to type in the text to be found and the text to be replaced, if not already selected, within the current file.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/find_n_replace_file.png?raw=true
:align: center
2. :code:`Ctrl+Shift+R` will prompt you to type in the text to be found and the text to be replaced, if not already selected, within the whole project.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/find_and_replace_project_wide.png?raw=true
:align: center
#. Find and multiply the cursor:
1. :code:`Ctrl+Shift+Alt+J` will find all the instances of the selected text and multiply the cursor to all these locations.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/multiple_cursor.png?raw=true
:align: center
You can visit `Pycharm Blog`_ for more details on efficient coding!
#. Debugging:
1. add breakpoints:
1. Click the gutter at the executable line of code where you want to set the breakpoint or place the caret at the line and press :code:`Ctrl+F8`
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/adding_breakpoint.png?raw=true
:align: center
2. Enter into the debug mode:
1. Click on Run icon and Select **Debug test** or press :code:`Shift+F9`.
This will open up a Debug Window Toolbar:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/open_in_debug_mode.png?raw=true
:align: center
3. Stepping through the code:
1. Step over:
Steps over the current line of code and takes you to the next line even if the highlighted line has method calls in it.
1. Click the Step Over button or press :code:`F8`
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/step_over.png?raw=true
:align: center
2. Step into:
Steps into the method to show what happens inside it.
Use this option when you are not sure the method is returning a correct result.
Click the Step Into button or press :code:`F7`
1. Smart step into:
Smart step into is helpful when there are several method calls on a line, and you want to be specific about which method to enter.
This feature allows you to select the method call you are interested in.
1. Press :code:`Shift+F7`.
This will prompt you to select the method you want to step into:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/smart_step_into.png?raw=true
:align: center
2. Click the desired method.
4. Python Console:
1. Click the Console option on Debug Tool Window:
This currently stores variables and their values upto which the code has been executed.
You can print outputs and debug the code further on.
2. If you want to open the console at a certain breakpoint:
1. Select the breakpoint-fragment of code, press :code:`Alt+shift+E` Start debugging!
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/console_coding.png?raw=true
:align: center
5. Using **try-except**:
1. PyCharm is great at pointing the lines of code which are causing tests to fail.
Navigating to that line, you can add Try-Except block with breakpoints to get in depth understanding of the errors.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/try_except.png?raw=true
:align: center
6. Dummy **test** file:
1. Create a separate dummy :code:`test.py` file wherein you can evaluate a particular test failure.
Make sure you don't add or commit this dummy file while pushing your changes.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/contributing/the_basics/getting_most_out_of_IDE/dummy_test.png?raw=true
:align: center
PyCharm has a detailed blog on efficient `Debugging`_ which is quite useful.
**Round Up**
This should have hopefully given you a good understanding of the basics for contributing.
If you have any questions, please feel free to reach out on `discord`_ in the `todo list issues thread`_, `fork management thread`_, `pull requests thread`_, depending on the question!
| ivy/docs/overview/contributing/the_basics.rst/0 | {
"file_path": "ivy/docs/overview/contributing/the_basics.rst",
"repo_id": "ivy",
"token_count": 9574
} | 4 |
Function Arguments
==================
.. _`Array API Standard`: https://data-apis.org/array-api/latest/
.. _`spec/API_specification/signatures`: https://github.com/data-apis/array-api/tree/main/spec/2022.12/API_specification
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`function arguments thread`: https://discord.com/channels/799879767196958751/1190247823275470978
.. _`Array API Standard convention`: https://data-apis.org/array-api/2021.12/API_specification/array_object.html#api-specification-array-object--page-root
Here, we explain how the function arguments differ between the placeholder implementation at :mod:`ivy/functional/ivy/category_name.py`, and the backend-specific implementation at :mod:`ivy/functional/backends/backend_name/category_name.py`.
Many of these points are already addressed in the previous sections: `Arrays <arrays.rst>`_, `Data Types <data_types.rst>`_, `Devices <devices.rst>`_ and `Inplace Updates <inplace_updates.rst>`_.
However, we thought it would be convenient to revisit all of these considerations in a single section, dedicated to function arguments.
As for type-hints, all functions in the Ivy API at :mod:`ivy/functional/ivy/category_name.py` should have full and thorough type-hints.
Likewise, all backend implementations at :mod:`ivy/functional/backends/backend_name/category_name.py` should also have full and thorough type-hints.
In order to understand the various requirements for function arguments, it's useful to first look at some examples.
Examples
--------
For the purposes of explanation, we will use four functions as examples: :func:`ivy.tan`, :func:`ivy.roll`, :func:`ivy.add` and :func:`ivy.zeros`.
We present both the Ivy API signature and also a backend-specific signature for each function:
.. code-block:: python
# Ivy
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
def tan(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None
) -> ivy.Array:
# PyTorch
@handle_numpy_arrays_in_specific_backend
def tan(
x: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None
) -> torch.Tensor:
.. code-block:: python
# Ivy
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
def roll(
x: Union[ivy.Array, ivy.NativeArray],
/,
shift: Union[int, Sequence[int]],
*,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
# NumPy
def roll(
x: np.ndarray,
/,
shift: Union[int, Sequence[int]],
*,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
.. code-block:: python
# Ivy
@handle_exceptions
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
def add(
x1: Union[float, ivy.Array, ivy.NativeArray],
x2: Union[float, ivy.Array, ivy.NativeArray],
/,
*,
alpha: Optional[Union[int, float]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
# TensorFlow
def add(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
alpha: Optional[Union[int, float]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
.. code-block:: python
# Ivy
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_native_shapes
@outputs_to_ivy_arrays
@handle_array_function
@infer_dtype
@infer_device
def zeros(
shape: Union[ivy.Shape, ivy.NativeShape],
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None
) -> ivy.Array:
# JAX
def zeros(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device,
out: Optional[JaxArray] = None,
) -> JaxArray:
Positional and Keyword Arguments
--------------------------------
In both signatures, we follow the `Array API Standard convention`_ about positional and keyword arguments.
* Positional parameters must be positional-only parameters.
Positional-only parameters have no externally-usable name.
When a method accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
This is indicated with an :code:`/` after all the position-only arguments.
* Optional parameters must be keyword-only arguments.
A :code:`*` must be added before any of the keyword-only arguments.
Nearly all the functions in the `Array API Standard convention`_ have strictly positional-only and keyword-only arguments, with an exception of few :code:`creation` functions such as :code:`ones(shape, *, dtype=None, device=None)` , :code:`linspace(start, stop, /, num, *, dtype=None, device=None, endpoint=True)` etc.
The rationale behind this is purely a convention.
The :code:`shape` argument is often passed as a keyword, while the :code:`num` argument in :code:`linspace` is often passed as a keyword for improved understandability of the code.
Therefore, given that Ivy fully adheres to the Array API Standard, Ivy also adopts these same exceptions to the general rule for the :code:`shape` and :code:`num` arguments in these functions.
Input Arrays
------------
In each example, we can see that the input arrays have type :code:`Union[ivy.Array, ivy.NativeArray]` whereas the output arrays have type :class:`ivy.Array`.
This is the case for all functions in the Ivy API.
We always return an :class:`ivy.Array` instance to ensure that any subsequent Ivy code is fully framework-agnostic, with all operators performed on the returned array now handled by the special methods of the :class:`ivy.Array` class, and not the special methods of the backend array class (:class:`ivy.NativeArray`).
For example, calling any of (:code:`+`, :code:`-`, :code:`*`, :code:`/` etc.) on the array will result in (:code:`__add__`, :code:`__sub__`, :code:`__mul__`, :code:`__div__` etc.) being called on the array class.
:class:`ivy.NativeArray` instances are also not permitted for the :code:`out` argument, which is used in many functions.
This is because the :code:`out` argument dictates the array to which the result should be written, and so it effectively serves the same purpose as the function return when no :code:`out` argument is specified.
This is all explained in more detail in the `Arrays <arrays.rst>`_ section.
out Argument
------------
The :code:`out` argument should always be provided as a keyword-only argument, and it should be added to all functions in the Ivy API and backend API which support inplace updates, with a default value of :code:`None` in all cases.
The :code:`out` argument is explained in more detail in the `Inplace Updates <inplace_updates.rst>`_ section.
dtype and device arguments
--------------------------
In the Ivy API at :mod:`ivy/functional/ivy/category_name.py`, the :code:`dtype` and :code:`device` arguments should both always be provided as keyword-only arguments, with a default value of :code:`None`.
In contrast, these arguments should both be added as required arguments in the backend implementation at :mod:`ivy/functional/backends/backend_name/category_name.py`.
In a nutshell, by the time the backend implementation is entered, the correct :code:`dtype` and :code:`device` to use have both already been correctly handled by code which is wrapped around the backend implementation.
This is further explained in the `Data Types <data_types.rst>`_ and `Devices <devices.rst>`_ sections respectively.
Numbers in Operator Functions
-----------------------------
All operator functions (which have a corresponding such as :code:`+`, :code:`-`, :code:`*`, :code:`/`) must also be fully compatible with numbers (float or :code:`int`) passed into any of the array inputs, even in the absence of any arrays.
For example, :code:`ivy.add(1, 2)`, :code:`ivy.add(1.5, 2)` and :code:`ivy.add(1.5, ivy.array([2]))` should all run without error.
Therefore, the type hints for :func:`ivy.add` include float as one of the types in the :code:`Union` for the array inputs, and also as one of the types in the :code:`Union` for the output.
`PEP 484 Type Hints <https://peps.python.org/pep-0484/#the-numeric-tower>`_ states that "when an argument is annotated as having type float, an argument of type int is acceptable".
Therefore, we only include float in the type hints.
Integer Sequences
-----------------
For sequences of integers, generally the `Array API Standard`_ dictates that these should be of type :code:`Tuple[int]`, and not :code:`List[int]`.
However, in order to make Ivy code less brittle, we accept arbitrary integer sequences :code:`Sequence[int]` for such arguments (which includes :code:`list`, :code:`tuple` etc.).
This does not break the standard, as the standard is only intended to define a subset of required behaviour.
The standard can be freely extended, as we are doing here.
Good examples of this are the :code:`axis` argument of :func:`ivy.roll` and the :code:`shape` argument of :func:`ivy.zeros`, as shown above.
Nestable Functions
------------------
Most functions in the Ivy API can also consume and return :class:`ivy.Container` instances in place of the **any** of the function arguments.
If an :class:`ivy.Container` is passed, then the function is mapped across all of the leaves of this container.
Because of this feature, we refer to these functions as *nestable* functions.
However, because so many functions in the Ivy API are indeed *nestable* functions, and because this flexibility applies to **every** argument in the function, every type hint for these functions should technically be extended like so: :code:`Union[original_type, ivy.Container]`.
However, this would be very cumbersome, and would only serve to hinder the readability of the docs.
Therefore, we simply omit these :class:`ivy.Container` type hints from *nestable* functions, and instead mention in the docstring whether the function is *nestable* or not.
**Round Up**
These examples should hopefully give you a good understanding of what is required when adding function arguments.
If you have any questions, please feel free to reach out on `discord`_ in the `function arguments thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/5cAbryXza18" class="video">
</iframe>
| ivy/docs/overview/deep_dive/function_arguments.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/function_arguments.rst",
"repo_id": "ivy",
"token_count": 3598
} | 5 |
Ivy Container
=============
Here, we explain how the :class:`ivy.Container` class saves you a ton of time and cleans up code in almost all aspects of your ML workflow.
So without further ado, let’s dive in!
Firstly, Dictionaries are an incredibly powerful and useful data type in Python.
They enable a clean, readable, and efficient-access (via hashing) storage of arbitrarily hierarchical data.
The :class:`ivy.Container` class can be seen as a souped-up Dict, with many useful features built on top.
It’s the backbone of most high level operations in Ivy.
Let’s walk through some of the most important features of the :class:`ivy.Container`!
Construction
------------
A container can be constructed in a number of ways.
All construction approaches below result in identical :class:`ivy.Container` instances.
.. code-block:: python
import ivy
dct = {'a': ivy.array([0.]),
'b': {'c': ivy.array([1.]),
'd': ivy.array([2.])}}
# via dict
cnt = ivy.Container(dct)
# via keyword
cnt = ivy.Container(a=ivy.array([0.]),
b=ivy.Container(c=ivy.array([1.]),
d=ivy.array([2.])))
# combos
cnt = ivy.Container(a=ivy.array([0.]),
b={'c': ivy.array([1.]),
'd': ivy.array([2.])})
cnt = ivy.Container({'a': ivy.array([0.]),
'b': ivy.Container(c=ivy.array([1.]),
d=ivy.array([2.]))})
Representation
--------------
:class:`ivy.Container` prints the hierarchical structure to the terminal in a very intuitive manner, much more so than native Python Dicts.
.. code-block:: python
print(dct)
{'a': ivy.array([0.]), 'b': {'c': ivy.array([1.]), 'd': ivy.array([2.])}}
print(cnt)
{
a: ivy.array([0.]),
b: {
c: ivy.array([1.]),
d: ivy.array([2.])
}
}
If the container holds very large arrays, then their shapes are printed instead.
Again, this does not happen with native Python Dicts.
.. code-block:: python
dct = {'a': ivy.ones((1000, 3)),
'b': {'c': ivy.zeros((3, 1000)),
'd': ivy.ones((1000, 2))}}
print(dct)
{'a': ivy.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
...,
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]]), 'b': {'c': ivy.array([[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]]), 'd': ivy.array([[1., 1.],
[1., 1.],
[1., 1.],
...,
[1., 1.],
[1., 1.],
[1., 1.]])}}
cnt = ivy.Container(dct)
print(cnt)
{
a: (<class ivy.array.array.Array> shape=[1000, 3]),
b: {
c: (<class ivy.array.array.Array> shape=[3, 1000]),
d: (<class ivy.array.array.Array> shape=[1000, 2])
}
}
Recursive Methods
-----------------
All methods in Ivy’s functional API are implemented as recursive methods on the :class:`ivy.Container`.
This means you can easily map a single method to all arrays in the container with a single line.
Starting with the following container:
.. code-block:: python
cnt = ivy.Container({'a': ivy.array([0., 1., 2.]),
'b': {'c': ivy.array([2., 6., 5.]),
'd': ivy.array([10., 5., 2.])}})
We can compute the mean of each sub-array:
.. code-block:: python
print(cnt.mean())
{
a: ivy.array(1.),
b: {
c: ivy.array(4.3333335),
d: ivy.array(5.6666665)
}
}
Or we can flip each sub-array:
.. code-block:: python
print(cnt.flip())
{
a: ivy.array([2., 1., 0.]),
b: {
c: ivy.array([5., 6., 2.]),
d: ivy.array([2., 5., 10.])
}
}
There are about 200 such functions for the :class:`ivy.Container` class in total, check out the `code <https://github.com/unifyai/ivy/tree/main/ivy/data_classes/container>`_ or `docs <../../../docs/data_classes/data_classes/ivy.data_classes.container.rst>`_ to see what they are!
Built-ins
----------
All built-in methods also apply recursively.
For example, performing a gradient update step for a set of network weights can be done in one line.
.. code-block:: python
weights = ivy.Container(
{'linear': {'b': ivy.array([0.2]),
'w': ivy.array([1.5, 2.3, 0.9])}})
grads = ivy.Container(
{'linear': {'b': ivy.array([1.4]),
'w': ivy.array([1.9, 0.6, 2.1])}})
lr = 0.1
new_weights = weights - grads * lr
print(new_weights)
{
linear: {
b: ivy.array([0.06]),
w: ivy.array([1.31, 2.24, 0.69])
}
}
Check out the section below on Ivy’s stateful API to see how the :class:`ivy.Container` is used for storing all network weights in :class:`ivy.Module` instances!
Access
------
The keys in an :class:`ivy.Container` can be set and accessed by using either class attributes or keys in the dictionary.
Both of these setting and accessing approaches are equivalent under the hood.
.. code-block:: python
cnt = ivy.Container({'a': ivy.array([0.])})
cnt['b'] = ivy.array([1.])
cnt.c = ivy.array([2.])
print(cnt)
{
a: ivy.array([0.]),
b: ivy.array([1.]),
c: ivy.array([2.])
}
assert cnt.c is cnt['c']
Nested keys can also be set in one line, using either ‘/’ or ‘.’ as a delimiter.
.. code-block:: python
cnt = ivy.Container({'a': ivy.array([0.])})
cnt['b/c'] = ivy.array([1.])
cnt['d.e.f'] = ivy.array([2.])
print(cnt)
{
a: ivy.array([0.]),
b: {
c: ivy.array([1.])
},
d: {
e: {
f: ivy.array([2.])
}
}
}
One of the key benefits of using properties under the hood is the autocomplete support this introduces.
Class attributes can be auto-completed when pressing the :code:`Tab` midway through typing.
This is not possible with Dicts.
.. code-block:: python
cnt = ivy.Container({'agent': {'total_speed': ivy.array([0.])}})
cnt.agent.total_height = ivy.array([1.])
cnt['agent/total_width'] = ivy.array([2.])
cnt.age -> tab
cnt.agent
cnt.agent.tot -> tab
cnt.agent.total_ -> tab
cnt.agent.total_height cnt.agent.total_speed cnt.agent.total_width
cnt.agent.total_h -> tab
cnt.agent.total_height
ivy.array([1.])
Saving and Loading
------------------
Saving and loading to disk can be done in one of many ways, with each being suited to different data types in the container.
For example, if the container mainly contains arrays (such as the weights of a network), then one of the following can be used.
.. code-block:: python
weights = ivy.Container(
{'linear': {'b': ivy.array([[0.2]]),
'w': ivy.array([[1.5, 2.3, 0.9]])}})
# save and load as hdf5
weights.cont_to_disk_as_hdf5('weights.hdf5')
loaded = ivy.Container.cont_from_disk_as_hdf5('weights.hdf5')
assert ivy.Container.cont_identical(
[loaded, weights], same_arrays=False)
# save and load as pickled
weights.cont_to_disk_as_pickled('weights.pickled')
loaded = ivy.Container.cont_from_disk_as_pickled('weights.pickled')
assert ivy.Container.cont_identical(
[loaded, weights], same_arrays=False)
Alternatively, if the container mainly stored experiment configuration data, then the following can be used.
.. code-block:: python
config = ivy.Container(
{'loading': {'batch_size': 16,
'dir': '/dataset/images'},
'training': {'dropout': True,
'lr': 0.1,
'optim': 'ADAM'}})
# save and load as json
config.cont_to_disk_as_json('config.json')
# config.json contents -------------#
# { #
# "loading": { #
# "batch_size": 16, #
# "dir": "/dataset/images" #
# }, #
# "training": { #
# "dropout": true, #
# "lr": 0.1, #
# "optim": "ADAM" #
# } #
# } #
# ----------------------------------#
loaded = ivy.Container.cont_from_disk_as_json('config.json')
assert (config == loaded).cont_all_true()
Comparisons
-----------
Comparing differences between containers can be achieved on a per-leaf basis.
This is useful for debugging and also comparing configurations between runs.
For example, consider a case where two containers of arrays should be identical at all levels.
We can then very quickly find conflicting leaves.
.. code-block:: python
cnt0 = ivy.Container({'a': ivy.array([0.]),
'b': ivy.array([1.])})
cnt1 = cnt0.cont_deep_copy()
cnt1.b = ivy.array([0.])
print(ivy.Container.cont_diff(cnt0, cnt1))
{
a: ivy.array([0.]),
b: {
diff_0: ivy.array([1.]),
diff_1: ivy.array([0.])
}
}
Or perhaps we saved JSON configuration files to disk for two different experiment runs, and then want to quickly see their differences.
The :meth:`ivy.Container.cont_diff` method will also detect differences in the hierarchical structure and key name differences.
.. code-block:: python
config0 = ivy.Container(
{'batch_size': 8,
'lr': 0.1,
'optim': 'ADAM'})
config1 = ivy.Container(
{'batch_size': 16,
'dropout': 0.5,
'lr': 0.1})
print(ivy.Container.cont_diff(config0, config1))
{
batch_size: {
diff_0: 8,
diff_1: 16
},
dropout: {
diff_1: 0.5
},
lr: 0.1,
optim: {
diff_0: ADAM
}
}
The :meth:`ivy.Container.cont_diff` method can be applied to arbitrarily many containers at once in a single call, not just two as in the examples above.
Customized Representations
--------------------------
Not only does :class:`ivy.Container` print to the terminal in a very intuitive manner, but there are also helper functions to fully control this representation.
This is very helpful when debugging networks with huge numbers of parameters with a deep hierarchical structure for example.
If our networks weights go many levels deep in the nested hierarchy, we might not want to see all of them when printing our container to the screen.
Consider the following nested structure.
.. code-block:: python
weights = ivy.Container(
{'decoder':
{'l0':
{'b': ivy.array([0.]),
'w': ivy.array([[0.]])},
'l1':
{'b': ivy.array([0.]),
'w': ivy.array([[0.]])}},
'encoder':
{'l0':
{'b': ivy.array([0.]),
'w': ivy.array([[0.]])},
'l1':
{'b': ivy.array([0.]),
'w': ivy.array([[0.]])}},
'l0':
{'b': ivy.array([0.]),
'w': ivy.array([[0.]])},
'l1':
{'b': ivy.array([0.]),
'w': ivy.array([[0.]])}})
We can clip the depth of the printed container in order to make the structure of the root keys clearer.
All nested structures below this depth are truncated into single keys with a “__” delimiter appending all keys below this depth.
.. code-block:: python
weights.cont_flatten_key_chains(above_height=1)
{
decoder__l0: {
b: ivy.array([0.]),
w: ivy.array([[0.]])
},
decoder__l1: {
b: ivy.array([0.]),
w: ivy.array([[0.]])
},
encoder__l0: {
b: ivy.array([0.]),
w: ivy.array([[0.]])
},
encoder__l1: {
b: ivy.array([0.]),
w: ivy.array([[0.]])
},
l0: {
b: ivy.array([0.]),
w: ivy.array([[0.]])
},
l1: {
b: ivy.array([0.]),
w: ivy.array([[0.]])
}
}
Likewise, we can clip the height of the printed container in order to make the structure of the leaf keys clearer.
All nested structures above this height are truncated into single keys with a “__” delimiter appending all keys above this height.
.. code-block:: python
weights.cont_flatten_key_chains(below_depth=1)
{
decoder: {
l0__b: ivy.array([0.]),
l0__w: ivy.array([[0.]]),
l1__b: ivy.array([0.]),
l1__w: ivy.array([[0.]])
},
encoder: {
l0__b: ivy.array([0.]),
l0__w: ivy.array([[0.]]),
l1__b: ivy.array([0.]),
l1__w: ivy.array([[0.]])
},
l0: {
b: ivy.array([0.]),
w: ivy.array([[0.]])
},
l1: {
b: ivy.array([0.]),
w: ivy.array([[0.]])
}
}
These are very useful methods when stepping through code and debugging complex nested structures such as the weights of a network.
There are also methods: :code:`cont_with_print_limit` for controlling the printable size of arrays before the shape is instead displayed, :code:`cont_with_key_length_limit` for setting the maximum key length before string clipping, :code:`cont_with_print_indent` for controlling the nested indent, and many more.
Check out the `docs <../../../docs/data_classes/data_classes/ivy.data_classes.container.rst>`_ for more details!
Use Cases
---------
We’ll now just go through a few of the different use cases for the Ivy Container.
The container is not limited to these use cases though, the container is the right choice whenever you are storing nested data!
Compartmentalization
--------------------
The most obvious use case for the :class:`ivy.Container` class is to compartmentalize inputs into a useful structure.
For example, without better foresight, we could untidily implement a function :code:`update_agent` as follows:
.. code-block:: python
def normalize_img(img):
img_max = ivy.reduce_max(img)
img_min = ivy.reduce_min(img)
img_range = img_max - img_min
return (img - img_min) / img_range
def update_agent(agent_position, agent_velocity,
agent_cam_front_rgb, agent_cam_front_depth,
agent_cam_rear_rgb, agent_cam_rear_depth,
agent_cam_lidar):
# update agent state
agent_position += ivy.array([0., 1., 2.])
agent_velocity -= ivy.array([2., 1., 0.])
# normalize images
agent_cam_front_rgb = normalize_img(agent_cam_front_rgb)
agent_cam_front_depth = normalize_img(agent_cam_front_depth)
agent_cam_rear_rgb = normalize_img(agent_cam_rear_rgb)
agent_cam_rear_depth = normalize_img(agent_cam_rear_depth)
agent_cam_lidar = normalize_img(agent_cam_lidar)
# return
return agent_position, agent_velocity, agent_cam_front_rgb,\
agent_cam_front_depth, agent_cam_rear_rgb,\
agent_cam_rear_depth, agent_cam_lidar
Our code will be much cleaner if we do something like the following, particularly if there are many additional similar functions performing operations on the agent and the images:
.. code-block:: python
class Cameras(ivy.Container):
def __init__(self, front_rgb: ivy.Array, front_depth: ivy.Array,
rear_rgb: ivy.Array, rear_depth: ivy.Array,
lidar: ivy.Array):
super().__init__(self,
front={'rgb': front_rgb,
'depth': front_depth},
rear={'rgb': rear_rgb,
'depth': rear_depth},
lidar=lidar)
class Agent(ivy.Container):
def __init__(self, position: ivy.Array,
velocity: ivy.Array, cams: Cameras):
super().__init__(self, position=position,
velocity=velocity, cams=cams)
def update_agent(agent: Agent):
# update agent state
agent.position += ivy.array([0., 1., 2.])
agent.velocity -= ivy.array([2., 1., 0.])
# normalize images
cam_max = agent.cams.reduce_max()
cam_min = agent.cams.reduce_min()
cam_range = cam_max - cam_min
agent.cams = (agent.cams - cam_min) / cam_range
Of course, this argument holds for the use of custom classes or built-in containers (Python list, dict, tuple etc.), and isn’t only relevant for the Ivy container.
However, the recursive methods of the Ivy Container make things even more convenient, such as where we recursively normalize all five images in the final four lines of the :code:`update_agent` method.
Configuration
--------------
As briefly alluded to when explaining the :meth:`ivy.Container.cont_diff` method, the container class is also the ideal data type for storing experiment configurations.
Configurations can either first be stored to disk as a JSON file and then loaded into the :class:`ivy.Container` for recursive comparisons to see differences between experiments, or the config can be specified in the code and then saved to disk as a JSON to keep a permanent log afterwards.
Data loading
------------
The container can also be used for data loading.
Our example uses single threaded loading, but incorporating multiprocessing with Queues is also pretty straightforward.
To start with, let’s assume we have an image Dataset saved to disk with separate images for a front camera and a rear camera for each point in time.
We can then load this Dataset with a configurable batch size like so, and we can easily iterate between each item in the batch.
This is useful if we need to recursively unroll the entire batch in the time dimension for example.
.. code-block:: python
class DataLoader:
def __init__(self, batch_size):
self._cnt = ivy.Container(
dict(imgs={'front': 'images/front/img_{}.png',
'rear': 'images/rear/img_{}.png'}))
self._dataset_size = 8
self._batch_size = batch_size
self._count = 0
def __next__(self):
cnt = self._cnt.cont_copy()
# image filenames
img_fnames = ivy.Container.cont_list_stack(
[cnt.imgs.cont_map(
lambda fname, _: fname.format(self._count + i)
) for i in range(self._batch_size)], 0
)
# load from disk
loaded_imgs = img_fnames.cont_map(
lambda fnames, _: np.concatenate(
[np.expand_dims(cv2.imread(fname, -1), 0)
for fname in fnames], 0
)
).from_numpy()
# update count
self._count += self._batch_size
self._count %= self._dataset_size
# return batch
cnt.imgs = loaded_imgs
return cnt
loader = DataLoader(2)
for _ in range(100):
batch = next(loader)
assert batch.imgs.front.shape == (2, 32, 32, 3)
assert batch.imgs.rear.shape == (2, 32, 32, 3)
for batch_slice in batch.unstack(0):
assert batch_slice.imgs.front.shape == (32, 32, 3)
assert batch_slice.imgs.rear.shape == (32, 32, 3)
Network weights
---------------
Finally, the Ivy Containers can also be used for storing network weights.
In fact, as is discussed in the documentation for the Ivy stateful API, this is how the :class:`ivy.Module` class stores all trainable variables in the model.
The following code is possible thanks to the recursive operation of the container, which applies the gradient update to all variable arrays in the container recursively.
.. code-block:: python
class MyModel(ivy.Module):
def __init__(self):
self.linear0 = ivy.Linear(3, 64)
self.linear1 = ivy.Linear(64, 1)
ivy.Module.__init__(self)
def _forward(self, x):
x = ivy.relu(self.linear0(x))
return ivy.sigmoid(self.linear1(x))
model = MyModel()
x_in = ivy.array([1., 2., 3.])
target = ivy.array([0.])
lr = 0.001
def loss_fn(v):
out = model(x_in, v=v)
return ivy.reduce_mean((out - target)**2)[0]
for step in range(100):
loss, grads = ivy.execute_with_gradients(
loss_fn, model.v)
model.v = model.v - lr * grads
print(f'step {step} loss {ivy.to_numpy(loss).item()}')
print(model.v)
{
linear0: {
b: (<class torch.Tensor> shape=[64]),
w: (<class torch.Tensor> shape=[64, 3])
},
linear1: {
b: tensor([-0.0145], grad_fn=<AddBackward0>),
w: (<class torch.Tensor> shape=[1, 64])
}
}
**Round Up**
That should hopefully be enough to get you started with the :class:`ivy.Container` class 😊
Please reach out on `discord <https://discord.gg/sXyFF8tDtm>`_ if you have any questions!
| ivy/docs/overview/design/ivy_as_a_framework/ivy_container.rst/0 | {
"file_path": "ivy/docs/overview/design/ivy_as_a_framework/ivy_container.rst",
"repo_id": "ivy",
"token_count": 9709
} | 6 |
.. _`RWorks API Standards`:
API Standards
=============
.. _`Array API Standard`: https://data-apis.org/array-api/latest/
.. _`discord`: https://discord.gg/sXyFF8tDtm
API standards are standardized application programming interfaces (APIs) which define the function signatures which similar libraries should adhere to for maximal interoperability between those libraries.
Array API Standard
------------------
The `Array API Standard`_ defines a unified application programming interface (API) for Python libraries which perform numerical operations on high dimensional arrays (tensors).
This standard can be considered as “higher level” than the ML frameworks themselves, given that the standard defines the functions without implementing them, whereas the frameworks include implementations for all of the functions which fit into this standard API, with all the lower level considerations also handled within these implementations.
The Array API Standard takes the lowest common denominator approach, whereby each function in the standard represents the minimum behaviors of the function without restricting extensions to the function.
This means that two very different libraries can adhere to the same standard, despite having very different extended behaviors for some of the functions in the standard.
The standard is also not exhaustive.
For example, there are <insert_number> functions defined in the standard, whereas the functions defined in each framework are as follows:
<insert_table>
Therefore, two frameworks which adhere to the standard will still have major differences by virtue of the extra functions they support which are not present in the standard.
| ivy/docs/overview/related_work/api_standards.rst/0 | {
"file_path": "ivy/docs/overview/related_work/api_standards.rst",
"repo_id": "ivy",
"token_count": 336
} | 7 |
__version__ = "0.0.7.2"
| ivy/ivy/_version.py/0 | {
"file_path": "ivy/ivy/_version.py",
"repo_id": "ivy",
"token_count": 14
} | 8 |
# global
import abc
from typing import Optional, Union
# local
import ivy
class _ArrayWithCreationExperimental(abc.ABC):
def eye_like(
self: ivy.Array,
/,
*,
k: int = 0,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.eye_like. This method
simply wraps the function, and so the docstring for ivy.eye_like also
applies to this method with minimal changes.
Parameters
----------
self
input array from which to derive the output array shape.
k
index of the diagonal. A positive value refers to an upper diagonal,
a negative value to a lower diagonal, and 0 to the main diagonal.
Default: ``0``.
dtype
output array data type. If ``dtype`` is ``None``, the output array data type
must be inferred from ``self``. Default: ``None``.
device
device on which to place the created array. If ``device`` is ``None``, the
output array device must be inferred from ``self``. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array having the same shape as ``self`` and filled with ``ones``
in diagonal ``k`` and ``zeros`` elsewhere.
Examples
--------
>>> x = ivy.array([[2, 3, 8],[1, 2, 1]])
>>> y = x.eye_like()
>>> print(y)
ivy.array([[1., 0., 0.],
0., 1., 0.]])
"""
return ivy.eye_like(self._data, k=k, dtype=dtype, device=device, out=out)
def unsorted_segment_min(
self: ivy.Array,
segment_ids: ivy.Array,
num_segments: Union[int, ivy.Array],
) -> ivy.Array:
r"""ivy.Array instance method variant of ivy.unsorted_segment_min. This
method simply wraps the function, and so the docstring for
ivy.unsorted_segment_min also applies to this method with minimal
changes.
Note
----
If the given segment ID `i` is negative, then the corresponding
value is dropped, and will not be included in the result.
Parameters
----------
self
The array from which to gather values.
segment_ids
Must be in the same size with the first dimension of `self`. Has to be
of integer data type. The index-th element of `segment_ids` array is
the segment identifier for the index-th element of `self`.
num_segments
An integer or array representing the total number of distinct segment IDs.
Returns
-------
ret
The output array, representing the result of a segmented min operation.
For each segment, it computes the min value in `self` where `segment_ids`
equals to segment ID.
"""
return ivy.unsorted_segment_min(self._data, segment_ids, num_segments)
def unsorted_segment_sum(
self: ivy.Array,
segment_ids: ivy.Array,
num_segments: Union[int, ivy.Array],
) -> ivy.Array:
r"""ivy.Array instance method variant of ivy.unsorted_segment_sum. This
method simply wraps the function, and so the docstring for
ivy.unsorted_segment_sum also applies to this method with minimal
changes.
Parameters
----------
self
The array from which to gather values.
segment_ids
Must be in the same size with the first dimension of `self`. Has to be
of integer data type. The index-th element of `segment_ids` array is
the segment identifier for the index-th element of `self`.
num_segments
An integer or array representing the total number of distinct segment IDs.
Returns
-------
ret
The output array, representing the result of a segmented sum operation.
For each segment, it computes the sum of values in `self` where
`segment_ids` equals to segment ID.
"""
return ivy.unsorted_segment_sum(self._data, segment_ids, num_segments)
def blackman_window(
self: ivy.Array,
/,
*,
periodic: bool = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.blackman_window. This
method simply wraps the function, and so the docstring for
ivy.blackman_window also applies to this method with minimal changes.
Parameters
----------
self
int.
periodic
If True, returns a window to be used as periodic function.
If False, return a symmetric window.
Default: ``True``.
dtype
output array data type. If ``dtype`` is ``None``, the output array data type
must be inferred from ``self``. Default: ``None``.
device
device on which to place the created array. If ``device`` is ``None``, the
output array device must be inferred from ``self``. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The array containing the window.
Examples
--------
>>> ivy.blackman_window(4, periodic = True)
ivy.array([-1.38777878e-17, 3.40000000e-01, 1.00000000e+00, 3.40000000e-01])
>>> ivy.blackman_window(7, periodic = False)
ivy.array([-1.38777878e-17, 1.30000000e-01, 6.30000000e-01, 1.00000000e+00,
6.30000000e-01, 1.30000000e-01, -1.38777878e-17])
"""
return ivy.blackman_window(self._data, periodic=periodic, dtype=dtype, out=out)
def trilu(
self: ivy.Array,
/,
*,
k: int = 0,
upper: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.trilu. This method simply
wraps the function, and so the docstring for ivy.trilu also applies to
this method with minimal changes.
Parameters
----------
self
input array having shape (..., M, N) and whose innermost two dimensions form
MxN matrices. *,
k
diagonal below or above which to zero elements. If k = 0, the diagonal is
the main diagonal. If k < 0, the diagonal is below the main diagonal. If
k > 0, the diagonal is above the main diagonal. Default: ``0``.
upper
indicates whether upper or lower part of matrix is retained.
Default: ``True``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the upper triangular part(s). The returned array must
have the same shape and data type as ``self``. All elements below the
specified diagonal k must be zeroed. The returned array should be allocated
on the same device as ``self``.
"""
return ivy.trilu(self._data, k=k, upper=upper, out=out)
@staticmethod
def mel_weight_matrix(
num_mel_bins: Union[int, ivy.Array],
dft_length: Union[int, ivy.Array],
sample_rate: Union[int, ivy.Array],
lower_edge_hertz: Optional[Union[float, ivy.Array]] = 0.0,
upper_edge_hertz: Optional[Union[float, ivy.Array]] = 3000.0,
):
"""Generate a MelWeightMatrix that can be used to re-weight a Tensor
containing a linearly sampled frequency spectra (from DFT or STFT) into
num_mel_bins frequency information based on the [lower_edge_hertz,
upper_edge_hertz]
range on the mel scale. This function defines the mel scale
in terms of a frequency in hertz according to the following
formula: mel(f) = 2595 * log10(1 + f/700)
Parameters
----------
num_mel_bins
The number of bands in the mel spectrum.
dft_length
The size of the original DFT obtained from (n_fft / 2 + 1).
sample_rate
Samples per second of the input signal.
lower_edge_hertz
Lower bound on the frequencies to be included in the mel spectrum.
upper_edge_hertz
The desired top edge of the highest frequency band.
Returns
-------
ret
MelWeightMatrix of shape: [frames, num_mel_bins].
Examples
--------
>>> x = ivy.array([[1, 2, 3],
>>> [1, 1, 1],
>>> [5,6,7 ]])
>>> x.mel_weight_matrix(3, 3, 8000)
ivy.array([[0. ,0. , 0.],
[0. ,0. , 0.75694758],
[0. ,0. , 0. ]])
"""
return ivy.mel_weight_matrix(
num_mel_bins,
dft_length,
sample_rate,
lower_edge_hertz,
upper_edge_hertz,
)
def unsorted_segment_mean(
self: ivy.Array,
segment_ids: ivy.Array,
num_segments: Union[int, ivy.Array],
) -> ivy.Array:
"""Compute the mean of values in the array 'self' based on segment
identifiers.
Parameters
----------
self : ivy.Array
The array from which to gather values.
segment_ids : ivy.Array
Must be in the same size with the first dimension of `self`. Has to be
of integer data type. The index-th element of `segment_ids` array is
the segment identifier for the index-th element of `self`.
num_segments : Union[int, ivy.Array]
An integer or array representing the total number of distinct segment IDs.
Returns
-------
ret : ivy.Array
The output array, representing the result of a segmented mean operation.
For each segment, it computes the mean of values in `self` where
`segment_ids` equals to segment ID.
Examples
--------
>>> data = ivy.array([1.0, 2.0, 3.0, 4.0])
>>> segment_ids = ivy.array([0, 0, 0, 0])
>>> num_segments = 1
>>> result = ivy.unsorted_segment_mean(data, segment_ids, num_segments)
>>> result
ivy.array([2.5])
>>> data = ivy.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
>>> segment_ids = ivy.array([0, 0, 1, 1, 2, 2])
>>> num_segments = 3
>>> result = ivy.unsorted_segment_mean(data, segment_ids, num_segments)
>>> result
ivy.array([[1.5, 3.5, 5.5],[1.5, 3.5, 5.5],[1.5, 3.5, 5.5]])
"""
return ivy.unsorted_segment_mean(self._data, segment_ids, num_segments)
def polyval(
coeffs=ivy.Array,
x=Union[ivy.Array, ivy.NativeArray, int, float],
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
) -> ivy.Array:
"""ivy.Array instance method of polyval. This method simply wraps the
function, and so the docstring for ivy.polyval also applies to this method
with minimal changes.
Evaluate and return a polynomial at specific given values.
Parameters
----------
coeffs
Input array containing polynomial coefficients (including zero)
from highest degree to constant term.
x
The value of the indeterminate variable at which to evaluate the polynomial.
Returns
-------
ret
Simplified result of substituting x in the coefficients - final value of
polynomial.
Examples
--------
>>> x = ivy.array([[0, 0, 0])
>>> x.polyval([3, 0, 1], 5)
ivy.array(76)
"""
return ivy.polyval(
coeffs,
x,
)
| ivy/ivy/data_classes/array/experimental/creation.py/0 | {
"file_path": "ivy/ivy/data_classes/array/experimental/creation.py",
"repo_id": "ivy",
"token_count": 5458
} | 9 |
# global
import abc
from typing import Optional, Union, Tuple, Sequence
# local
import ivy
class _ArrayWithStatisticalExperimental(abc.ABC):
def histogram(
self: ivy.Array,
/,
*,
bins: Optional[Union[int, ivy.Array, ivy.NativeArray, str]] = None,
axis: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
extend_lower_interval: Optional[bool] = False,
extend_upper_interval: Optional[bool] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
range: Optional[Tuple[float]] = None,
weights: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
density: Optional[bool] = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.histogram. This method
simply wraps the function, and so the docstring for ivy.histogram also
applies to this method with minimal changes.
Parameters
----------
self
input array.
bins
if ``bins`` is an int, it defines the number of equal-width bins in the
given range.
if ``bins`` is an array, it defines a monotonically increasing array of bin
edges, including the rightmost edge, allowing for non-uniform bin widths.
axis
dimension along which maximum values must be computed. By default, the
maximum value must be computed over the entire array. Default: ``None``.
extend_lower_interval
if True, extend the lowest interval I0 to (-inf, c1].
extend_upper_interval
ff True, extend the upper interval I_{K-1} to [c_{K-1}, +inf).
dtype
the output type.
range
the lower and upper range of the bins. The first element of the range must
be less than or equal to the second.
weights
each value in ``a`` only contributes its associated weight towards the bin
count (instead of 1). Must be of the same shape as a.
density
if True, the result is the value of the probability density function at the
bin, normalized such that the integral over the range of bins is 1.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
a tuple containing the values of the histogram and the bin edges.
Both the description and the type hints above assumes an array input for
simplicity, but this function is *nestable*, and therefore also accepts
:class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0, 1, 2])
>>> y = ivy.array([0., 0.5, 1., 1.5, 2.])
>>> z = ivy.histogram(x, bins=y)
>>> print(z)
ivy.array([1., 0., 1., 1.])
"""
return ivy.histogram(
self._data,
bins=bins,
axis=axis,
extend_lower_interval=extend_lower_interval,
extend_upper_interval=extend_upper_interval,
dtype=dtype,
range=range,
weights=weights,
density=density,
out=out,
)
def median(
self: ivy.Array,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.median. This method simply
wraps the function, and so the docstring for ivy.median also applies to
this method with minimal changes.
Parameters
----------
self
Input array.
axis
Axis or axes along which the medians are computed. The default is to compute
the median along a flattened version of the array.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one.
out
optional output array, for writing the result to.
Returns
-------
ret
The median of the array elements.
Examples
--------
>>> a = ivy.array([[10, 7, 4], [3, 2, 1]])
>>> a.median()
3.5
>>> a.median(axis=0)
ivy.array([6.5, 4.5, 2.5])
"""
return ivy.median(self._data, axis=axis, keepdims=keepdims, out=out)
def nanmean(
self: ivy.Array,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.nanmean. This method simply
wraps the function, and so the docstring for ivy.nanmean also applies
to this method with minimal changes.
Parameters
----------
self
Input array.
axis
Axis or axes along which the means are computed.
The default is to compute the mean of the flattened array.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original a. If the value is anything but the default,
then keepdims will be passed through to the mean or sum methods of
sub-classes of ndarray. If the sub-classes methods does not implement
keepdims any exceptions will be raised.
dtype
The desired data type of returned tensor. Default is None.
out
optional output array, for writing the result to.
Returns
-------
ret
The nanmean of the array elements.
Examples
--------
>>> a = ivy.array([[1, ivy.nan], [3, 4]])
>>> a.nanmean()
2.6666666666666665
>>> a.nanmean(axis=0)
ivy.array([2., 4.])
"""
return ivy.nanmean(
self._data, axis=axis, keepdims=keepdims, dtype=dtype, out=out
)
def nanmin(
self: ivy.Array,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: Optional[bool] = False,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[ivy.Array] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.nanmin. This method simply
wraps the function, and so the docstring for ivy.min also applies to
this method with minimal changes.
Parameters
----------
self
Input array.
axis
Axis or axes along which the minimum is computed.
The default is to compute the minimum of the flattened array.
out
optional output array, for writing the result to.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original a.
initial
The maximum value of an output element
where
Elements to compare for the minimum
Returns
-------
ret
Return minimum of an array or minimum along an axis, ignoring any NaNs.
Examples
--------
>>> a = ivy.array([[1, 2], [3, ivy.nan]])
>>> a.nanmin(a)
1.0
>>> a.nanmin(a, axis=0)
ivy.array([1., 2.])
"""
return ivy.nanmin(
self._data,
axis=axis,
keepdims=keepdims,
out=out,
initial=initial,
where=where,
)
def nanprod(
self: ivy.Array,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
keepdims: Optional[bool] = False,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.nanprod. This method simply
wraps the function, and so the docstring for ivy.prod also applies to
this method with minimal changes.
Parameters
----------
self
Input array.
axis
Axis or axes along which the product is computed.
The default is to compute the product of the flattened array.
dtype
The desired data type of returned array. Default is None.
out
optional output array, for writing the result to.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original a.
initial
The starting value for this product.
where
Elements to include in the product
Returns
-------
ret
The product of array elements over a given axis treating
Not a Numbers (NaNs) as ones
Examples
--------
>>> a = ivy.array([[1, 2], [3, ivy.nan]])
>>> a.nanprod(a)
6.0
>>> a.nanprod(a, axis=0)
ivy.array([3., 2.])
"""
return ivy.nanprod(
self._data,
axis=axis,
keepdims=keepdims,
dtype=dtype,
out=out,
initial=initial,
where=where,
)
def quantile(
self: ivy.Array,
q: Union[ivy.Array, float],
/,
*,
axis: Optional[Union[Sequence[int], int]] = None,
keepdims: bool = False,
interpolation: str = "linear",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.quantile. This method
simply wraps the function, and so the docstring for ivy.quantile also
applies to this method with minimal changes.
Parameters
----------
self
Input array.
q
Quantile or sequence of quantiles to compute, which must be
between 0 and 1 inclusive.
axis
Axis or axes along which the quantiles are computed. The default
is to compute the quantile(s) along a flattened version of the array.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original array a.
interpolation
{'nearest', 'linear', 'lower', 'higher', 'midpoint'}. Default value:
'linear'.
This specifies the interpolation method to use when the desired quantile
lies between two data points i < j:
- linear: i + (j - i) * fraction, where fraction is the fractional part of
the index surrounded by i and j.
- lower: i.
- higher: j.
- nearest: i or j, whichever is nearest.
- midpoint: (i + j) / 2. linear and midpoint interpolation do not work with
integer dtypes.
out
optional output array, for writing the result to.
Returns
-------
ret
A (rank(q) + N - len(axis)) dimensional array of same dtype as a, or,
if axis is None, a rank(q) array. The first rank(q) dimensions index
quantiles for different values of q.
Examples
--------
>>> a = ivy.array([[10., 7., 4.], [3., 2., 1.]])
>>> q = ivy.array(0.5)
>>> a.quantile(q)
ivy.array(3.5)
>>> a = ivy.array([[10., 7., 4.], [3., 2., 1.]])
>>> q = 0.5
>>> a.quantile(q)
ivy.array(3.5)
>>> a.quantile(q, axis=0)
ivy.array([6.5, 4.5, 2.5])
>>> a.quantile(q, axis=1)
ivy.array([7., 2.])
>>> a.quantile(q, axis=1, keepdims=True)
ivy.array([[7.],[2.]])
>>> a = ivy.array([1., 2., 3., 4.])
>>> q = ivy.array([0.3, 0.7])
>>> a.quantile(q, interpolation='lower')
ivy.array([1., 3.])
"""
return ivy.quantile(
self._data,
q,
axis=axis,
keepdims=keepdims,
interpolation=interpolation,
out=out,
)
def corrcoef(
self: ivy.Array,
/,
*,
y: Optional[ivy.Array] = None,
rowvar: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.corrcoef. This method
simply wraps the function, and so the docstring for ivy.corrcoef also
applies to this method with minimal changes.
Parameters
----------
self
Input array.
y
An additional input array.
`y` has the same shape as `x`.
rowvar
If rowvar is True (default), then each row represents a variable, with
observations in the columns. Otherwise, the relationship is transposed:
each column represents a variable, while the rows contain observations.
Returns
-------
ret
The corrcoef of the array elements.
Examples
--------
>>> a = ivy.array([[0., 1., 2.], [2., 1., 0.]])
>>> a.corrcoef()
ivy.array([[ 1., -1.],
[-1., 1.]])
>>> a.corrcoef(rowvar=False)
ivy.array([[ 1., nan, -1.],
[nan, nan, nan],
[-1., nan, 1.]])
"""
return ivy.corrcoef(self._data, y=y, rowvar=rowvar, out=out)
def nanmedian(
self: ivy.Array,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: bool = False,
overwrite_input: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.nanmedian. This method
simply wraps the function, and so the docstring for ivy.nanmedian also
applies to this method with minimal changes.
Parameters
----------
self
Input array.
axis
The axis or axes along which the means are computed.
The default is to compute the mean of the flattened array.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original input array. If the value is anything
but the default, then keepdims will be passed through to the mean or
sum methods of sub-classes of ndarray. If the sub-classes methods does
not implement keepdims any exceptions will be raised.
overwrite_input
If True, then allow use of memory of input array a for calculations.
The input array will be modified by the call to median. This will
save memory when you do not need to
preserve the contents of the input array.
Treat the input as undefined, but it will
probably be fully or partially sorted.
Default is False. If overwrite_input
is True and input array is not already an ndarray,
an error will be raised.
out
optional output array, for writing the result to.
Returns
-------
ret
A new array holding the result. If the input contains integers
Examples
--------
With :class:`ivy.array` input and default backend set as `numpy`:
>>> a = ivy.array([[10.0, ivy.nan, 4], [3, 2, 1]])
>>> a.nanmedian()
ivy.array(3.)
>>> a.nanmedian(axis=0)
ivy.array([6.5, 2. , 2.5])
"""
return ivy.nanmedian(
self._data,
axis=axis,
keepdims=keepdims,
overwrite_input=overwrite_input,
out=out,
)
def bincount(
self,
/,
*,
weights: Optional[ivy.Array] = None,
minlength: int = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.bincount. This method
simply wraps the function, and so the docstring for ivy.bincount also
applies to this method with minimal changes.
Parameters
----------
self
Input array. The array is flattened if it is not already 1-dimensional.
weights
Optional weights, array of the same shape as self.
minlength
A minimum number of bins for the output array.
out
An array of the same shape as the returned array, or of the shape
(minlength,) if minlength is specified.
Returns
-------
ret
The result of binning the input array.
Examples
--------
>>> a = ivy.array([0, 1, 1, 3, 2, 1, 7])
>>> a.bincount()
ivy.array([1, 3, 1, 1, 0, 0, 0, 1])
>>> a.bincount(minlength=10)
ivy.array([1, 3, 1, 1, 0, 0, 0, 1, 0, 0])
>>> a.bincount(weights=ivy.array([0.3, 0.5, 0.2, 0.7, 1., 0.6, 1.]))
ivy.array([0.3, 1.3, 1. , 0.7, 0. , 0. , 0. , 1. ])
"""
return ivy.bincount(
self._data,
weights=weights,
minlength=minlength,
out=out,
)
def igamma(
self: ivy.Array,
/,
*,
x: Union[ivy.Array, ivy.NativeArray],
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.igamma. This method simply
wraps the function, and so the docstring for ivy.igamma also applies to
this method with minimal changes.
Parameters
----------
self
Input array.
x
An additional input array.
`x` has the same type as `a`.
out
optional output array, for writing the result to.
Returns
-------
ret
The lower incomplete gamma function of the array elements.
Examples
--------
>>> a = ivy.array([2.5])
>>> x = ivy.array([1.7, 1.2])
>>> a.igamma(x)
ivy.array([0.3614, 0.2085])
"""
return ivy.igamma(
self._data,
x=x,
out=out,
)
def cov(
self: ivy.Array,
x2: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
/,
*,
rowVar: bool = True,
bias: bool = False,
ddof: Optional[int] = None,
fweights: Optional[ivy.Array] = None,
aweights: Optional[ivy.Array] = None,
dtype: Optional[type] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.cov. This method simply
wraps the function, and so the docstring for ivy.cov also applies to
this method with minimal changes.
Parameters
----------
self
a 1D or 2D input array, with a numeric data type.
x2
optional second 1D or 2D input array, with a numeric data type.
Must have the same shape as ``self``.
rowVar
optional variable where each row of input is interpreted as a variable
(default = True). If set to False, each column is instead interpreted as a
variable.
bias
optional variable for normalizing input (default = False) by (N - 1) where
N is the number of given observations. If set to True, then normalization
is instead by N. Can be overridden by keyword ``ddof``.
ddof
optional variable to override ``bias`` (default = None). ddof=1 will return
the unbiased estimate, even with fweights and aweights given. ddof=0 will
return the simple average.
fweights
optional 1D array of integer frequency weights; the number of times each
observation vector should be repeated.
aweights
optional 1D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ddof=0 is specified, the array
of weights can be used to assign probabilities to observation vectors.
dtype
optional variable to set data-type of the result. By default, data-type
will have at least ``float64`` precision.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the covariance matrix of an input matrix, or the
covariance matrix of two variables. The returned array must have a
floating-point data type determined by Type Promotion Rules and must be
a square matrix of shape (N, N), where N is the number of variables in the
input(s).
Examples
--------
>>> x = ivy.array([[1, 2, 3],
... [4, 5, 6]])
>>> y = x[0].cov(x[1])
>>> print(y)
ivy.array([[1., 1.],
[1., 1.]])
>>> x = ivy.array([1,2,3])
>>> y = ivy.array([4,5,6])
>>> z = x.cov(y)
>>> print(z)
ivy.array([[1., 1.],
[1., 1.]])
"""
return ivy.cov(
self._data,
x2,
rowVar=rowVar,
bias=bias,
ddof=ddof,
fweights=fweights,
aweights=aweights,
dtype=dtype,
)
def cummax(
self: ivy.Array,
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.cummax. This method simply
wraps the function, and so the docstring for ivy.cummax also applies to
this method with minimal changes.
Parameters
----------
self
input array
axis
int, axis along which to take the cumulative maximum. Default is ``0``.
reverse
Whether to perform the cummax from last to first element in the selected
axis. Default is ``False`` (from first to last element)
dtype
data type of the returned array. If None, if the default data type
corresponding to the data type “kind” (integer or floating-point) of x
has a smaller range of values than the data type of x (e.g., x has data
type int64 and the default data type is int32, or x has data type uint64
and the default data type is int64), the returned array must have the
same data type as x. if x has a floating-point data type, the returned array
must have the default floating-point data type. if x has a signed integer
data type (e.g., int16), the returned array must have the default integer
data type. if x has an unsigned integer data type (e.g., uint16), the
returned array must have an unsigned integer data type having the same
number of bits as the default integer data type (e.g., if the default
integer data type is int32, the returned array must have a uint32 data
type). If the data type (either specified or resolved) differs from the
data type of x, the input array should be cast to the specified data type
before computing the product. Default: ``None``.
out
optional output array, for writing the result to.
Returns
-------
ret
Input array with cumulatively multiplied elements along the specified axis.
--------
>>> x = ivy.array([1, 2, 5, 4, 3])
>>> y = x.cummax()
>>> print(y)
(ivy.array([1, 2, 5, 5, 5]), ivy.array([0, 1, 2, 2, 2]))
>>> x = ivy.array([[2, 3], [5, 7], [11, 13]])
>>> y = ivy.zeros((3, 2), dtype="int32")
>>> x.cummax(axis=1, reverse=True, out=y)
>>> print(y)
ivy.array([[0, 0],
[0, 0],
[0, 0]])
"""
return ivy.cummax(
self._data,
axis=axis,
exclusive=exclusive,
reverse=reverse,
dtype=dtype,
out=out,
)
def cummin(
self: ivy.Array,
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.cummin. This method simply
wraps the function, and so the docstring for ivy.cummin also applies to
this method with minimal changes.
Parameters
----------
self
input array
axis
int, axis along which to take the cumulative minimum. Default is ``0``.
reverse
Whether to perform the cummin from last to first element in the selected
axis. Default is ``False`` (from first to last element)
dtype
data type of the returned array. If None, if the default data type
corresponding to the data type “kind” (integer or floating-point) of x
has a smaller range of values than the data type of x (e.g., x has data
type int64 and the default data type is int32, or x has data type uint64
and the default data type is int64), the returned array must have the
same data type as x. if x has a floating-point data type, the returned array
must have the default floating-point data type. if x has a signed integer
data type (e.g., int16), the returned array must have the default integer
data type. if x has an unsigned integer data type (e.g., uint16), the
returned array must have an unsigned integer data type having the same
number of bits as the default integer data type (e.g., if the default
integer data type is int32, the returned array must have a uint32 data
type). If the data type (either specified or resolved) differs from the
data type of x, the input array should be cast to the specified data type
before computing the product. Default: ``None``.
out
optional output array, for writing the result to.
Returns
-------
ret
Input array with cumulatively multiplied elements along the specified axis.
--------
>>> x = ivy.array([1, 2, 3, 4, 5])
>>> y = x.cummin()
>>> print(y)
ivy.array([1, 1, 1, 1, 1])
>>> x = ivy.array([[2, 3], [5, 7], [11, 13]])
>>> y = ivy.zeros((3, 2), dtype="int32")
>>> x.cummin(axis=1, reverse=True, out=y)
>>> print(y)
ivy.array([[ 2, 3],
[ 5, 7],
[11, 13]])
"""
return ivy.cummin(
self._data,
axis=axis,
exclusive=exclusive,
reverse=reverse,
dtype=dtype,
out=out,
)
| ivy/ivy/data_classes/array/experimental/statistical.py/0 | {
"file_path": "ivy/ivy/data_classes/array/experimental/statistical.py",
"repo_id": "ivy",
"token_count": 12911
} | 10 |
# local
import ivy
# global
from typing import Callable, Type, List, Iterable
from types import ModuleType
TO_IGNORE = ["shape"]
def _wrap_function(function_name: str) -> Callable:
"""Wrap the function called `function_name`.
Parameters
----------
function_name
the name of the function e.g. "abs", "mean" etc.
Returns
-------
new_function
the wrapped function.
Examples
--------
>>> ivy.set_backend("torch")
>>> from ivy.array.wrapping import _wrap_function
>>> absolute = _wrap_function("abs")
>>> x = ivy.array([-1])
>>> print(absolute(x))
ivy.array([1])
"""
def new_function(self, *args, **kwargs):
"""Add the data of the current array from which the instance function
is invoked as the first arg parameter or kwarg parameter.
Return the new function with the name function_name and the new
args variable or kwargs as the new inputs.
"""
function = ivy.__dict__[function_name]
# gives us the position and name of the array argument
data_idx = function.array_spec[0]
if len(args) >= data_idx[0][0]:
args = ivy.copy_nest(args, to_mutable=True)
data_idx = [data_idx[0][0]] + [
0 if idx is int else idx for idx in data_idx[1:]
]
ivy.insert_into_nest_at_index(args, data_idx, self._data)
else:
kwargs = ivy.copy_nest(kwargs, to_mutable=True)
data_idx = [data_idx[0][1]] + [
0 if idx is int else idx for idx in data_idx[1:]
]
ivy.insert_into_nest_at_index(kwargs, data_idx, self._data)
return function(*args, **kwargs)
return new_function
def add_ivy_array_instance_methods(
cls: Type[ivy.Array], modules: List[ModuleType], to_ignore: Iterable = ()
):
"""Loop over all ivy modules such as activations, general, etc. and add the
module functions to ivy arrays as instance methods using _wrap_function.
Parameters
----------
cls
the class we want to add the instance methods to.
modules
the modules to loop over: activations, general etc.
to_ignore
any items we don't want to add an instance method for.
Examples
--------
As shown, `add_ivy_array_instance_methods` adds all the appropriate functions from
the activations module as instance methods to our toy `ArrayExample` class:
>>> from ivy.functional.ivy import activations
>>> class ArrayExample:
... pass
>>> ivy.add_ivy_array_instance_methods(ArrayExample, [activations])
>>> print(hasattr(ArrayExample, "relu"), hasattr(ArrayExample, "softmax"))
True True
"""
to_ignore = TO_IGNORE + list(to_ignore)
for module in modules:
for key, value in module.__dict__.items():
# we skip the cases where the function is protected, the instance
# method has already been added manually and a few other cases
if (
key.startswith("_")
or key[0].isupper()
or not callable(value)
or key in cls.__dict__
or hasattr(cls, key)
or key in to_ignore
or key not in ivy.__dict__
):
continue
try:
setattr(cls, key, _wrap_function(key))
except AttributeError:
pass
| ivy/ivy/data_classes/array/wrapping.py/0 | {
"file_path": "ivy/ivy/data_classes/array/wrapping.py",
"repo_id": "ivy",
"token_count": 1492
} | 11 |
# global
from typing import Optional, Union, List, Dict, Tuple, Sequence
from numbers import Number
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithElementWiseExperimental(ContainerBase):
@staticmethod
def static_amax(
x: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.amax. This method simply
wraps the function, and so the docstring for ivy.amax also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued data type.
axis
axis or axes along which maximum values must be computed.
By default, the maximum value must be computed over the
entire array. If a tuple of integers, maximum values must
be computed over multiple axes. Default: ``None``.
keepdims
optional boolean, if ``True``, the reduced axes
(dimensions) must be included in the result as singleton
dimensions, and, accordingly, the result must be
compatible with the input array
(see `broadcasting<https://data-apis.org/array-api/
latest/API_specification/
broadcasting.html#broadcasting>`_).
Otherwise, if ``False``, the reduced axes (dimensions)
must not be included in the result.
Default: ``False``.
key_chains
The key-chains to apply or not apply the method to.
Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was
not applied. Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
container, if the maximum value was computed over the entire array,
a zero-dimensional array containing the maximum value;
otherwise, a non-zero-dimensional array containing the
maximum values. The returned array must have the same data type
as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container.static_amax(x)
>>> print(y)
{
a: ivy.array(3),
b: ivy.array(4)
}
>>> x = ivy.Container(a=ivy.array([[1, 2, 3], [-1, 0, 2]]),
... b=ivy.array([[2, 3, 4], [0, 1, 2]]))
>>> y = ivy.Container.static_amax(x, axis=1)
>>> print(y)
{
a:ivy.array([3, 2]),
b:ivy.array([4, 2])
}
"""
return ContainerBase.cont_multi_map_in_function(
"amax",
x,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def amax(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.amax. This method
simply wraps the function, and so the docstring for ivy.amax also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued data type.
axis
axis or axes along which maximum values must be computed.
By default, the maximum value must be computed over the
entire array. If a tuple of integers, maximum values must
be computed over multiple axes. Default: ``None``.
keepdims
optional boolean, if ``True``, the reduced axes
(dimensions) must be included in the result as singleton
dimensions, and, accordingly, the result must be
compatible with the input array
(see `broadcasting<https://data-apis.org/array-api/
latest/API_specification/
broadcasting.html#broadcasting>`_).
Otherwise, if ``False``, the reduced axes (dimensions)
must not be included in the result.
Default: ``False``.
key_chains
The key-chains to apply or not apply the method to.
Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was
not applied. Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
container, if the maximum value was computed over the entire array,
a zero-dimensional array containing the maximum value;
otherwise, a non-zero-dimensional array containing the
maximum values. The returned array must have the same data type
as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = x.amax()
>>> print(y)
{
a: ivy.array(3),
b: ivy.array(4)
}
>>> x = ivy.Container(a=ivy.array([[1, 2, 3], [-1, 0, 2]]),
... b=ivy.array([[2, 3, 4], [0, 1, 2]]))
>>> y = x.amax(axis=1)
>>> print(y)
{
a:ivy.array([3, 2]),
b:ivy.array([4, 2])
}
"""
return self.static_amax(
self,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_amin(
x: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.amin. This method simply
wraps the function, and so the docstring for ivy.amin also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued data type.
axis
axis or axes along which minimum values must be computed.
By default, the minimum value must be computed over the
entire array. If a tuple of integers, minimum values must
be computed over multiple axes. Default: ``None``.
keepdims
optional boolean, if ``True``, the reduced axes
(dimensions) must be included in the result as
singleton dimensions, and, accordingly, the
result must be compatible with the input array
(see `broadcasting<https://data-apis.org/array-api/latest/
API_specification/broadcasting.html#broadcasting>`_). Otherwise,
if ``False``, the reduced axes (dimensions)
must not be included in the result.
Default: ``False``.
key_chains
The key-chains to apply or not apply the method to.
Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was
not applied. Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
container, if the minimum value was computed over the entire array,
a zero-dimensional array containing the minimum value;
otherwise, a non-zero-dimensional array containing the
minimum values. The returned array must have the same data type
as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container.static_amin(x)
>>> print(y)
{
a: ivy.array(1),
b: ivy.array(2)
}
>>> x = ivy.Container(a=ivy.array([[1, 2, 3], [-1, 0, 2]]),
... b=ivy.array([[2, 3, 4], [0, 1, 2]]))
>>> y = ivy.Container.static_amin(x, axis=1)
>>> print(y)
{
a:ivy.array([1, -1]),
b:ivy.array([2, 0])
}
"""
return ContainerBase.cont_multi_map_in_function(
"amin",
x,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def amin(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.amin. This method
simply wraps the function, and so the docstring for ivy.amin also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued data type.
axis
axis or axes along which minimum values must be computed.
By default, the minimum value must be computed over the
entire array. If a tuple of integers, minimum values must
be computed over multiple axes. Default: ``None``.
keepdims
optional boolean, if ``True``, the reduced axes
(dimensions) must be included in the result as
singleton dimensions, and, accordingly, the
result must be compatible with the input array
(see `broadcasting<https://data-apis.org/array-api/latest/
API_specification/broadcasting.html#broadcasting>`_). Otherwise,
if ``False``, the reduced axes (dimensions)
must not be included in the result.
Default: ``False``.
key_chains
The key-chains to apply or not apply the method to.
Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was
not applied. Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
container, if the minimum value was computed over the entire array,
a zero-dimensional array containing the minimum value;
otherwise, a non-zero-dimensional array containing the
minimum values. The returned array must have the same data type
as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = x.amin()
>>> print(y)
{
a: ivy.array(1),
b: ivy.array(2)
}
>>> x = ivy.Container(a=ivy.array([[1, 2, 3], [-1, 0, 2]]),
... b=ivy.array([[2, 3, 4], [0, 1, 2]]))
>>> y = x.amin(axis=1)
>>> print(y)
{
a:ivy.array([1, -1]),
b:ivy.array([2, 0])
}
"""
return self.static_amin(
self,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_sinc(
x: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.sinc. This method simply
wraps the function, and so the docstring for ivy.sinc also applies to
this method with minimal changes.
Parameters
----------
x
input container whose elements are each expressed in radians.
Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the sinc of each element in ``x``. The returned
container must have a floating-point data type determined by
:ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0.5, 1.5, 2.5]),
... b=ivy.array([3.5, 4.5, 5.5]))
>>> y = ivy.Container.static_sinc(x)
>>> print(y)
{
a: ivy.array([0.636, -0.212, 0.127]),
b: ivy.array([-0.090, 0.070, -0.057])
}
"""
return ContainerBase.cont_multi_map_in_function(
"sinc",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def sinc(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.sinc. This method
simply wraps the function, and so the docstring for ivy.sinc also
applies to this method with minimal changes.
Parameters
----------
self
input container whose elements are each expressed in radians.
Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the sinc of each element in ``self``.
The returned container must have a floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0.5, 1.5, 2.5]),
... b=ivy.array([3.5, 4.5, 5.5]))
>>> y = x.sinc()
>>> print(y)
{
a: ivy.array([0.637,-0.212,0.127]),
b: ivy.array([-0.0909,0.0707,-0.0579])
}
"""
return self.static_sinc(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_fmod(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.fmod. This method simply
wraps the function, and so the docstring for ivy.fmod also applies to
this method with minimal changes.
Parameters
----------
x1
container with the first input arrays.
x2
container with the second input arrays
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise remainder of divisions.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([2, 3, 4]),\
b=ivy.array([ivy.nan, 0, ivy.nan]))
>>> x2 = ivy.Container(a=ivy.array([1, 5, 2]),\
b=ivy.array([0, ivy.nan, ivy.nan]))
>>> ivy.Container.static_fmod(x1, x2)
{
a: ivy.array([ 0, 3, 0])
b: ivy.array([ nan, nan, nan])
}
"""
return ContainerBase.cont_multi_map_in_function(
"fmod",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def fmod(
self: ivy.Container,
x2: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.fmod. This method
simply wraps the function, and so the docstring for ivy.fmod also
applies to this method with minimal changes.
Parameters
----------
self
container with the first input arrays.
x2
container with the second input arrays
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise remainder of divisions.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([2, 3, 4]),\
b=ivy.array([ivy.nan, 0, ivy.nan]))
>>> x2 = ivy.Container(a=ivy.array([1, 5, 2]),\
b=ivy.array([0, ivy.nan, ivy.nan]))
>>> x1.fmod(x2)
{
a: ivy.array([ 0, 3, 0])
b: ivy.array([ nan, nan, nan])
}
"""
return self.static_fmod(self, x2, out=out)
@staticmethod
def static_fmax(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.fmax. This method simply
wraps the function, and so the docstring for ivy.fmax also applies to
this method with minimal changes.
Parameters
----------
x1
container with the first input arrays.
x2
container with the second input arrays
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise maximums.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([2, 3, 4]),\
b=ivy.array([ivy.nan, 0, ivy.nan]))
>>> x2 = ivy.Container(a=ivy.array([1, 5, 2]),\
b=ivy.array([0, ivy.nan, ivy.nan]))
>>> ivy.Container.static_fmax(x1, x2)
{
a: ivy.array([ 2., 5., 4.])
b: ivy.array([ 0, 0, nan])
}
"""
return ContainerBase.cont_multi_map_in_function(
"fmax",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def fmax(
self: ivy.Container,
x2: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.fmax. This method
simply wraps the function, and so the docstring for ivy.fmax also
applies to this method with minimal changes.
Parameters
----------
self
container with the first input arrays.
x2
container with the second input arrays
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise maximums.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([2, 3, 4]),\
b=ivy.array([ivy.nan, 0, ivy.nan]))
>>> x2 = ivy.Container(a=ivy.array([1, 5, 2]),\
b=ivy.array([0, ivy.nan, ivy.nan]))
>>> x1.fmax(x2)
{
a: ivy.array([ 2., 5., 4.])
b: ivy.array([ 0, 0, nan])
}
"""
return self.static_fmax(self, x2, out=out)
@staticmethod
def static_float_power(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container, float, list, tuple],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container, float, list, tuple],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.float_power. This method
simply wraps the function, and so the docstring for ivy.float_power
also applies to this method with minimal changes.
Parameters
----------
x1
container with the base input arrays.
x2
container with the exponent input arrays
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with base arrays raised to the powers
of exponents arrays, element-wise .
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([1, 2, 3]),\
b=ivy.array([2, 10]))
>>> x2 = ivy.Container(a=ivy.array([1, 3, 1]), b=0)
>>> ivy.Container.static_float_power(x1, x2)
{
a: ivy.array([1, 8, 3])
b: ivy.array([1, 1])
}
"""
return ContainerBase.cont_multi_map_in_function(
"float_power",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def float_power(
self: ivy.Container,
x2: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.float_power. This
method simply wraps the function, and so the docstring for
ivy.float_power also applies to this method with minimal changes.
Parameters
----------
self
container with the base input arrays.
x2
container with the exponent input arrays
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with base arrays raised to the powers
of exponents arrays, element-wise .
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([1, 2, 3]),\
b=ivy.array([2, 10]))
>>> x2 = ivy.Container(a=ivy.array([1, 3, 1]), b=0)
>>> x1.float_power(x2)
{
a: ivy.array([1, 8, 3])
b: ivy.array([1, 1])
}
"""
return self.static_float_power(self, x2, out=out)
@staticmethod
def static_copysign(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container, Number],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container, Number],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.copysign. This method
simply wraps the function, and so the docstring for ivy.copysign also
applies to this method with minimal changes.
Parameters
----------
x1
Container, Array, or scalar to change the sign of
x2
Container, Array, or scalar from which the new signs are applied
Unsigned zeroes are considered positive.
out
optional output Container, for writing the result to.
Returns
-------
ret
x1 with the signs of x2.
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([0,1,2]), b=ivy.array(-1))
>>> x2 = ivy.Container(a=-1, b=ivy.array(10))
>>> ivy.Container.static_copysign(x1, x2)
{
a: ivy.array([-0., -1., -2.]),
b: ivy.array(1.)
}
>>> ivy.Container.static_copysign(23, x1)
{
a: ivy.array([23., 23., 23.]),
b: ivy.array(-23.)
}
"""
return ContainerBase.cont_multi_map_in_function(
"copysign",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def copysign(
self: ivy.Container,
x2: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.copysign. This method
simply wraps the function, and so the docstring for ivy.copysign also
applies to this method with minimal changes.
Parameters
----------
self
Container to change the sign of
x2
Container from which the new signs are applied
Unsigned zeroes are considered positive.
out
optional output Container, for writing the result to.
Returns
-------
ret
x1 with the signs of x2.
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([0,1,2]), b=ivy.array(-1))
>>> x2 = ivy.Container(a=-1, b=ivy.array(10))
>>> x1.copysign(x2)
{
a: ivy.array([-0., -1., -2.]),
b: ivy.array(1.)
}
>>> x1.copysign(-1)
{
a: ivy.array([-0., -1., -2.]),
b: ivy.array(-1.)
}
"""
return self.static_copysign(self, x2, out=out)
@staticmethod
def static_count_nonzero(
a: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axis: Optional[Union[int, Tuple[int, ...], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.count_nonzero. This
method simply wraps the function, and so the docstring for
ivy.count_nonzero also applies to this method with minimal changes.
Parameters
----------
a
container with the base input arrays.
axis
optional axis or tuple of axes along which to count non-zeros. Default is
None, meaning that non-zeros will be counted along a flattened
version of the input array.
keepdims
optional, if this is set to True, the axes that are counted are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
dtype
optional output dtype. Default is of type integer.
key_chains
The key-chains to apply or not apply the method to. Default is None.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is True.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is False.
map_sequences
Whether to also map method to sequences (lists, tuples). Default is False.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including number of non-zero values in the array along a
given axis. Otherwise, container with the total number of non-zero
values in the array is returned.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[0, 1, 2, 3],[4, 5, 6, 7]]),\
b=ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]]))
>>> ivy.Container.static_count_nonzero(x)
{
a: ivy.array(7),
b: ivy.array(7)
}
>>> x = ivy.Container(a=ivy.array([[0, 1, 2, 3],[4, 5, 6, 7]]),\
b=ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]]))
>>> ivy.Container.static_count_nonzero(x, axis=0)
{
a: ivy.array([1, 2, 2, 2]),
b: ivy.array([[1, 2],
[2, 2]])
}
>>> x = ivy.Container(a=ivy.array([[0, 1, 2, 3],[4, 5, 6, 7]]),\
b=ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]]))
>>> ivy.Container.static_count_nonzero(x, axis=(0,1), keepdims=True)
{
a: ivy.array([[7]]),
b: ivy.array([[[3, 4]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"count_nonzero",
a,
axis=axis,
keepdims=keepdims,
dtype=dtype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def count_nonzero(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Tuple[int, ...], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.count_nonzero. This
method simply wraps the function, and so the docstring for
ivy.count_nonzero also applies to this method with minimal changes.
Parameters
----------
self
container with the base input arrays.
axis
optional axis or tuple of axes along which to count non-zeros. Default is
None, meaning that non-zeros will be counted along a flattened
version of the input array.
keepdims
optional, if this is set to True, the axes that are counted are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
dtype
optional output dtype. Default is of type integer.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``
out
optional output container, for writing the result to.
Returns
-------
ret
Container including number of non-zero values in the array along a
given axis. Otherwise, container with the total number of non-zero
values in the array is returned.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[0, 1, 2, 3],[4, 5, 6, 7]]),\
b=ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]]))
>>> x.count_nonzero()
{
a: ivy.array(7),
b: ivy.array(7)
}
>>> x = ivy.Container(a=ivy.array([[0, 1, 2, 3],[4, 5, 6, 7]]),\
b=ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]]))
>>> x.count_nonzero(axis=0)
{
a: ivy.array([1, 2, 2, 2]),
b: ivy.array([[1, 2],
[2, 2]])
}
>>> x = ivy.Container(a=ivy.array([[0, 1, 2, 3],[4, 5, 6, 7]]),\
b=ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]]))
>>> x.count_nonzero(axis=(0,1), keepdims=True)
{
a: ivy.array([[7]]),
b: ivy.array([[[3, 4]]])
}
"""
return self.static_count_nonzero(
self,
axis=axis,
keepdims=keepdims,
dtype=dtype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_nansum(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[tuple, int, ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.nansum. This method
simply wraps the function, and so the docstring for ivy.nansum also
applies to this method with minimal changes.
Parameters
----------
x
Input array.
axis
Axis or axes along which the sum is computed.
The default is to compute the sum of the flattened array.
dtype
The type of the returned array and of the accumulator in
which the elements are summed. By default, the dtype of input is used.
keepdims
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
A new array holding the result is returned unless out is specified,
in which it is returned.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[10, 7, 4], [3, 2, 1]]),\
b=ivy.array([[1, 4, 2], [ivy.nan, ivy.nan, 0]]))
>>> ivy.Container.static_nansum(x)
{
a: 27,
b: 7.0
}
>>> ivy.Container.static_nansum(x, axis=0)
{
a: ivy.array([13, 9, 5]),
b: ivy.array([1., 4., 2.])
}
>>> ivy.Container.static_nansum(x, axis=1)
{
a: ivy.array([21, 6]),
b: ivy.array([7., 0.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"nansum",
x,
axis=axis,
dtype=dtype,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def nansum(
self: ivy.Container,
/,
*,
axis: Optional[Union[tuple, int, ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.nansum. This method
simply wraps the function, and so the docstring for ivy.nansum also
applies to this method with minimal changes.
Parameters
----------
self
Input container including arrays.
axis
Axis or axes along which the sum is computed.
The default is to compute the sum of the flattened array.
dtype
The type of the returned array and of the accumulator in
which the elements are summed. By default, the dtype of input is used.
keepdims
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
A new array holding the result is returned unless out is specified,
in which it is returned.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[10, 7, 4], [3, 2, 1]]),\
b=ivy.array([[1, 4, 2], [ivy.nan, ivy.nan, 0]]))
>>> x.nansum(axis=0)
{
a: ivy.array([13, 9, 5]),
b: ivy.array([1., 4., 2.])
}
>>> x.nansum(axis=1)
{
a: ivy.array([21, 6]),
b: ivy.array([7., 0.])
}
"""
return self.static_nansum(
self, axis=axis, dtype=dtype, keepdims=keepdims, out=out
)
@staticmethod
def static_isclose(
a: Union[ivy.Container, ivy.Array, ivy.NativeArray],
b: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
rtol: Union[float, ivy.Container] = 1e-05,
atol: Union[float, ivy.Container] = 1e-08,
equal_nan: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.isclose. This method
simply wraps the function, and so the docstring for ivy.isclose also
applies to this method with minimal changes.
Parameters
----------
a
Input container containing first input array.
b
Input container containing second input array.
rtol
The relative tolerance parameter.
atol
The absolute tolerance parameter.
equal_nan
Whether to compare NaN's as equal. If True, NaN's in a will be
considered equal to NaN's in b in the output array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
A new array holding the result is returned unless out is specified,
in which it is returned.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.0, ivy.nan]),\
b=ivy.array([1.0, ivy.nan]))
>>> y = ivy.Container(a=ivy.array([1.0, ivy.nan]),\
b=ivy.array([1.0, ivy.nan]))
>>> ivy.Container.static_isclose(x, y)
{
a: ivy.array([True, False]),
b: ivy.array([True, False])
}
>>> ivy.Container.static_isclose(x, y, equal_nan=True)
{
a: ivy.array([True, True]),
b: ivy.array([True, True])
}
>>> x = ivy.Container(a=ivy.array([1.0, 2.0]),\
b=ivy.array([1.0, 2.0]))
>>> y = ivy.Container(a=ivy.array([1.0, 2.001]),\
b=ivy.array([1.0, 2.0]))
>>> ivy.Container.static_isclose(x, y, atol=0.0)
{
a: ivy.array([True, False]),
b: ivy.array([True, True])
}
>>> ivy.Container.static_isclose(x, y, rtol=0.01, atol=0.0)
{
a: ivy.array([True, True]),
b: ivy.array([True, True])
}
"""
return ContainerBase.cont_multi_map_in_function(
"isclose",
a,
b,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def isclose(
self: ivy.Container,
b: ivy.Container,
/,
*,
rtol: Union[float, ivy.Container] = 1e-05,
atol: Union[float, ivy.Container] = 1e-08,
equal_nan: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.isclose. This method
simply wraps the function, and so the docstring for ivy.isclose also
applies to this method with minimal changes.
Parameters
----------
self
Input container containing first input array.
b
Input container containing second input array.
rtol
The relative tolerance parameter.
atol
The absolute tolerance parameter.
equal_nan
Whether to compare NaN's as equal. If True, NaN's in a will be
considered equal to NaN's in b in the output array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
A new array holding the result is returned unless out is specified,
in which it is returned.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.0, ivy.nan]),\
b=ivy.array([1.0, ivy.nan]))
>>> y = ivy.Container(a=ivy.array([1.0, ivy.nan]),\
b=ivy.array([1.0, ivy.nan]))
>>> x.isclose(y)
{
a: ivy.array([True, False]),
b: ivy.array([True, False])
}
>>> x.isclose(y, equal_nan=True)
{
a: ivy.array([True, True]),
b: ivy.array([True, True])
}
>>> x = ivy.Container(a=ivy.array([1.0, 2.0]),\
b=ivy.array([1.0, 2.0]))
>>> y = ivy.Container(a=ivy.array([1.0, 2.001]),\
b=ivy.array([1.0, 2.0]))
>>> x.isclose(y, atol=0.0)
{
a: ivy.array([True, False]),
b: ivy.array([True, True])
}
>>> x.isclose(y, rtol=0.01, atol=0.0)
{
a: ivy.array([True, True]),
b: ivy.array([True, True])
}
"""
return self.static_isclose(
self,
b,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_signbit(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container, float, int, list, tuple],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.signbit. This method
simply wraps the function, and so the docstring for ivy.signbit also
applies to this method with minimal changes.
Parameters
----------
x
input container with array-like items.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise signbit of input arrays.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, -2, 3]),\
b=-5)
>>> ivy.Container.static_signbit(x)
{
a: ivy.array([False, True, False])
b: ivy.array([True])
}
"""
return ContainerBase.cont_multi_map_in_function(
"signbit",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def signbit(
self: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.signbit. This method
simply wraps the function, and so the docstring for ivy.signbit also
applies to this method with minimal changes.
Parameters
----------
self
input container with array-like items.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise signbit of input arrays.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, -2, 3]),\
b=-5)
>>> x.signbit()
{
a: ivy.array([False, True, False])
b: ivy.array([True])
}
"""
return self.static_signbit(self, out=out)
@staticmethod
def static_hypot(
x1: Union[ivy.Container, ivy.Array, ivy.NativeArray],
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.hypot. This method simply
wraps the function, and so the docstring for ivy.hypot also applies to
this method with minimal changes.
Parameters
----------
x1
Input container containing first input array.
x2
Input container containing second input array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
container including the hypot function computed element-wise
Examples
--------
>>> x = ivy.Container(a=ivy.array([2.0]),\
... b=ivy.array([3.0]))
>>> y = ivy.Container(a=ivy.array([3.0]),\
b=ivy.array([4.0]))
>>> ivy.Container.static_hypot(x, y)
{
a: ivy.array([3.6055]),
b: ivy.array([5.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"hypot",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def hypot(
self: ivy.Container,
x2: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.hypot. This method
simply wraps the function, and so the docstring for ivy.hypot also
applies to this method with minimal changes.
Parameters
----------
self
Input container containing first input array.
x2
Input container containing second input array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
container including the hypot function computed element-wise
Examples
--------
>>> x = ivy.Container(a=ivy.array([2.0]),\
... b=ivy.array([3.0]))
>>> y = ivy.Container(a=ivy.array([3.0]),\
b=ivy.array([4.0]))
>>> x.hypot(y)
{
a: ivy.array([3.6055]),
b: ivy.array([5.])
}
"""
return self.static_hypot(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_allclose(
x1: Union[ivy.Container, ivy.Array, ivy.NativeArray],
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
rtol: Union[float, ivy.Container] = 1e-05,
atol: Union[float, ivy.Container] = 1e-08,
equal_nan: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.allclose. This method
simply wraps the function, and so the docstring for ivy.allclose also
applies to this method with minimal changes.
Parameters
----------
x1
Input container containing first input array.
x2
Input container containing second input array.
rtol
The relative tolerance parameter.
atol
The absolute tolerance parameter.
equal_nan
Whether to compare NaN's as equal. If True, NaN's in x1 will be
considered equal to NaN's in x2 in the output array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
A new container holding the result is returned unless out is specified,
in which it is returned.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([1., 2., 3.]),\
... b=ivy.array([1., 2., 3.]))
>>> x2 = ivy.Container(a=ivy.array([1., 2., 3.]),\
... b=ivy.array([1., 2., 3.]))
>>> y = ivy.Container.static_allclose(x1, x2)
>>> print(y)
{
a: ivy.array(True),
b: ivy.array(True)
}
>>> x1 = ivy.Container(a=ivy.array([1., 2., 3.]),\
... b=ivy.array([1., 2., 3.]))
>>> x2 = ivy.Container(a=ivy.array([1., 2., 3.0003]),\
... b=ivy.array([1.0006, 2., 3.]))
>>> y = ivy.Container.static_allclose(x1, x2, rtol=1e-3)
>>> print(y)
{
a: ivy.array(True),
b: ivy.array(True)
}
"""
return ContainerBase.cont_multi_map_in_function(
"allclose",
x1,
x2,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def allclose(
self: ivy.Container,
x2: ivy.Container,
/,
*,
rtol: Union[float, ivy.Container] = 1e-05,
atol: Union[float, ivy.Container] = 1e-08,
equal_nan: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.allclose. This method
simply wraps the function, and so the docstring for ivy.allclose also
applies to this method with minimal changes.
Parameters
----------
self
Input container containing first input array.
x2
Input container containing second input array.
rtol
The relative tolerance parameter.
atol
The absolute tolerance parameter.
equal_nan
Whether to compare NaN's as equal. If True, NaN's in x1 will be
considered equal to NaN's in x2 in the output array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
A new container holding the result is returned unless out is specified,
in which it is returned.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([1., 2., 3.]), b=ivy.array([1., 2., 3.]))
>>> x2 = ivy.Container(a=ivy.array([1., 2., 3.]), b=ivy.array([1., 2., 3.]))
>>> y = x1.allclose(x2)
>>> print(y)
{
a: ivy.array(True),
b: ivy.array(True)
}
>>> x1 = ivy.Container(a=ivy.array([1., 2., 3.]),
... b=ivy.array([1., 2., 3.]))
>>> x2 = ivy.Container(a=ivy.array([1., 2., 3.0003]),
... b=ivy.array([1.0006, 2., 3.]))
>>> y = x1.allclose(x2, rtol=1e-3)
>>> print(y)
{
a: ivy.array(True),
b: ivy.array(True)
}
"""
return self.static_allclose(
self,
x2,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_diff(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
n: Union[int, ivy.Container] = 1,
axis: Union[int, ivy.Container] = -1,
prepend: Optional[
Union[ivy.Array, ivy.NativeArray, int, list, tuple, ivy.Container]
] = None,
append: Optional[
Union[ivy.Array, ivy.NativeArray, int, list, tuple, ivy.Container]
] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.diff. This method simply
wraps the function, and so the docstring for ivy.diff also applies to
this method with minimal changes.
Parameters
----------
x
input container with array-like items.
n
The number of times values are differenced. If zero, the input is returned
as-is.
axis
The axis along which the difference is taken, default is the last axis.
prepend,append
Values to prepend/append to x along given axis prior to performing the
difference. Scalar values are expanded to arrays with length 1 in the
direction of axis and the shape of the input array in along all other
axes. Otherwise the dimension and shape must match x except along axis.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with the n-th discrete difference along
the given axis.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 2, 4, 7, 0]),
b=ivy.array([1, 2, 4, 7, 0]))
>>> ivy.Container.static_diff(x)
{
a: ivy.array([ 1, 2, 3, -7]),
b: ivy.array([ 1, 2, 3, -7])
}
"""
return ContainerBase.cont_multi_map_in_function(
"diff",
x,
n=n,
axis=axis,
prepend=prepend,
append=append,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def diff(
self: ivy.Container,
/,
*,
n: Union[int, ivy.Container] = 1,
axis: Union[int, ivy.Container] = -1,
prepend: Optional[
Union[ivy.Array, ivy.NativeArray, int, list, tuple, ivy.Container]
] = None,
append: Optional[
Union[ivy.Array, ivy.NativeArray, int, list, tuple, ivy.Container]
] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.diff. This method
simply wraps the function, and so the docstring for ivy.diff also
applies to this method with minimal changes.
Parameters
----------
self
input container with array-like items.
n
The number of times values are differenced. If zero, the input is returned
as-is.
axis
The axis along which the difference is taken, default is the last axis.
prepend,append
Values to prepend/append to x along given axis prior to performing the
difference. Scalar values are expanded to arrays with length 1 in the
direction of axis and the shape of the input array in along all other
axes. Otherwise the dimension and shape must match x except along axis.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with the n-th discrete difference along the
given axis.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 2, 4, 7, 0]),
b=ivy.array([1, 2, 4, 7, 0]))
>>> x.diff()
{
a: ivy.array([1, 2, 3, -7]),
b: ivy.array([1, 2, 3, -7])
}
"""
return self.static_diff(
self, n=n, axis=axis, prepend=prepend, append=append, out=out
)
@staticmethod
def static_fix(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.fix. This method simply
wraps the function, and so the docstring for ivy.fix also applies to
this method with minimal changes.
Parameters
----------
x
input container with array items.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise rounding of
input arrays elements.
Examples
--------
>>> x = ivy.Container(a=ivy.array([2.1, 2.9, -2.1]),\
b=ivy.array([3.14]))
>>> ivy.Container.static_fix(x)
{
a: ivy.array([ 2., 2., -2.])
b: ivy.array([ 3.0 ])
}
"""
return ContainerBase.cont_multi_map_in_function(
"fix",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def fix(
self: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.fix. This method simply
wraps the function, and so the docstring for ivy.fix also applies to
this method with minimal changes.
Parameters
----------
self
input container with array items.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise rounding of
input arrays elements.
Examples
--------
>>> x = ivy.Container(a=ivy.array([2.1, 2.9, -2.1]),\
b=ivy.array([3.14]))
>>> x.fix()
{
a: ivy.array([ 2., 2., -2.])
b: ivy.array([ 3.0 ])
}
"""
return self.static_fix(self, out=out)
@staticmethod
def static_nextafter(
x1: Union[ivy.Container, ivy.Array, ivy.NativeArray],
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.nextafter. This method
simply wraps the function, and so the docstring for ivy.nextafter also
applies to this method with minimal changes.
Parameters
----------
x1
Input container containing first input arrays.
x2
Input container containing second input arrays.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
container including the next representable values of
input container's arrays, element-wise
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([1.0e-50, 2.0e+50]),\
... b=ivy.array([2.0, 1.0])
>>> x2 = ivy.Container(a=ivy.array([5.5e-30]),\
... b=ivy.array([-2.0]))
>>> ivy.Container.static_nextafter(x1, x2)
{
a: ivy.array([1.4013e-45., 3.4028e+38]),
b: ivy.array([5.5e-30])
}
"""
return ContainerBase.cont_multi_map_in_function(
"nextafter",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def nextafter(
self: ivy.Container,
x2: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.nextafter. This method
simply wraps the function, and so the docstring for ivy.nextafter also
applies to this method with minimal changes.
Parameters
----------
self
Input container containing first input array.
x2
Input container containing second input array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
container including the next representable values of
input container's arrays, element-wise
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([1.0e-50, 2.0e+50]),\
... b=ivy.array([2.0, 1.0])
>>> x2 = ivy.Container(a=ivy.array([5.5e-30]),\
... b=ivy.array([-2.0]))
>>> x1.nextafter(x2)
{
a: ivy.array([1.4013e-45., 3.4028e+38]),
b: ivy.array([5.5e-30])
}
"""
return self.static_nextafter(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_zeta(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
q: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.zeta. This method simply
wraps the function, and so the docstring for ivy.zeta also applies to
this method with minimal changes.
Parameters
----------
x
Input container containing first input arrays.
q
Input container containing second input arrays.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
container including the zeta function computed element-wise
Examples
--------
>>> x = ivy.Container(a=ivy.array([5.0, 3.0]),\
... b=ivy.array([2.0, 1.0])
>>> q = ivy.Container(a=ivy.array([2.0]),\
... b=ivy.array([5.0]))
>>> ivy.Container.static_zeta(x1, x2)
{
a: ivy.array([0.0369, 0.2021]),
b: ivy.array([0.0006, 0.0244])
}
"""
return ContainerBase.cont_multi_map_in_function(
"zeta",
x,
q,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def zeta(
self: ivy.Container,
q: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.zeta. This method
simply wraps the function, and so the docstring for ivy.zeta also
applies to this method with minimal changes.
Parameters
----------
self
Input container containing first input array.
q
Input container containing second input array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
container including the zeta function computed element-wise
Examples
--------
>>> x = ivy.Container(a=ivy.array([5.0, 3.0]),\
... b=ivy.array([2.0, 1.0])
>>> q = ivy.Container(a=ivy.array([2.0]),\
... b=ivy.array([5.0]))
>>> x.zeta(q)
{
a: ivy.array([0.0369, 0.2021]),
b: ivy.array([0.0006, 0.0244])
}
"""
return self.static_zeta(
self,
q,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_gradient(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
spacing: Union[int, list, tuple, ivy.Container] = 1,
edge_order: Union[int, ivy.Container] = 1,
axis: Optional[Union[int, list, tuple, ivy.Container]] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"gradient",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
spacing=spacing,
edge_order=edge_order,
axis=axis,
)
def gradient(
self: ivy.Container,
/,
*,
spacing: Union[int, list, tuple, ivy.Container] = 1,
edge_order: Union[int, ivy.Container] = 1,
axis: Optional[Union[int, list, tuple, ivy.Container]] = None,
) -> ivy.Container:
"""Calculate gradient of x with respect to (w.r.t.) spacing.
Parameters
----------
x
input array representing outcomes of the function
spacing
if not given, indices of x will be used
if scalar indices of x will be scaled with this value
if array gradient of x w.r.t. spacing
edge_order
1 or 2, for 'frist order' and 'second order' estimation
of boundary values of gradient respectively.
axis
dimension(s) to approximate the gradient over.
By default, partial gradient is computed in every dimension
Returns
-------
ret
Array with values computed from gradient function from
inputs
Examples
--------
>>> coordinates = ivy.Container(
>>> a=(ivy.array([-2., -1., 1., 4.]),),
>>> b=(ivy.array([2., 1., -1., -4.]),)
>>> )
>>> values = ivy.Container(
>>> a=ivy.array([4., 1., 1., 16.]),
>>> b=ivy.array([4., 1., 1., 16.])
>>> )
>>> ivy.gradient(values, spacing=coordinates)
{
a: ivy.array([-3., -2., 2., 5.]),
b: ivy.array([3., 2., -2., -5.])
}
>>> values = ivy.Container(
>>> a=ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]]),
>>> b=ivy.array([[-1, -2, -4, -8], [-10, -20, -40, -80]])
>>> )
>>> ivy.gradient(values)
[{
a: ivy.array([[9., 18., 36., 72.],
[9., 18., 36., 72.]]),
b: ivy.array([[-9., -18., -36., -72.],
[-9., -18., -36., -72.]])
}, {
a: ivy.array([[1., 1.5, 3., 4.],
[10., 15., 30., 40.]]),
b: ivy.array([[-1., -1.5, -3., -4.],
[-10., -15., -30., -40.]])
}]
>>> values = ivy.Container(
>>> a=ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]]),
>>> b=ivy.array([[-1, -2, -4, -8], [-10, -20, -40, -80]])
>>> )
>>> ivy.gradient(values, spacing=2.0)
[{
a: ivy.array([[4.5, 9., 18., 36.],
[4.5, 9., 18., 36.]]),
b: ivy.array([[-4.5, -9., -18., -36.],
[-4.5, -9., -18., -36.]])
}, {
a: ivy.array([[0.5, 0.75, 1.5, 2.],
[5., 7.5, 15., 20.]]),
b: ivy.array([[-0.5, -0.75, -1.5, -2.],
[-5., -7.5, -15., -20.]])
}]
>>> values = ivy.Container(
>>> a=ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]]),
>>> b=ivy.array([[-1, -2, -4, -8], [-10, -20, -40, -80]])
>>> )
>>> ivy.gradient(values, axis=1)
{
a: ivy.array([[1., 1.5, 3., 4.],
[10., 15., 30., 40.]]),
b: ivy.array([[-1., -1.5, -3., -4.],
[-10., -15., -30., -40.]])
}
>>> values = ivy.Container(
>>> a=ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]]),
>>> b=ivy.array([[-1, -2, -4, -8], [-10, -20, -40, -80]])
>>> )
>>> ivy.gradient(values, spacing = [3., 2.])
[{
a: ivy.array([[3., 6., 12., 24.],
[3., 6., 12., 24.]]),
b: ivy.array([[-3., -6., -12., -24.],
[-3., -6., -12., -24.]])
}, {
a: ivy.array([[0.5, 0.75, 1.5, 2.],
[5., 7.5, 15., 20.]]),
b: ivy.array([[-0.5, -0.75, -1.5, -2.],
[-5., -7.5, -15., -20.]])
}]
>>> coords = ivy.Container(
>>> a=(ivy.array([0, 2]), ivy.array([0, 3, 6, 9])),
>>> b=(ivy.array([0, -2]), ivy.array([0, -3, -6, -9]))
>>>)
>>> values = ivy.Container(
>>> a=ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]]),
>>> b=ivy.array([[-1, -2, -4, -8], [-10, -20, -40, -80]])
>>>)
>>> ivy.gradient(values, spacing = coords)
[{
a: ivy.array([[4.5, 9., 18., 36.],
[4.5, 9., 18., 36.]]),
b: ivy.array([[4.5, 9., 18., 36.],
[4.5, 9., 18., 36.]])
}, {
a: ivy.array([[0.33333333, 0.5, 1., 1.33333333],
[3.33333333, 5., 10., 13.33333333]]),
b: ivy.array([[0.33333333, 0.5, 1., 1.33333333],
[3.33333333, 5., 10., 13.33333333]])
}]
"""
return self.static_gradient(
self, spacing=spacing, edge_order=edge_order, axis=axis
)
@staticmethod
def static_xlogy(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
y: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.xlogy. This method simply
wraps the function, and so the docstring for ivy.xlogy also applies to
this method with minimal changes.
Parameters
----------
x
Input container containing first input arrays.
y
Input container containing second input arrays.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
container including the next representable values of
input container's arrays, element-wise
Examples
--------
>>> x = ivy.Container(a=ivy.zeros(3)),\
... b=ivy.array([1.0, 2.0, 3.0]))
>>> y = ivy.Container(a=ivy.array([-1.0, 0.0, 1.0]),\
... b=ivy.array([3.0, 2.0, 1.0]))
>>> ivy.Container.static_xlogy(x, y)
{
a: ivy.array([0.0, 0.0, 0.0]),
b: ivy.array([1.0986, 1.3863, 0.0000])
}
"""
return ContainerBase.cont_multi_map_in_function(
"xlogy",
x,
y,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def xlogy(
self: ivy.Container,
y: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.xlogy. This method
simply wraps the function, and so the docstring for ivy.xlogy also
applies to this method with minimal changes.
Parameters
----------
self
Input container containing first input array.
y
Input container containing second input array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
container including the next representable values of
input container's arrays, element-wise
Examples
--------
>>> x = ivy.Container(a=ivy.zeros(3)),\
... b=ivy.array([1.0, 2.0, 3.0]))
>>> y = ivy.Container(a=ivy.array([-1.0, 0.0, 1.0]),\
... b=ivy.array([3.0, 2.0, 1.0]))
>>> x.xlogy(y)
{
a: ivy.array([0.0, 0.0, 0.0]),
b: ivy.array([1.0986, 1.3863, 0.0000])
}
"""
return self.static_xlogy(
self,
y,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_binarizer(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
threshold: Union[float, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""Map the values of the input tensor to either 0 or 1, element-wise,
based on the outcome of a comparison against a threshold value.
Parameters
----------
self
input container. Should have a real-valued floating-point data type.
threshold
Values greater than this are
mapped to 1, others to 0.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Binarized output data
"""
return ContainerBase.cont_multi_map_in_function(
"binarizer",
x,
threshold=threshold,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def binarizer(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
*,
threshold: Union[float, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""Map the values of the input tensor to either 0 or 1, element-wise,
based on the outcome of a comparison against a threshold value.
Parameters
----------
threshold
Values greater than this are
mapped to 1, others to 0.
key_chains
The keychains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Binarized output data
"""
return self.static_binarizer(
self,
threshold=threshold,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_conj(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.conj. This method simply
wraps the function, and so the docstring for ivy.conj also applies to
this method with minimal changes.
Parameters
----------
x
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing output array(s) of the same
dtype as the input array(s) with the complex conjugates of
the complex values present in the input array. If x is a
container of scalar(s) then a container of scalar(s)
will be returned.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1+5j, 0-0j, 1.23j]),
... b=ivy.array([7.9, 0.31+3.3j, -4.2-5.9j]))
>>> z = ivy.Container.static_conj(x)
>>> print(z)
{
a: ivy.array([-1-5j, 0+0j, -1.23j]),
b: ivy.array([7.9, 0.31-3.3j, -4.2+5.9j])
}
"""
return ContainerBase.cont_multi_map_in_function(
"conj",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def conj(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.conj. This method
simply wraps the function, and so the docstring for ivy.conj also
applies to this method with minimal changes.
Parameters
----------
self
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing output array(s) of the same dtype
as the input array(s) with the complex conjugates of the
complex values present in the input array.
If x is a container of scalar(s) then a container of
scalar(s) will be returned.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1j, 0.335+2.345j, 1.23+7j]),\
b=ivy.array([0.0, 1.2+3.3j, 1+0j]))
>>> x.conj()
{
a: ivy.array([1j, 0.335-2345j, 1.23-7j]),
b: ivy.array([0.0, 1.2-3.3j, 1-0j])
}
"""
return self.static_conj(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_ldexp(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.ldexp. This method simply
wraps the function, and so the docstring for ivy.ldexp also applies to
this method with minimal changes.
Parameters
----------
x1
The container whose arrays should be multiplied by 2**i.
x2
The container whose arrays should be used to multiply x by 2**i.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
Returns
-------
ret
container including x1 * 2**x2.
Examples
--------
With one :class:`ivy.Container` input:
>>> x1 = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([1, 5, 10]))
>>> x2 = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([1, 5, 10]))
>>> ivy.Container.static_ldexp(x1, x2)
{
a: ivy.array([2, 8, 24]),
b: ivy.array([2, 160, 10240])
}
"""
return ContainerBase.cont_multi_map_in_function(
"ldexp",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def ldexp(
self: ivy.Container,
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.ldexp. This method
simply wraps the function, and so the docstring for ivy.ldexp also
applies to this method with minimal changes.
Parameters
----------
self
The container whose arrays should be multiplied by 2**x2.
x2
The container whose arrays should be used to multiply x1 by 2**x2.
out
optional output container, for writing the result to.
Returns
-------
ret
container including x1 * 2**x2.
Examples
--------
With one :class:`ivy.Container` input:
>>> x1 = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([1, 5, 10]))
>>> x2 = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([1, 5, 10]))
>>> x1.ldexp(x2)
{
a: ivy.array([2, 8, 24]),
b: ivy.array([2, 160, 10240])
}
"""
return self.static_ldexp(self, x2, out=out)
@staticmethod
def static_lerp(
input: Union[ivy.Array, ivy.NativeArray, ivy.Container],
end: Union[ivy.Array, ivy.NativeArray, ivy.Container],
weight: Union[ivy.Array, ivy.NativeArray, float, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.lerp. This method simply
wraps the function, and so the docstring for ivy.lerp also applies to
this method with minimal changes.
Parameters
----------
input
The container whose arrays should be used as parameter: input
end
The container whose arrays should be used as parameter: end
weight
The container whose arrays or scalar should be used as parameter: weight
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
Returns
-------
ret
container including input + ((end - input) * weight)
Examples
--------
With one :class:`ivy.Container` input:
>>> input = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> end = ivy.array([10.])
>>> weight = 1.1
>>> y = ivy.Container.static_lerp(input, end, weight)
>>> print(y)
{
a: ivy.array([11., 10.90000057, 10.80000019]),
b: ivy.array([10.70000076, 10.60000038, 10.5])
}
>>> input = ivy.Container(a=ivy.array([10.1, 11.1]), b=ivy.array([10, 11]))
>>> end = ivy.Container(a=ivy.array([5]))
>>> weight = ivy.Container(a=0.5)
>>> y = ivy.Container.static_lerp(input, end, weight)
>>> print(y)
{
a: ivy.array([7.55000019, 8.05000019]),
b: {
a: ivy.array([7.5, 8.])
}
}
"""
return ContainerBase.cont_multi_map_in_function(
"lerp",
input,
end,
weight,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def lerp(
self: ivy.Container,
end: Union[ivy.Array, ivy.NativeArray, ivy.Container],
weight: Union[ivy.Array, ivy.NativeArray, float, ivy.Container],
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.lerp. This method
simply wraps the function, and so the docstring for ivy.lerp also
applies to this method with minimal changes.
Parameters
----------
self
The container whose arrays should be used as parameter: input
end
The container whose arrays should be used as parameter: end
weight
The container whose arrays or scalar should be used as parameter: weight
out
optional output container, for writing the result to.
Returns
-------
ret
container including input + ((end - input) * weight)
Examples
--------
With one :class:`ivy.Container` input:
>>> input = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([1, 5, 10]))
>>> end = ivy.Container(a=ivy.array([10, 10, 10]), b=ivy.array([20, 20, 20]))
>>> weight = ivy.Container(a=ivy.array(0.5), b=ivy.array([0.4, 0.5, 0.6]))
>>> input.lerp(end, weight)
{
a: ivy.array([5.5, 6., 6.5]),
b: ivy.array([8.60000038, 12.5, 16.])
}
"""
return self.static_lerp(self, end, weight, out=out)
@staticmethod
def static_frexp(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.frexp. This method simply
wraps the function, and so the docstring for ivy.frexp also applies to
this method with minimal changes.
Parameters
----------
x
The container whose arrays should be split into mantissa and exponent.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
Returns
-------
ret
container including the mantissa and exponent of x.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([1, 5, 10]))
>>> ivy.Container.static_frexp(x)
{
a: (ivy.array([0.5, 0.5, 0.75]), ivy.array([1, 1, 2])),
b: (ivy.array([0.5, 0.625, 0.625]), ivy.array([1, 3, 4]))
}
"""
return ContainerBase.cont_multi_map_in_function(
"frexp",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def frexp(
self: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.frexp. This method
simply wraps the function, and so the docstring for ivy.frexp also
applies to this method with minimal changes.
Parameters
----------
self
The container whose arrays should be split into mantissa and exponent.
out
optional output container, for writing the result to.
Returns
-------
ret
container including the mantissa and exponent of x.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),\
b=ivy.array([1, 5, 10]))
>>> x.frexp()
{
a: (ivy.array([0.5, 0.5, 0.75]), ivy.array([1, 1, 2])),
b: (ivy.array([0.5, 0.625, 0.625]), ivy.array([1, 3, 4]))
}
"""
return self.static_frexp(self, out=out)
@staticmethod
def static_modf(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.modf. This method simply
wraps the function, and so the docstring for ivy.modf also applies to
this method with minimal changes.
Parameters
----------
x
The container whose arrays should be split into
the fractional and integral parts.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
Returns
-------
ret
container including the fractional and integral parts of x.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.2, 2.7, 3.9]),
>>> b = ivy.array([-1.5, 5.3, -10.7]))
>>> ivy.Container.static_modf(x)
{
a: (ivy.array([0.2, 0.7, 0.9]), ivy.array([1.0, 2.0, 3.0])),
b: (ivy.array([-0.5, 0.3, -0.7]), ivy.array([-1.0, 5.0, -10.0]))
}
"""
return ContainerBase.cont_multi_map_in_function(
"modf",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def modf(
self: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
r"""ivy.Container instance method variant of ivy.modf. This method
simply wraps the function, and so the docstring for ivy.modf also
applies to this method with minimal changes.
Parameters
----------
self
The container whose arrays should be split into
the fractional and integral parts.
out
optional output container, for writing the result to.
Returns
-------
ret
container including the fractional and integral parts of x.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.2, 2.7, 3.9]),
>>> b = ivy.array([-1.5, 5.3, -10.7]))
>>> x.modf()
{
a: (ivy.array([0.2, 0.7, 0.9]), ivy.array([1.0, 2.0, 3.0])),
b: (ivy.array([-0.5, 0.3, -0.7]), ivy.array([-1.0, 5.0, -10.0]))
}
"""
return self.static_modf(self, out=out)
@staticmethod
def static_digamma(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.digamma. This method
simply wraps the function, and so the docstring for ivy.digamma also
applies to this method with minimal changes.
Note
----
The Ivy version only accepts real-valued inputs.
Parameters
----------
x
Input container containing input arrays.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
container including the digamma function computed element-wise
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 0.5]),\
... b=ivy.array([-2.0, 3.0]))
>>> ivy.Container.static_digamma(x)
{
a: ivy.array([-0.57721537, -1.96351004]),
b: ivy.array([nan, 0.92278427])
}
"""
return ContainerBase.cont_multi_map_in_function(
"digamma",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def digamma(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.digamma. This method
simply wraps the function, and so the docstring for ivy.digamma also
applies to this method with minimal changes.
Note
----
The Ivy version only accepts real-valued inputs.
Parameters
----------
self
Input container containing input arrays.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
container including the digamma function computed element-wise
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 0.5]), b=ivy.array([2.0, 3.0])
>>> x.digamma()
{
a: ivy.array([-0.5772, -1.9635]),
b: ivy.array([0.4228, 0.9228])
}
"""
return self.static_digamma(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_sparsify_tensor(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
card: Union[int, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.sparsify_tensor. This
method simply wraps the function, and so the docstring for
ivy.sparsify_tensor also applies to this method with minimal changes.
Parameters
----------
x
Input container containing input arrays.
card
The number of values to keep in each tensor.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output container in which to place the result.
The default is None.
Returns
-------
ret
container including the sparsified tensor computed element-wise
Examples
--------
>>> x = ivy.Container(
a=ivy.reshape(ivy.arange(100), (10, 10)),
b=ivy.reshape(ivy.arange(100), (10, 10)),
)
>>> ivy.Container.static_sparsify_tensor(x, 10)
{
a: (<class ivy.data_classes.array.array.Array> shape=[10, 10]),
b: (<class ivy.data_classes.array.array.Array> shape=[10, 10])
}
"""
return ContainerBase.cont_multi_map_in_function(
"sparsify_tensor",
x,
card,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def sparsify_tensor(
self: Union[ivy.Container, ivy.Array, ivy.NativeArray],
card: Union[int, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.sparsify_tensor.
This method simply wraps the function, and so the docstring for
ivy.sparsify_tensor also applies to this method with minimal
changes.
"""
return self.static_sparsify_tensor(
self,
card,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_erfc(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.erfc. This method simply
wraps the function, and so the docstring for ivy.erfc also applies to
this method with minimal changes.
Parameters
----------
x
The container whose array contains real or complex valued argument.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
Returns
-------
ret
container with values of the complementary error function.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., 2.]), b=ivy.array([-3., -4.]))
>>> ivy.Container.static_erfc(x)
{
a: ivy.array([0.15729921, 0.00467773]),
b: ivy.array([1.99997795, 2.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"erfc",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def erfc(
self: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.erfc. This method
simply wraps the function, and so the docstring for ivy.erfc also
applies to this method with minimal changes.
Parameters
----------
self
The container whose array contains real or complex valued argument.
out
optional output container, for writing the result to.
Returns
-------
ret
container with values of the complementary error function.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1., 2., 3.]), b=ivy.array([-1., -2., -3.]))
>>> x.erfc()
{
a: ivy.array([1.57299206e-01, 4.67773480e-03, 2.20904985e-05]),
b: ivy.array([1.84270084, 1.99532223, 1.99997795])
}
"""
return self.static_erfc(self, out=out)
@staticmethod
def static_erfinv(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.erfinv. This method
simply wraps the function, and so the docstring for ivy.erfinv also
applies to this method with minimal changes.
Parameters
----------
x
The container whose array contains real or complex valued argument.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
Returns
-------
ret
container with values of the inverse error function.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., 2.]), b=ivy.array([-3., -4.]))
>>> ivy.Container.static_erfinv(x)
{
a: ivy.array([0.15729921, 0.00467773]),
b: ivy.array([1.99997795, 2.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"erfinv",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def erfinv(
self: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.erfinv. This method
simply wraps the function, and so the docstring for ivy.erfinv also
applies to this method with minimal changes.
Parameters
----------
self
The container whose array contains real or complex valued argument.
out
optional output container, for writing the result to.
Returns
-------
ret
container with values of the inverse error function.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1., 2., 3.]), b=ivy.array([-1., -2., -3.]))
>>> x.erfinv()
{
a: ivy.array([1.57299206e-01, 4.67773480e-03, 2.20904985e-05]),
b: ivy.array([1.84270084, 1.99532223, 1.99997795])
}
"""
return self.static_erfinv(self, out=out)
| ivy/ivy/data_classes/container/experimental/elementwise.py/0 | {
"file_path": "ivy/ivy/data_classes/container/experimental/elementwise.py",
"repo_id": "ivy",
"token_count": 62764
} | 12 |
from typing import Optional, Union, List, Dict
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
# noinspection PyMissingConstructor
class _ContainerWithGradients(ContainerBase):
@staticmethod
def _static_stop_gradient(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
preserve_type: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.stop_gradient. This
method simply wraps the function, and so the docstring for
ivy.stop_gradient also applies to this method with minimal changes.
Parameters
----------
x
Array or Container for which to stop the gradient.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
preserve_type
Whether to preserve gradient computation on ivy.Array instances. Default is
True.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The same array x, but with no gradient information.
Examples
--------
With one :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> y = ivy.Container.static_stop_gradient(x, preserve_type=False)
>>> print(y)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> ivy.Container.static_stop_gradient(x, preserve_type=True, out=x)
>>> print(x)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"stop_gradient",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
preserve_type=preserve_type,
out=out,
)
def stop_gradient(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
preserve_type: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.stop_gradient. This
method simply wraps the function, and so the docstring for
ivy.stop_gradient also applies to this method with minimal changes.
Parameters
----------
self
Container for which to stop the gradient.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
preserve_type
Whether to preserve gradient computation on ivy.Array instances. Default is
True.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The same array x, but with no gradient information.
Examples
--------
With one :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> y = x.stop_gradient(preserve_type=False)
>>> print(y)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> x.stop_gradient(preserve_type=True, out=x)
>>> print(x)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
"""
return self._static_stop_gradient(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
preserve_type=preserve_type,
out=out,
)
def adam_step(
self: ivy.Container,
mw: Union[ivy.Array, ivy.NativeArray, ivy.Container],
vw: Union[ivy.Array, ivy.NativeArray, ivy.Container],
step: Union[int, float, ivy.Container],
/,
*,
beta1: Union[float, ivy.Container] = 0.9,
beta2: Union[float, ivy.Container] = 0.999,
epsilon: Union[float, ivy.Container] = 1e-7,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.adam_step. This method
simply wraps the function, and so the docstring for ivy.adam_step also
applies to this method with minimal changes.
Parameters
----------
self
Derivates of the cost c with respect to the weights ws, [dc/dw for w in ws].
mw
running average of the gradients.
vw
running average of second moments of the gradients.
step
training step.
beta1
gradient forgetting factor (Default value = 0.9).
beta2
second moment of gradient forgetting factor (Default value = 0.999).
epsilon
divisor during adam update, preventing division by zero
(Default value = 1e-7).
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The adam step delta.
Examples
--------
With one :class:`ivy.Container` input:
>>> dcdw = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> mw = ivy.array([1., 4., 9.])
>>> vw = ivy.array([0.,])
>>> step = ivy.array([3.4])
>>> beta1 = 0.87
>>> beta2 = 0.976
>>> epsilon = 1e-5
>>> adam_step_delta = dcdw.adam_step(mw, vw, step, beta1=beta1, beta2=beta2,
... epsilon=epsilon)
>>> print(adam_step_delta)
({
a: ivy.array([6.49e+04, 1.74e+01, 1.95e+01]),
b: ivy.array([2.02, 4.82, 8.17])
}, {
a: ivy.array([0.87, 3.61, 8.09]),
b: ivy.array([1.26, 4., 8.48])
}, {
a: ivy.array([0., 0.024, 0.096]),
b: ivy.array([0.216, 0.384, 0.6])
})
With multiple :class:`ivy.Container` inputs:
>>> dcdw = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> mw = ivy.Container(a=ivy.array([0., 0., 0.]),
... b=ivy.array([0., 0., 0.]))
>>> vw = ivy.Container(a=ivy.array([0.,]),
... b=ivy.array([0.,]))
>>> step = ivy.array([3.4])
>>> beta1 = 0.87
>>> beta2 = 0.976
>>> epsilon = 1e-5
>>> adam_step_delta = dcdw.adam_step(mw, vw, step, beta1=beta1, beta2=beta2,
... epsilon=epsilon)
>>> print(adam_step_delta)
({
a: ivy.array([0., 0.626, 0.626]),
b: ivy.array([0.626, 0.626, 0.626])
}, {
a: ivy.array([0., 0.13, 0.26]),
b: ivy.array([0.39, 0.52, 0.65])
}, {
a: ivy.array([0., 0.024, 0.096]),
b: ivy.array([0.216, 0.384, 0.6])
})
"""
return ivy.adam_step(
self, mw, vw, step, beta1=beta1, beta2=beta2, epsilon=epsilon, out=out
)
def optimizer_update(
self: ivy.Container,
effective_grad: Union[ivy.Array, ivy.NativeArray, ivy.Container],
lr: Union[float, ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
stop_gradients: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""Update weights ws of some function, given the true or effective
derivatives of some cost c with respect to ws, [dc/dw for w in ws].
Parameters
----------
self
Weights of the function to be updated.
effective_grad
Effective gradients of the cost c with respect to the weights ws,
[dc/dw for w in ws].
lr
Learning rate(s), the rate(s) at which the weights should be updated
relative to the gradient.
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The new function weights ws_new, following the optimizer updates.
Examples
--------
With one :class:`ivy.Container` input:
>>> w = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> effective_grad = ivy.array([0., 0., 0.])
>>> lr = 3e-4
>>> ws_new = w.optimizer_update(effective_grad, lr)
>>> print(ws_new)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
With multiple :class:`ivy.Container` inputs:
>>> w = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> effective_grad = ivy.Container(a=ivy.array([0., 0., 0.]),
... b=ivy.array([0., 0., 0.]))
>>> lr = 3e-4
>>> ws_new = w.optimizer_update(effective_grad, lr, out=w)
>>> print(w)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
>>> w = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> effective_grad = ivy.Container(a=ivy.array([0., 0., 0.]),
... b=ivy.array([0., 0., 0.]))
>>> lr = ivy.array([3e-4])
>>> ws_new = w.optimizer_update(effective_grad, lr, stop_gradients=False)
>>> print(ws_new)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., 5.])
}
"""
return ivy.optimizer_update(
self, effective_grad, lr, stop_gradients=stop_gradients, out=out
)
def gradient_descent_update(
self: ivy.Container,
dcdw: Union[ivy.Array, ivy.NativeArray, ivy.Container],
lr: Union[float, ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
stop_gradients: Union[bool, ivy.Container] = True,
out: ivy.Container = None,
) -> ivy.Container:
"""ivy.Container instance method variant of
ivy.gradient_descent_update. This method simply wraps the function, and
so the docstring for ivy.gradient_descent_update also applies to this
method with minimal changes.
Parameters
----------
self
Weights of the function to be updated.
dcdw
Derivates of the cost c with respect to the weights ws, [dc/dw for w in ws].
lr
Learning rate(s), the rate(s) at which the weights should be
updated relative to the gradient.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The new weights, following the gradient descent updates.
Examples
--------
With one :class:`ivy.Container` inputs:
>>> w = ivy.Container(a=ivy.array([1., 2., 3.]),
... b=ivy.array([3.48, 5.72, 1.98]))
>>> dcdw = ivy.array([0.5, 0.2, 0.1])
>>> lr = ivy.array(0.3)
>>> w_new = w.gradient_descent_update(dcdw, lr)
>>> print(w_new)
{
a: ivy.array([0.85, 1.94, 2.97]),
b: ivy.array([3.33, 5.66, 1.95])
}
With multiple :class:`ivy.Container` inputs:
>>> w = ivy.Container(a=ivy.array([1., 2., 3.]),
... b=ivy.array([3.48, 5.72, 1.98]))
>>> dcdw = ivy.Container(a=ivy.array([0.5, 0.2, 0.1]),
... b=ivy.array([2., 3.42, 1.69]))
>>> lr = ivy.array(0.3)
>>> w_new = w.gradient_descent_update(dcdw, lr)
>>> print(w_new)
{
a: ivy.array([0.85, 1.94, 2.97]),
b: ivy.array([2.88, 4.69, 1.47])
}
"""
return ivy.gradient_descent_update(
self,
dcdw,
lr,
stop_gradients=stop_gradients,
out=out,
)
def lars_update(
self: ivy.Container,
dcdw: Union[ivy.Array, ivy.NativeArray, ivy.Container],
lr: Union[float, ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
decay_lambda: Union[float, ivy.Container] = 0,
stop_gradients: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
):
"""Update weights ws of some function, given the derivatives of some
cost c with respect to ws, [dc/dw for w in ws], by applying Layerwise
Adaptive Rate Scaling (LARS) method.
Parameters
----------
self
Weights of the function to be updated.
dcdw
Derivates of the cost c with respect to the weights ws, [dc/dw for w in ws].
lr
Learning rate, the rate at which the weights should be updated relative to
the gradient.
decay_lambda
The factor used for weight decay. Default is zero.
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The new function weights ws_new, following the LARS updates.
Examples
--------
With one :class:`ivy.Container` inputs:
>>> w = ivy.Container(a=ivy.array([3.2, 2.6, 1.3]),
... b=ivy.array([1.4, 3.1, 5.1]))
>>> dcdw = ivy.array([0.2, 0.4, 0.1])
>>> lr = ivy.array(0.1)
>>> new_weights = w.lars_update(dcdw, lr)
>>> print(new_weights)
{
a: ivy.array([3.01132035, 2.22264051, 1.2056601]),
b: ivy.array([1.1324538, 2.56490755, 4.96622658])
}
With multiple :class:`ivy.Container` inputs:
>>> w = ivy.Container(a=ivy.array([3.2, 2.6, 1.3]),
... b=ivy.array([1.4, 3.1, 5.1]))
>>> dcdw = ivy.Container(a=ivy.array([0.2, 0.4, 0.1]),
... b=ivy.array([0.3,0.1,0.2]))
>>> lr = ivy.array(0.1)
>>> new_weights = w.lars_update(dcdw, lr)
>>> print(new_weights)
{
a: ivy.array([3.01132035, 2.22264051, 1.2056601]),
b: ivy.array([0.90848625, 2.93616199, 4.77232409])
}
"""
return ivy.lars_update(
self,
dcdw,
lr,
decay_lambda=decay_lambda,
stop_gradients=stop_gradients,
out=out,
)
def adam_update(
self: ivy.Container,
dcdw: Union[ivy.Array, ivy.NativeArray, ivy.Container],
lr: Union[float, ivy.Array, ivy.NativeArray, ivy.Container],
mw_tm1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
vw_tm1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
step: Union[int, ivy.Container],
/,
*,
beta1: Union[float, ivy.Container] = 0.9,
beta2: Union[float, ivy.Container] = 0.999,
epsilon: Union[float, ivy.Container] = 1e-7,
stop_gradients: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""Update weights ws of some function, given the derivatives of some
cost c with respect to ws, using ADAM update. `[reference]
<https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam>`_
Parameters
----------
self
Weights of the function to be updated.
dcdw
Derivates of the cost c with respect to the weights ws, [dc/dw for w in ws].
lr
Learning rate(s), the rate(s) at which the weights should be updated
relative to the gradient.
mw_tm1
running average of the gradients, from the previous time-step.
vw_tm1
running average of second moments of the gradients, from the previous
time-step.
step
training step.
beta1
gradient forgetting factor (Default value = 0.9).
beta2
second moment of gradient forgetting factor (Default value = 0.999).
epsilon
divisor during adam update, preventing division by zero
(Default value = 1e-7).
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The new function weights ws_new, and also new mw and vw, following the adam
updates.
Examples
--------
With one :class:`ivy.Container` inputs:
>>> w = ivy.Container(a=ivy.array([1., 2., 3.]), b=ivy.array([4., 5., 6.]))
>>> dcdw = ivy.array([1., 0.2, 0.4])
>>> mw_tm1 = ivy.array([0., 0., 0.])
>>> vw_tm1 = ivy.array([0.])
>>> lr = ivy.array(0.01)
>>> step = 2
>>> updated_weights = w.adam_update(dcdw, mw_tm1, vw_tm1, lr, step)
>>> print(updated_weights)
({
a: ivy.array([1., 2., 3.]),
b: ivy.array([4., 5., 6.])
}, ivy.array([0.1 , 0.02, 0.04]), ivy.array([0.01099, 0.01003, 0.01015]))
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> dcdw = ivy.Container(a=ivy.array([0.1,0.3,0.3]),
... b=ivy.array([0.3,0.2,0.2]))
>>> lr = ivy.array(0.001)
>>> mw_tm1 = ivy.Container(a=ivy.array([0.,0.,0.]),
... b=ivy.array([0.,0.,0.]))
>>> vw_tm1 = ivy.Container(a=ivy.array([0.,]),
... b=ivy.array([0.,]))
>>> step = 3
>>> beta1 = 0.9
>>> beta2 = 0.999
>>> epsilon = 1e-7
>>> stop_gradients = False
>>> updated_weights = w.adam_update(dcdw, lr, mw_tm1, vw_tm1, step, beta1=beta1,
... beta2=beta2, epsilon=epsilon,
... stop_gradients=stop_gradients)
>>> print(updated_weights)
({
a: ivy.array([0.99936122, 1.99936116, 2.99936128]),
b: ivy.array([3.99936128, 4.99936104, 5.99936104])
}, {
a: ivy.array([0.01, 0.03, 0.03]),
b: ivy.array([0.03, 0.02, 0.02])
}, {
a: ivy.array([1.00000016e-05, 9.00000086e-05, 9.00000086e-05]),
b: ivy.array([9.00000086e-05, 4.00000063e-05, 4.00000063e-05])
})
"""
return ivy.adam_update(
self,
dcdw,
lr,
mw_tm1,
vw_tm1,
step,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
stop_gradients=stop_gradients,
out=out,
)
def lamb_update(
self: ivy.Container,
dcdw: Union[ivy.Array, ivy.NativeArray, ivy.Container],
lr: Union[float, ivy.Array, ivy.NativeArray, ivy.Container],
mw_tm1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
vw_tm1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
step: Union[int, ivy.Container],
/,
*,
beta1: Union[float, ivy.Container] = 0.9,
beta2: Union[float, ivy.Container] = 0.999,
epsilon: Union[float, ivy.Container] = 1e-7,
max_trust_ratio: Union[int, float, ivy.Container] = 10,
decay_lambda: Union[float, ivy.Container] = 0,
stop_gradients: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""Update weights ws of some function, given the derivatives of some
cost c with respect to ws, [dc/dw for w in ws], by applying LAMB
method.
Parameters
----------
self
Weights of the function to be updated.
dcdw
Derivates of the cost c with respect to the weights ws, [dc/dw for w in ws].
lr
Learning rate(s), the rate(s) at which the weights should be updated
relative to the gradient.
mw_tm1
running average of the gradients, from the previous time-step.
vw_tm1
running average of second moments of the gradients, from the previous
time-step.
step
training step.
beta1
gradient forgetting factor (Default value = 0.9).
beta2
second moment of gradient forgetting factor (Default value = 0.999).
epsilon
divisor during adam update, preventing division by zero
(Default value = 1e-7).
max_trust_ratio
The maximum value for the trust ratio. Default is 10.
decay_lambda
The factor used for weight decay. Default is zero.
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The new function weights ws_new, following the LAMB updates.
Examples
--------
With one :class:`ivy.Container` inputs:
>>> w = ivy.Container(a=ivy.array([1., 2., 3.]), b=ivy.array([4., 5., 6.]))
>>> dcdw = ivy.array([3., 4., 5.])
>>> mw_tm1 = ivy.array([0., 0., 0.])
>>> vw_tm1 = ivy.array([0.])
>>> lr = ivy.array(1.)
>>> step = ivy.array([2])
>>> new_weights = w.lamb_update(dcdw, mw_tm1, vw_tm1, lr, step)
>>> print(new_weights)
({
a: ivy.array([1., 2., 3.]),
b: ivy.array([4., 5., 6.])
}, ivy.array([0.3, 0.4, 0.5]), ivy.array([1.01, 1.01, 1.02]))
With multiple :class:`ivy.Container` inputs:
>>> w = ivy.Container(a=ivy.array([1.,3.,5.]),
... b=ivy.array([3.,4.,2.]))
>>> dcdw = ivy.Container(a=ivy.array([0.2,0.3,0.6]),
... b=ivy.array([0.6,0.4,0.7]))
>>> mw_tm1 = ivy.Container(a=ivy.array([0.,0.,0.]),
... b=ivy.array([0.,0.,0.]))
>>> vw_tm1 = ivy.Container(a=ivy.array([0.,]),
... b=ivy.array([0.,]))
>>> step = ivy.array([3.4])
>>> beta1 = 0.9
>>> beta2 = 0.999
>>> epsilon = 1e-7
>>> max_trust_ratio = 10
>>> decay_lambda = 0
>>> stop_gradients = True
>>> lr = ivy.array(0.5)
>>> new_weights = w.lamb_update(dcdw, lr, mw_tm1, vw_tm1, step, beta1=beta1,
... beta2=beta2, epsilon=epsilon,
... max_trust_ratio=max_trust_ratio,
... decay_lambda=decay_lambda,
... stop_gradients=stop_gradients)
>>> print(new_weights)
({
a: ivy.array([-0.708, 1.29, 3.29]),
b: ivy.array([1.45, 2.45, 0.445])
}, {
a: ivy.array([0.02, 0.03, 0.06]),
b: ivy.array([0.06, 0.04, 0.07])
}, {
a: ivy.array([4.0e-05, 9.0e-05, 3.6e-04]),
b: ivy.array([0.00036, 0.00016, 0.00049])
})
"""
return ivy.lamb_update(
self,
dcdw,
lr,
mw_tm1,
vw_tm1,
step,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
max_trust_ratio=max_trust_ratio,
decay_lambda=decay_lambda,
stop_gradients=stop_gradients,
out=out,
)
| ivy/ivy/data_classes/container/gradients.py/0 | {
"file_path": "ivy/ivy/data_classes/container/gradients.py",
"repo_id": "ivy",
"token_count": 14193
} | 13 |
# local
from .base import FactorizedTensor
import ivy
class CPTensor(FactorizedTensor):
def __init__(self, cp_tensor):
super().__init__()
shape, rank = ivy.CPTensor.validate_cp_tensor(cp_tensor)
weights, factors = cp_tensor
if weights is None:
weights = ivy.ones(rank, dtype=factors[0].dtype)
self.shape = shape
self.rank = rank
self.factors = factors
self.weights = weights
# Built-ins #
def __getitem__(self, index):
if index == 0:
return self.weights
elif index == 1:
return self.factors
else:
raise IndexError(
f"You tried to access index {index} of a CP tensor.\n"
"You can only access index 0 and 1 of a CP tensor"
"(corresponding respectively to the weights and factors)"
)
def __setitem__(self, index, value):
if index == 0:
self.weights = value
elif index == 1:
self.factors = value
else:
raise IndexError(
f"You tried to set the value at index {index} of a CP tensor.\n"
"You can only set index 0 and 1 of a CP tensor"
"(corresponding respectively to the weights and factors)"
)
def __iter__(self):
yield self.weights
yield self.factors
def __len__(self):
return 2
def __repr__(self):
message = (
f"(weights, factors) : rank-{self.rank} CPTensor of shape {self.shape}"
)
return message
# Public Methods #
# ---------------#
def to_tensor(self):
return ivy.CPTensor.cp_to_tensor(self)
def to_vec(self):
return ivy.CPTensor.cp_to_vec(self)
def to_unfolded(self, mode):
return ivy.CPTensor.cp_to_unfolded(self, mode)
def cp_copy(self):
return CPTensor(
(
ivy.copy_array(self.weights),
[ivy.copy_array(self.factors[i]) for i in range(len(self.factors))],
)
)
def mode_dot(self, matrix_or_vector, mode, keep_dim=False, copy=True):
"""N-mode product of a CP tensor and a matrix or vector at the
specified mode.
Parameters
----------
cp_tensor
matrix_or_vector
1D or 2D array of shape ``(J, i_k)`` or ``(i_k, )``
matrix or vectors to which to n-mode multiply the tensor
mode
int
Returns
-------
CPTensor = (core, factors)
`mode`-mode product of `tensor` by `matrix_or_vector`
* of shape :math:`(i_1, ..., i_{k-1}, J, i_{k+1}, ..., i_N)`
if matrix_or_vector is a matrix
* of shape :math:`(i_1, ..., i_{k-1}, i_{k+1}, ..., i_N)`
if matrix_or_vector is a vector
See Also
--------
cp_mode_dot : chaining several mode_dot in one call
"""
return ivy.CPTensor.cp_mode_dot(
self, matrix_or_vector, mode, keep_dim=keep_dim, copy=copy
)
def norm(self):
"""Return the l2 norm of a CP tensor.
Parameters
----------
cp_tensor
ivy.CPTensor or (core, factors)
Returns
-------
l2-norm
int
Notes
-----
This is ||cp_to_tensor(factors)||^2
You can see this using the fact that
khatria-rao(A, B)^T x khatri-rao(A, B) = A^T x A * B^T x B
"""
return ivy.CPTensor.cp_norm(self)
def normalize(self, inplace=True):
"""Normalize the factors to unit length.
Turns ``factors = [|U_1, ... U_n|]`` into ``[weights; |V_1, ... V_n|]``,
where the columns of each `V_k` are normalized to unit Euclidean length
from the columns of `U_k` with the normalizing constants absorbed into
`weights`. In the special case of a symmetric tensor, `weights` holds the
eigenvalues of the tensor.
Parameters
----------
cp_tensor
CPTensor = (weight, factors)
factors is list of matrices, all with the same number of columns
i.e.::
for u in U:
u[i].shape == (s_i, R)
where `R` is fixed while `s_i` can vary with `i`
inplace
if False, returns a normalized Copy
otherwise the tensor modifies itself and returns itself
Returns
-------
CPTensor = (normalisation_weights, normalised_factors)
returns itself if inplace is True, a normalized copy otherwise
"""
weights, factors = ivy.CPTensor.cp_normalize(self)
if inplace:
self.weights, self.factors = weights, factors
return self
return ivy.CPTensor((weights, factors))
# Properties #
# ---------------#
@property
def n_param(self):
factors_params = self.rank * ivy.sum(self.shape)
if self.weights:
return factors_params + self.rank
else:
return factors_params
# Class Methods #
# ---------------#
@staticmethod
def validate_cp_tensor(cp_tensor):
"""Validate a cp_tensor in the form (weights, factors)
Return the rank and shape of the validated tensor
Parameters
----------
cp_tensor
CPTensor or (weights, factors)
Returns
-------
(shape, rank)
size of the full tensor and rank of the CP tensor
"""
if isinstance(cp_tensor, CPTensor):
# it's already been validated at creation
return cp_tensor.shape, cp_tensor.rank
elif isinstance(cp_tensor, (float, int)): # 0-order tensor
return 0, 0
weights, factors = cp_tensor
ndim = len(factors[0].shape)
if ndim == 2:
rank = int(ivy.shape(factors[0])[1])
elif ndim == 1:
rank = 1
else:
raise ValueError(
"Got a factor with 3 dimensions but CP factors should be at most 2D, of"
" shape (size, rank)."
)
shape = []
for i, factor in enumerate(factors):
s = ivy.shape(factor)
if len(s) == 2:
current_mode_size, current_rank = s
else: # The shape is just (size, ) if rank 1
current_mode_size, current_rank = *s, 1
if current_rank != rank:
raise ValueError(
"All the factors of a CP tensor should have the same number of"
f" column.However, factors[0].shape[1]={rank} but"
f" factors[{i}].shape[1]={ivy.shape(factor)[1]}."
)
shape.append(current_mode_size)
if weights is not None and len(weights) != rank:
raise ValueError(
f"Given factors for a rank-{rank} CP tensor but"
f" len(weights)={ivy.shape(weights)}."
)
return tuple(shape), rank
@staticmethod
def cp_n_param(tensor_shape, rank, weights=False):
"""Return number of parameters of a CP decomposition for a given `rank`
and full `tensor_shape`.
Parameters
----------
tensor_shape
shape of the full tensor to decompose (or approximate)
rank
rank of the CP decomposition
Returns
-------
n_params
Number of parameters of a CP decomposition of rank `rank`
of a full tensor of shape `tensor_shape`
"""
factors_params = rank * ivy.sum(tensor_shape)
if weights:
return factors_params + rank
else:
return factors_params
@staticmethod
def validate_cp_rank(tensor_shape, rank="same", rounding="round"):
"""Return the rank of a CP Decomposition.
Parameters
----------
tensor_shape
shape of the tensor to decompose
rank
way to determine the rank, by default 'same'
if 'same': rank is computed to keep the number
of parameters (at most) the same
if float, computes a rank so as to keep rank
percent of the original number of parameters
if int, just returns rank
rounding
{'round', 'floor', 'ceil'}
Returns
-------
rank
rank of the decomposition
"""
if rounding == "ceil":
rounding_fun = ivy.ceil
elif rounding == "floor":
rounding_fun = ivy.floor
elif rounding == "round":
rounding_fun = ivy.round
else:
raise ValueError(
f"Rounding should be of round, floor or ceil, but got {rounding}"
)
if rank == "same":
rank = float(1)
if isinstance(rank, float):
rank = int(
rounding_fun(ivy.prod(tensor_shape) * rank / ivy.sum(tensor_shape))
)
return rank
@staticmethod
def cp_normalize(cp_tensor):
"""Return cp_tensor with factors normalised to unit length.
Turns ``factors = [|U_1, ... U_n|]`` into ``[weights;
|V_1, ... V_n|]``, where the columns of each `V_k` are
normalized to unit Euclidean length from the columns of
`U_k` with the normalizing constants absorbed into
`weights`. In the special case of a symmetric tensor,
`weights` holds the eigenvalues of the tensor.
Parameters
----------
cp_tensor
factors is list of matrices,
all with the same number of columns
i.e.::
for u in U:
u[i].shape == (s_i, R)
where `R` is fixed while `s_i` can vary with `i`
Returns
-------
CPTensor = (normalisation_weights, normalised_factors)
"""
_, rank = ivy.CPTensor.validate_cp_tensor(cp_tensor)
weights, factors = cp_tensor
if weights is None:
weights = ivy.ones(rank, dtype=factors[0].dtype)
normalized_factors = []
for i, factor in enumerate(factors):
if i == 0:
factor = factor * weights
weights = ivy.ones((rank,), dtype=factor.dtype)
scales = ivy.sqrt(ivy.sum(ivy.square(factor), axis=0))
scales_non_zero = ivy.where(
scales == 0, ivy.ones(ivy.shape(scales), dtype=factor.dtype), scales
)
weights = weights * scales
normalized_factors.append(factor / ivy.reshape(scales_non_zero, (1, -1)))
return CPTensor((weights, normalized_factors))
@staticmethod
def cp_flip_sign(cp_tensor, mode=0, func=None):
"""Return cp_tensor with factors flipped to have positive signs. The
sign of a given column is determined by `func`, which is the mean by
default. Any negative signs are assigned to the mode indicated by
`mode`.
Parameters
----------
cp_tensor
CPTensor = (weight, factors)
factors is list of matrices, all with the same number of columns
i.e.::
for u in U:
u[i].shape == (s_i, R)
where `R` is fixed while `s_i` can vary with `i`
mode
mode that should receive negative signs
func
a function that should summarize the sign of a column
it must be able to take an axis argument
Returns
-------
CPTensor = (normalisation_weights, normalised_factors)
"""
ivy.CPTensor.validate_cp_tensor(cp_tensor)
weights, factors = cp_tensor
if func is None:
func = ivy.mean
for jj in range(0, len(factors)):
# Skip the target mode
if jj == mode:
continue
# Calculate the sign of the current factor in each component
column_signs = ivy.sign(func(factors[jj], axis=0))
# Update both the current and receiving factor
factors[mode] = factors[mode] * column_signs[ivy.newaxis, :]
factors[jj] = factors[jj] * column_signs[ivy.newaxis, :]
# Check the weight signs
weight_signs = ivy.sign(weights)
factors[mode] = factors[mode] * weight_signs[ivy.newaxis, :]
weights = ivy.abs(weights)
return CPTensor((weights, factors))
@staticmethod
def cp_lstsq_grad(cp_tensor, tensor, return_loss=False, mask=None):
r"""Compute (for a third-order tensor)
.. math::
\nabla 0.5 ||\\mathcal{X} - [\\mathbf{w}; \\mathbf{A}, \\mathbf{B}, \\mathbf{C}]||^2
where :math:`[\\mathbf{w}; \\mathbf{A}, \\mathbf{B}, \\mathbf{C}]`
is the CP decomposition with weights
:math:`\\mathbf{w}` and factor matrices :math:`\\mathbf{A}`, :math:`\\mathbf{B}` and :math:`\\mathbf{C}`.
Note that this does not return the gradient
with respect to the weights even if CP is normalized.
Parameters
----------
cp_tensor
CPTensor = (weight, factors)
factors is a list of factor matrices,
all with the same number of columns
i.e. for all matrix U in factor_matrices:
U has shape ``(s_i, R)``, where R is fixed and s_i varies with i
mask
A mask to be applied to the final tensor. It should be
broadcastable to the shape of the final tensor, that is
``(U[1].shape[0], ... U[-1].shape[0])``.
return_loss
Optionally return the scalar loss function along with the gradient.
Returns
-------
cp_gradient : CPTensor = (None, factors)
factors is a list of factor matrix gradients,
all with the same number of columns
i.e. for all matrix U in factor_matrices:
U has shape ``(s_i, R)``, where R is fixed and s_i varies with i
loss : float
Scalar quantity of the loss function corresponding to cp_gradient. Only returned
if return_loss = True.
""" # noqa: E501
ivy.CPTensor.validate_cp_tensor(cp_tensor)
_, factors = cp_tensor
diff = tensor - ivy.CPTensor.cp_to_tensor(cp_tensor)
if mask is not None:
diff = diff * mask
grad_fac = [
-ivy.CPTensor.unfolding_dot_khatri_rao(diff, cp_tensor, ii)
for ii in range(len(factors))
]
if return_loss:
return CPTensor((None, grad_fac)), 0.5 * ivy.sum(diff**2)
return CPTensor((None, grad_fac))
@staticmethod
def cp_to_tensor(cp_tensor, mask=None):
"""Turn the Khatri-product of matrices into a full tensor.
``factor_matrices = [|U_1, ... U_n|]`` becomes
a tensor shape ``(U[1].shape[0], U[2].shape[0], ... U[-1].shape[0])``
Parameters
----------
cp_tensor
factors is a list of factor matrices,
all with the same number of columns
i.e. for all matrix U in factor_matrices:
U has shape ``(s_i, R)``, where R is fixed and s_i varies with i
mask
mask to be applied to the final tensor. It should be
broadcastable to the shape of the final tensor, that is
``(U[1].shape[0], ... U[-1].shape[0])``.
Returns
-------
ivy.Array
full tensor of shape ``(U[1].shape[0], ... U[-1].shape[0])``
Notes
-----
This version works by first computing the mode-0 unfolding of the tensor
and then refolding it.
There are other possible and equivalent alternate implementation, e.g.
summing over r and updating an outer product of vectors.
"""
shape, _ = ivy.CPTensor.validate_cp_tensor(cp_tensor)
if not shape: # 0-order tensor
return cp_tensor
weights, factors = cp_tensor
if len(shape) == 1: # just a vector
return ivy.sum(weights * factors[0], axis=1)
if weights is None:
weights = 1
if mask is None:
full_tensor = ivy.matmul(
factors[0] * weights,
ivy.permute_dims(ivy.khatri_rao(factors, skip_matrix=0), (1, 0)),
)
else:
full_tensor = ivy.sum(
ivy.khatri_rao([factors[0] * weights] + factors[1:], mask=mask), axis=1
)
return ivy.fold(full_tensor, 0, shape)
@staticmethod
def cp_to_unfolded(cp_tensor, mode):
"""Turn the khatri-product of matrices into an unfolded tensor.
turns ``factors = [|U_1, ... U_n|]`` into a mode-`mode`
unfolding of the tensor
Parameters
----------
cp_tensor
factors is a list of matrices, all with the same number of columns
ie for all u in factor_matrices:
u[i] has shape (s_u_i, R), where R is fixed
mode
mode of the desired unfolding
Returns
-------
ivy.Array
unfolded tensor of shape (tensor_shape[mode], -1)
Notes
-----
Writing factors = [U_1, ..., U_n], we exploit the fact that
``U_k = U[k].dot(khatri_rao(U_1, ..., U_k-1, U_k+1, ..., U_n))``
"""
ivy.CPTensor.validate_cp_tensor(cp_tensor)
weights, factors = cp_tensor
if weights is not None:
return ivy.dot(
factors[mode] * weights,
ivy.permute_dims(ivy.khatri_rao(factors, skip_matrix=mode), (1, 0)),
)
else:
return ivy.dot(
factors[mode],
ivy.permute_dims(ivy.khatri_rao(factors, skip_matrix=mode), (1, 0)),
)
@staticmethod
def cp_to_vec(cp_tensor):
"""Turn the khatri-product of matrices into a vector.
(the tensor ``factors = [|U_1, ... U_n|]``
is converted into a raveled mode-0 unfolding)
Parameters
----------
cp_tensor
factors is a list of matrices, all with the same number of columns
i.e.::
for u in U:
u[i].shape == (s_i, R)
where `R` is fixed while `s_i` can vary with `i`
Returns
-------
ivy.Array
vectorised tensor
"""
return ivy.reshape(ivy.CPTensor.cp_to_tensor(cp_tensor), (-1))
@staticmethod
def cp_mode_dot(cp_tensor, matrix_or_vector, mode, keep_dim=False, copy=False):
"""N-mode product of a CP tensor and a matrix or vector at the
specified mode.
Parameters
----------
cp_tensor
ivy.CPTensor or (core, factors)
matrix_or_vector
1D or 2D array of shape ``(J, i_k)`` or ``(i_k, )``
matrix or vectors to which to n-mode multiply the tensor
mode : int
Returns
-------
CPTensor = (core, factors)
`mode`-mode product of `tensor` by `matrix_or_vector`
* of shape :math:`(i_1, ..., i_{k-1}, J, i_{k+1}, ..., i_N)`
if matrix_or_vector is a matrix
* of shape :math:`(i_1, ..., i_{k-1}, i_{k+1}, ..., i_N)`
if matrix_or_vector is a vector
See Also
--------
cp_multi_mode_dot : chaining several mode_dot in one call
"""
shape, _ = ivy.CPTensor.validate_cp_tensor(cp_tensor)
weights, factors = cp_tensor
contract = False
ndims = len(matrix_or_vector.shape)
if ndims == 2: # Tensor times matrix
# Test for the validity of the operation
if matrix_or_vector.shape[1] != shape[mode]:
raise ValueError(
f"shapes {shape} and {matrix_or_vector.shape} not aligned in"
f" mode-{mode} multiplication: {shape[mode]} (mode {mode}) !="
f" {matrix_or_vector.shape[1]} (dim 1 of matrix)"
)
elif ndims == 1: # Tensor times vector
if matrix_or_vector.shape[0] != shape[mode]:
raise ValueError(
f"shapes {shape} and {matrix_or_vector.shape} not aligned for"
f" mode-{mode} multiplication: {shape[mode]} (mode {mode}) !="
f" {matrix_or_vector.shape[0]} (vector size)"
)
if not keep_dim:
contract = True # Contract over that mode
else:
raise ValueError("Can only take n_mode_product with a vector or a matrix.")
if copy:
factors = [ivy.copy_array(f) for f in factors]
weights = ivy.copy_array(weights)
if contract:
factor = factors.pop(mode)
factor = ivy.dot(matrix_or_vector, factor)
mode = max(mode - 1, 0)
factors[mode] *= factor
else:
factors[mode] = ivy.dot(matrix_or_vector, factors[mode])
if copy:
return CPTensor((weights, factors))
else:
cp_tensor.shape = tuple(f.shape[0] for f in factors)
return cp_tensor
@staticmethod
def cp_norm(cp_tensor):
"""Return the l2 norm of a CP tensor.
Parameters
----------
cp_tensor
ivy.CPTensor or (core, factors)
Returns
-------
l2-norm
Notes
-----
This is ||cp_to_tensor(factors)||^2
You can see this using the fact that
khatria-rao(A, B)^T x khatri-rao(A, B) = A^T x A * B^T x B
"""
_ = ivy.CPTensor.validate_cp_tensor(cp_tensor)
weights, factors = cp_tensor
norm = ivy.ones(
(factors[0].shape[1], factors[0].shape[1]), dtype=factors[0].dtype
)
for f in factors:
norm = norm * ivy.dot(ivy.permute_dims(f, (1, 0)), ivy.conj(f))
if weights is not None:
# norm = T.dot(T.dot(weights, norm), weights)
norm = norm * (
ivy.reshape(weights, (-1, 1)) * ivy.reshape(weights, (1, -1))
)
return ivy.sqrt(ivy.sum(norm))
# uncomment when ivy.congruence_coefficient has been implemented
# which inturn requires linear_sum_assignment to be implemented.
# @staticmethod
# def cp_permute_factors(ref_cp_tensor, tensors_to_permute):
# """
# Compare factors of a reference cp tensor
# with factors of other another tensor
# (or list of tensor) in order to match
# component order. Permutation occurs on the
# columns of factors, minimizing the cosine distance
# to reference cp tensor with
# scipy Linear Sum Assignment method. The permuted
# tensor (or list of tensors) and
# list of permutation for each
# permuted tensors are returned.
# Parameters
# ----------
# ref_cp_tensor : cp tensor
# The tensor that serves as a reference for permutation.
# tensors_to_permute : cp tensor or list of cp tensors
# The tensors to permute so that the order of components
# match the reference tensor. Number of components must match.
# Returns
# -------
# permuted_tensors : permuted cp tensor or list of cp tensors
# permutation : list
# list of permuted indices. Length is equal to rank of cp_tensors.
# """
# if not isinstance(tensors_to_permute, list):
# permuted_tensors = [tensors_to_permute.cp_copy()]
# tensors_to_permute = [tensors_to_permute]
# else:
# permuted_tensors = []
# for i in range(len(tensors_to_permute)):
# permuted_tensors.append(tensors_to_permute[i].cp_copy())
# tensors_to_permute[i] = ivy.CPTensor.cp_normalize(tensors_to_permute[i]) # noqa
# ref_cp_tensor = ivy.CPTensor.cp_normalize(ref_cp_tensor)
# n_tensors = len(tensors_to_permute)
# n_factors = len(ref_cp_tensor.factors)
# permutation = []
# for i in range(n_tensors):
# _, col = ivy.congruence_coefficient(
# ref_cp_tensor.factors, tensors_to_permute[i].factors
# )
# col = ivy.array(col, dtype=ivy.int64)
# for f in range(n_factors):
# permuted_tensors[i].factors[f] = permuted_tensors[i].factors[f][:, col]
# permuted_tensors[i].weights = permuted_tensors[i].weights[col]
# permutation.append(col)
# if len(permuted_tensors) == 1:
# permuted_tensors = permuted_tensors[0]
# return permuted_tensors, permutation
@staticmethod
def unfolding_dot_khatri_rao(x, cp_tensor, mode):
"""Mode-n unfolding times khatri-rao product of factors.
Parameters
----------
x
tensor to unfold
factors
list of matrices of which to the khatri-rao product
mode
mode on which to unfold `tensor`
Returns
-------
mttkrp
dot(unfold(x, mode), khatri-rao(factors))
"""
mttkrp_parts = []
weights, factors = cp_tensor
rank = ivy.shape(factors[0])[1]
for r in range(rank):
component = ivy.multi_mode_dot(
x, [ivy.conj(f[:, r]) for f in factors], skip=mode
)
mttkrp_parts.append(component)
if weights is None:
return ivy.stack(mttkrp_parts, axis=1)
else:
return ivy.stack(mttkrp_parts, axis=1) * ivy.reshape(weights, (1, -1))
| ivy/ivy/data_classes/factorized_tensor/cp_tensor.py/0 | {
"file_path": "ivy/ivy/data_classes/factorized_tensor/cp_tensor.py",
"repo_id": "ivy",
"token_count": 12594
} | 14 |
use super::{
ArrayElement, ArrayShape, ElementType, FromPrimitive, NativeType, PrimitiveType, Shape,
};
use crate::{c_lib, Error, Result};
use pyo3::prelude::*;
/// A literal represent a value, typically a multi-dimensional array, stored on the host device.
#[derive(Debug)]
#[pyclass(unsendable)]
pub struct Literal(pub(super) c_lib::literal);
impl Clone for Literal {
fn clone(&self) -> Self {
let v = unsafe { c_lib::literal_clone(self.0) };
Self(v)
}
}
impl Literal {
/// Create an uninitialized literal based on some primitive type and some dimensions.
pub fn create_from_shape(ty: PrimitiveType, dims: &[usize]) -> Self {
let dims: Vec<_> = dims.iter().map(|x| *x as i64).collect();
let v = unsafe { c_lib::literal_create_from_shape(ty as i32, dims.as_ptr(), dims.len()) };
Self(v)
}
/// Create an uninitialized literal based on some primitive type, some dimensions, and some data.
/// The data is untyped, i.e. it is a sequence of bytes represented as a slice of `u8` even if
/// the primitive type is not `U8`.
pub fn create_from_shape_and_untyped_data(
ty: ElementType,
dims: &[usize],
untyped_data: &[u8],
) -> Result<Self> {
let dims64: Vec<_> = dims.iter().map(|x| *x as i64).collect();
let ty = ty.primitive_type();
let v = unsafe {
c_lib::literal_create_from_shape_and_data(
ty as i32,
dims64.as_ptr(),
dims64.len(),
untyped_data.as_ptr() as *const libc::c_void,
untyped_data.len(),
)
};
if v.is_null() {
return Err(Error::CannotCreateLiteralWithData {
data_len_in_bytes: untyped_data.len(),
ty,
dims: dims.to_vec(),
});
}
Ok(Self(v))
}
/// Get the first element from a literal. This returns an error if type `T` is not the
/// primitive type that the literal uses.
pub fn get_first_element<T: NativeType + ArrayElement>(&self) -> Result<T> {
let ty = self.ty()?;
if ty != T::TY {
Err(Error::ElementTypeMismatch { on_device: ty, on_host: T::TY })?
}
if self.element_count() == 0 {
Err(Error::EmptyLiteral)?
}
let v = unsafe { T::literal_get_first_element(self.0) };
Ok(v)
}
/// The number of elements stored in the literal.
pub fn element_count(&self) -> usize {
unsafe { c_lib::literal_element_count(self.0) as usize }
}
/// The primitive type used by element stored in this literal.
pub fn primitive_type(&self) -> Result<PrimitiveType> {
let ty = unsafe { c_lib::literal_element_type(self.0) };
match FromPrimitive::from_i32(ty) {
None => Err(Error::UnexpectedElementType(ty)),
Some(ty) => Ok(ty),
}
}
/// The element type used by element stored in this literal.
pub fn element_type(&self) -> Result<ElementType> {
self.primitive_type()?.element_type()
}
/// The element type used by element stored in this literal, shortcut for `element_type`.
pub fn ty(&self) -> Result<ElementType> {
self.element_type()
}
/// The literal size in bytes, this is the same as `element_count` multiplied by
/// `element_size_in_bytes`.
pub fn size_bytes(&self) -> usize {
unsafe { c_lib::literal_size_bytes(self.0) as usize }
}
/// The [`Shape`] of the literal, this contains information about the dimensions of the
/// underlying array, as well as the primitive type of the array's elements.
pub fn shape(&self) -> Result<Shape> {
let mut out: c_lib::shape = std::ptr::null_mut();
unsafe { c_lib::literal_shape(self.0, &mut out) };
let c_shape = super::shape::CShape::from_ptr(out);
c_shape.shape()
}
pub fn array_shape(&self) -> Result<ArrayShape> {
ArrayShape::try_from(&self.shape()?)
}
/// Copy the literal data to a slice. This returns an error if the primitive type used by the
/// literal is not `T` or if the number of elements in the slice and literal are different.
pub fn copy_raw_to<T: ArrayElement>(&self, dst: &mut [T]) -> Result<()> {
let ty = self.ty()?;
let element_count = self.element_count();
if ty != T::TY {
Err(Error::ElementTypeMismatch { on_device: ty, on_host: T::TY })?
}
if dst.len() > element_count {
Err(Error::BinaryBufferIsTooLarge { element_count, buffer_len: dst.len() })?
}
unsafe {
c_lib::literal_copy_to(
self.0,
dst.as_mut_ptr() as *mut libc::c_void,
element_count * T::ELEMENT_SIZE_IN_BYTES,
)
};
Ok(())
}
/// Copy data from a slice to the literal. This returns an error if the primitive type used
/// by the literal is not `T` or if number of elements in the slice and the literal are
/// different.
pub fn copy_raw_from<T: ArrayElement>(&mut self, src: &[T]) -> Result<()> {
let ty = self.ty()?;
let element_count = self.element_count();
if ty != T::TY {
Err(Error::ElementTypeMismatch { on_device: ty, on_host: T::TY })?
}
if src.len() > element_count {
Err(Error::BinaryBufferIsTooLarge { element_count, buffer_len: src.len() })?
}
unsafe {
c_lib::literal_copy_from(
self.0,
src.as_ptr() as *const libc::c_void,
element_count * T::ELEMENT_SIZE_IN_BYTES,
)
};
Ok(())
}
/// Copy the values stored in the literal in a newly created vector. The data is flattened out
/// for literals with more than one dimension.
pub fn to_vec<T: ArrayElement>(&self) -> Result<Vec<T>> {
let element_count = self.element_count();
// Maybe we should use an uninitialized vec instead?
let mut data = vec![T::ZERO; element_count];
self.copy_raw_to(&mut data)?;
Ok(data)
}
/// Create a literal from a scalar value, the resulting literal has zero dimensions and stores
/// a single element.
pub fn scalar<T: NativeType>(t: T) -> Self {
let ptr = unsafe { T::create_r0(t) };
Literal(ptr)
}
/// Create a literal from a slice of data, the resulting literal has one dimension which size
/// is the same as the slice passed as argument.
pub fn vec1<T: NativeType>(f: &[T]) -> Self {
let ptr = unsafe { T::create_r1(f.as_ptr(), f.len()) };
Literal(ptr)
}
/// Create a new literal containing the same data but using a different shape. This returns an
/// error if the number of elements in the literal is different from the product of the target
/// dimension sizes.
pub fn reshape(&self, dims: &[i64]) -> Result<Literal> {
let mut result: c_lib::literal = std::ptr::null_mut();
let status =
unsafe { c_lib::literal_reshape(self.0, dims.as_ptr(), dims.len(), &mut result) };
super::handle_status(status)?;
Ok(Literal(result))
}
/// Create a new literal containing the data from the original literal casted to a new
/// primitive type. The dimensions of the resulting literal are the same as the dimensions of
/// the original literal.
pub fn convert(&self, ty: PrimitiveType) -> Result<Literal> {
let mut result: c_lib::literal = std::ptr::null_mut();
let status = unsafe { c_lib::literal_convert(self.0, ty as i32, &mut result) };
super::handle_status(status)?;
Ok(Literal(result))
}
/// When the input is a tuple, return a vector of its elements. This replaces the original
/// value by an empty tuple, no copy is performed.
pub fn decompose_tuple(&mut self) -> Result<Vec<Literal>> {
match self.shape()? {
Shape::Array(_) | Shape::Unsupported(_) => Ok(vec![]),
Shape::Tuple(shapes) => {
let tuple_len = shapes.len();
let mut outputs = vec![std::ptr::null_mut::<c_lib::_literal>(); tuple_len];
unsafe { c_lib::literal_decompose_tuple(self.0, outputs.as_mut_ptr(), tuple_len) };
Ok(outputs.into_iter().map(Literal).collect())
}
}
}
pub fn to_tuple(mut self) -> Result<Vec<Literal>> {
self.decompose_tuple()
}
pub fn to_tuple1(mut self) -> Result<Self> {
let mut tuple = self.decompose_tuple()?;
if tuple.len() != 1 {
Err(Error::UnexpectedNumberOfElemsInTuple { expected: 1, got: tuple.len() })?
}
let v1 = tuple.pop().unwrap();
Ok(v1)
}
pub fn to_tuple2(mut self) -> Result<(Self, Self)> {
let mut tuple = self.decompose_tuple()?;
if tuple.len() != 2 {
Err(Error::UnexpectedNumberOfElemsInTuple { expected: 2, got: tuple.len() })?
}
let v2 = tuple.pop().unwrap();
let v1 = tuple.pop().unwrap();
Ok((v1, v2))
}
pub fn to_tuple3(mut self) -> Result<(Self, Self, Self)> {
let mut tuple = self.decompose_tuple()?;
if tuple.len() != 3 {
Err(Error::UnexpectedNumberOfElemsInTuple { expected: 3, got: tuple.len() })?
}
let v3 = tuple.pop().unwrap();
let v2 = tuple.pop().unwrap();
let v1 = tuple.pop().unwrap();
Ok((v1, v2, v3))
}
pub fn to_tuple4(mut self) -> Result<(Self, Self, Self, Self)> {
let mut tuple = self.decompose_tuple()?;
if tuple.len() != 4 {
Err(Error::UnexpectedNumberOfElemsInTuple { expected: 4, got: tuple.len() })?
}
let v4 = tuple.pop().unwrap();
let v3 = tuple.pop().unwrap();
let v2 = tuple.pop().unwrap();
let v1 = tuple.pop().unwrap();
Ok((v1, v2, v3, v4))
}
pub fn tuple(elems: Vec<Self>) -> Self {
let elem_ptrs: Vec<_> = elems.iter().map(|e| e.0).collect();
let literal =
unsafe { c_lib::literal_make_tuple_owned(elem_ptrs.as_ptr(), elem_ptrs.len()) };
// Ensure that elems are only dropped after the pointers have been used.
drop(elems);
Self(literal)
}
}
impl<T: NativeType> From<T> for Literal {
fn from(f: T) -> Self {
Literal::scalar(f)
}
}
impl<T: NativeType> From<&[T]> for Literal {
fn from(f: &[T]) -> Self {
Literal::vec1(f)
}
}
impl Drop for Literal {
fn drop(&mut self) {
unsafe { c_lib::literal_free(self.0) }
}
}
| ivy/ivy/engines/XLA/rust_api/src/wrappers/literal.rs/0 | {
"file_path": "ivy/ivy/engines/XLA/rust_api/src/wrappers/literal.rs",
"repo_id": "ivy",
"token_count": 4705
} | 15 |
# global
import sys
from packaging import version
import jaxlib
import jax
import jax.numpy as jnp
import importlib
from typing import Union
# make ivy.Container compatible with jax pytree traversal
from jax.tree_util import register_pytree_node
from jax.tree_util import tree_flatten, tree_unflatten
# local
import ivy
from ivy.func_wrapper import _dtype_from_version
backend_version = {"version": jax.__version__}
try:
register_pytree_node(
ivy.Container,
lambda c: tree_flatten(c.cont_to_dict()),
lambda a, c: ivy.Container(tree_unflatten(a, c)),
)
except Exception as e:
if "Duplicate custom PyTreeDef type registration" not in str(e):
raise
# make ivy.Array compatible with jax pytree traversal
def _array_flatten(tree):
return ((tree.data,), None)
def _array_unflatten(aux_data, children):
if type(*children) == object:
return children
return ivy.Array(*children)
try:
register_pytree_node(ivy.Array, _array_flatten, _array_unflatten)
except Exception as e:
if "Duplicate custom PyTreeDef type registration" not in str(e):
raise
# noinspection PyUnresolvedReferences
if not ivy.is_local():
_module_in_memory = sys.modules[__name__]
else:
_module_in_memory = sys.modules[ivy.import_module_path].import_cache[__name__]
use = ivy.utils.backend.ContextManager(_module_in_memory)
if version.parse(jax.__version__) >= version.parse("0.4.1"):
JaxArray = jax.Array
NativeArray = (jax.Array,)
else:
JaxArray = jaxlib.xla_extension.DeviceArray
NativeArray = (jaxlib.xla_extension.DeviceArray,)
if version.parse(jax.__version__) <= version.parse("0.4.8"):
JaxArray = Union[JaxArray, jax.interpreters.xla._DeviceArray]
NativeArray += (jax.interpreters.xla._DeviceArray,)
# noinspection PyUnresolvedReferences,PyProtectedMember
NativeDevice = jaxlib.xla_extension.Device
NativeDtype = jnp.dtype
NativeShape = tuple
NativeSparseArray = None
# devices
valid_devices = ("cpu", "gpu")
invalid_devices = ("tpu",)
# native data types
native_int8 = jnp.dtype("int8")
native_int16 = jnp.dtype("int16")
native_int32 = jnp.dtype("int32")
native_int64 = jnp.dtype("int64")
native_uint8 = jnp.dtype("uint8")
native_uint16 = jnp.dtype("uint16")
native_uint32 = jnp.dtype("uint32")
native_uint64 = jnp.dtype("uint64")
native_bfloat16 = jnp.dtype("bfloat16")
native_float16 = jnp.dtype("float16")
native_float32 = jnp.dtype("float32")
native_float64 = jnp.dtype("float64")
native_complex64 = jnp.dtype("complex64")
native_complex128 = jnp.dtype("complex128")
native_double = native_float64
native_bool = jnp.dtype("bool")
# valid data types
# ToDo: Add complex dtypes to valid_dtypes and fix all resulting failures.
# update these to add new dtypes
valid_dtypes = {
"0.4.24 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.uint16,
ivy.uint32,
ivy.uint64,
ivy.bfloat16,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
ivy.bool,
)
}
valid_numeric_dtypes = {
"0.4.24 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.uint16,
ivy.uint32,
ivy.uint64,
ivy.bfloat16,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
)
}
valid_int_dtypes = {
"0.4.24 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.uint16,
ivy.uint32,
ivy.uint64,
)
}
valid_uint_dtypes = {
"0.4.24 and below": (ivy.uint8, ivy.uint16, ivy.uint32, ivy.uint64)
}
valid_float_dtypes = {
"0.4.24 and below": (ivy.bfloat16, ivy.float16, ivy.float32, ivy.float64)
}
valid_complex_dtypes = {"0.4.24 and below": (ivy.complex64, ivy.complex128)}
# leave these untouched
valid_dtypes = _dtype_from_version(valid_dtypes, backend_version)
valid_numeric_dtypes = _dtype_from_version(valid_numeric_dtypes, backend_version)
valid_int_dtypes = _dtype_from_version(valid_int_dtypes, backend_version)
valid_float_dtypes = _dtype_from_version(valid_float_dtypes, backend_version)
valid_uint_dtypes = _dtype_from_version(valid_uint_dtypes, backend_version)
valid_complex_dtypes = _dtype_from_version(valid_complex_dtypes, backend_version)
# invalid data types
# update these to add new dtypes
invalid_dtypes = {"0.4.24 and below": ()}
invalid_numeric_dtypes = {"0.4.24 and below": ()}
invalid_int_dtypes = {"0.4.24 and below": ()}
invalid_float_dtypes = {"0.4.24 and below": ()}
invalid_uint_dtypes = {"0.4.24 and below": ()}
invalid_complex_dtypes = {"0.4.24 and below": ()}
# leave these untouched
invalid_dtypes = _dtype_from_version(invalid_dtypes, backend_version)
invalid_numeric_dtypes = _dtype_from_version(invalid_numeric_dtypes, backend_version)
invalid_int_dtypes = _dtype_from_version(invalid_int_dtypes, backend_version)
invalid_float_dtypes = _dtype_from_version(invalid_float_dtypes, backend_version)
invalid_uint_dtypes = _dtype_from_version(invalid_uint_dtypes, backend_version)
invalid_complex_dtypes = _dtype_from_version(invalid_complex_dtypes, backend_version)
native_inplace_support = False
supports_gradients = True
def closest_valid_dtype(type=None, /, as_native=False):
if type is None:
type = ivy.default_dtype()
if isinstance(type, str) and type in invalid_dtypes:
return {"int64": ivy.int32, "uint64": ivy.uint32, "float64": ivy.float32}[type]
return ivy.as_ivy_dtype(type) if not as_native else ivy.as_native_dtype(type)
backend = "jax"
# local sub-modules
from . import activations
from .activations import *
from . import creation
from .creation import *
from . import data_type
from .data_type import *
from . import device
from .device import *
from . import elementwise
from .elementwise import *
from . import general
from .general import *
from . import gradients
from .gradients import *
from . import layers
from .layers import *
from . import linear_algebra as linalg
from .linear_algebra import *
from . import manipulation
from .manipulation import *
from . import random
from .random import *
from . import searching
from .searching import *
from . import set
from .set import *
from . import sorting
from .sorting import *
from . import statistical
from .statistical import *
from . import utility
from .utility import *
from . import experimental
from .experimental import *
from . import control_flow_ops
from .control_flow_ops import *
from . import module
from .module import *
# sub-backends
from . import sub_backends
from .sub_backends import *
if importlib.util.find_spec("flax"):
import flax
NativeModule = flax.linen.Module
elif importlib.util.find_spec("haiku"):
import haiku as hk
NativeModule = hk.Module
else:
NativeModule = None
| ivy/ivy/functional/backends/jax/__init__.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/__init__.py",
"repo_id": "ivy",
"token_count": 2788
} | 16 |
# global
from typing import Optional, Union, Tuple, List, Literal, Sequence, Callable
import jax
import jax.lax as jlax
import jax.numpy as jnp
import math
# local
import ivy
from ivy import output_to_native_arrays
from ivy.functional.backends.jax import JaxArray
from ivy.functional.backends.jax.random import RNG
from ivy.functional.ivy.experimental.general import _correct_ivy_callable
from ivy.functional.ivy.layers import (
_handle_padding,
_validate_max_pool_params,
_depth_max_pooling_helper,
)
from ivy.functional.ivy.experimental.layers import (
_padding_ceil_mode,
_get_size,
)
from ivy.func_wrapper import with_supported_dtypes
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
from ivy.functional.backends.jax.experimental.manipulation import _to_nested_tuple
def _determine_depth_max_pooling(x, kernel, strides, dims, data_format="channel_last"):
# determine depth pooling
_, _, depth_pooling = _depth_max_pooling_helper(
x.shape, kernel, strides, dims=dims, data_format=data_format
)
if depth_pooling:
kernel = [1, 1, 1, kernel[-1]]
strides = [1, 1, 1, strides[-1]]
return x, kernel, strides, depth_pooling
def _pad_str_to_list(inputs, dims, padding, strides, new_window_shape):
pad_int = [
_handle_padding(
inputs.shape[i + 1], strides[i + 1], new_window_shape[i], padding
)
for i in range(len(dims) - 2)
]
pad_list = [
(pad_int[i] // 2, pad_int[i] - pad_int[i] // 2) for i in range(len(pad_int))
]
pad_list = [(0, 0)] + pad_list + [(0, 0)]
return pad_list
def general_pool(
inputs,
init,
reduce_fn,
window_shape,
strides,
padding,
dim,
dilation=1,
ceil_mode=False,
count_include_pad=False,
):
# This function assumes that param validation is already done
window_shape = tuple(window_shape)
strides = (1,) + strides + (1,) if len(strides) == dim else strides
dims = (1,) + window_shape + (1,) if len(window_shape) == dim else window_shape
if isinstance(dilation, int):
dilation = (1,) + (dilation,) * dim + (1,)
else:
dilation = (1,) + tuple(dilation) + (1,)
is_single_input = False
if inputs.ndim == len(dims) - 1:
# add singleton batch dimension because lax.reduce_window always
# needs a batch dimension.
inputs = inputs[None]
is_single_input = True
assert inputs.ndim == len(dims), f"len({inputs.shape}) != len({dims})"
# shape of window after dilation
new_window_shape = tuple(
window_shape[i - 1] + (dilation[i] - 1) * (window_shape[i - 1] - 1)
for i in range(1, len(dims) - 1)
)
inputs, window_shape, strides, depth_pooling = _determine_depth_max_pooling(
inputs, window_shape, strides, dim, data_format="channel_last"
)
if not depth_pooling:
# manually creating padding list
if isinstance(padding, str):
pad_list = _pad_str_to_list(
inputs, dims, padding, strides, new_window_shape
)
else:
if isinstance(padding, int):
padding = [(padding,) * 2] * dim
pad_list = [(0, 0)] + list(padding) + [(0, 0)]
if ceil_mode:
c = []
for i in range(len(dims) - 2):
pad_list[i + 1], ceil = _padding_ceil_mode(
inputs.shape[i + 1],
new_window_shape[i],
pad_list[i + 1],
strides[i + 1],
True,
)
c.append(ceil)
if count_include_pad:
# manually pad inputs with 0 if ceil_mode is True
# because they're not counted in average calculation
if ceil_mode:
ceil = [(0, c[i]) for i in range(len(dims) - 2)]
for i in range(len(dims) - 2):
pad_list[i + 1] = (
pad_list[i + 1][0],
pad_list[i + 1][1] - ceil[i][1],
)
inputs = jnp.pad(inputs, pad_list, mode="constant", constant_values=1.0)
inputs = jnp.pad(
inputs,
[(0, 0)] + ceil + [(0, 0)],
mode="constant",
constant_values=0.0,
)
else:
# manually pad inputs with 1s
# because they are counted in average calculation
inputs = jnp.pad(inputs, pad_list, mode="constant", constant_values=1.0)
pad_list = [(0, 0)] * len(pad_list)
elif isinstance(padding, list) and any(
item != 0 for sublist in padding for item in sublist
):
raise NotImplementedError(
"Nonzero explicit padding is not supported for depthwise max pooling"
)
else:
pad_list = [(0, 0)] * (dim + 2)
if not ivy.is_array(inputs):
# if dtype is not set here, jax casts it to float64
inputs = jnp.array(inputs, dtype=jnp.float32)
if not ivy.is_array(init):
init = jnp.array(init, dtype=inputs.dtype)
promoted_type = jnp.promote_types(inputs.dtype, init.dtype)
inputs = inputs.astype(promoted_type)
init = init.astype(promoted_type)
y = jlax.reduce_window(
inputs, init, reduce_fn, dims, strides, pad_list, window_dilation=dilation
)
if is_single_input:
y = jnp.squeeze(y, axis=0)
return y
def max_pool1d(
x: JaxArray,
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
dilation: Union[int, Tuple[int]] = 1,
ceil_mode: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
dims = 1
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
if data_format == "NCW":
x = jnp.transpose(x, (0, 2, 1))
kernel = [kernel[i] for i in [0, 2, 1]] if len(kernel) == (dims + 2) else kernel
strides = (
[strides[i] for i in [0, 2, 1]] if len(strides) == (dims + 2) else strides
)
padding = (
[padding[i] for i in [0, 2, 1]]
if isinstance(padding, list) and len(padding) == (dims + 2)
else padding
)
res = general_pool(
x, -jnp.inf, jlax.max, kernel, strides, padding, dims, dilation, ceil_mode
)
if data_format == "NCW":
res = jnp.transpose(res, (0, 2, 1))
return res
def max_pool2d(
x: JaxArray,
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
dilation: Union[int, Tuple[int, ...]] = 1,
ceil_mode: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
dims = 2
odtype = x.dtype
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
if data_format == "NCHW":
x = jnp.transpose(x, (0, 2, 3, 1))
kernel = (
[kernel[i] for i in [0, 2, 3, 1]] if len(kernel) == (dims + 2) else kernel
)
strides = (
[strides[i] for i in [0, 2, 3, 1]]
if len(strides) == (dims + 2)
else strides
)
padding = (
[padding[i] for i in [0, 2, 3, 1]]
if isinstance(padding, list) and len(padding) == (dims + 2)
else padding
)
res = general_pool(
x, -jnp.inf, jlax.max, kernel, strides, padding, dims, dilation, ceil_mode
)
if data_format == "NCHW":
res = jnp.transpose(res, (0, 3, 1, 2))
return res.astype(odtype)
def max_pool3d(
x: JaxArray,
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
dilation: Union[int, Tuple[int, ...]] = 1,
ceil_mode: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
dims = 3
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
if data_format == "NCDHW":
x = jnp.transpose(x, (0, 2, 3, 4, 1))
kernel = (
[kernel[i] for i in [0, 2, 3, 4, 1]]
if len(kernel) == (dims + 2)
else kernel
)
strides = (
[strides[i] for i in [0, 2, 3, 4, 1]]
if len(strides) == (dims + 2)
else strides
)
padding = (
[padding[i] for i in [0, 2, 3, 4, 1]]
if isinstance(padding, list) and len(padding) == (dims + 2)
else padding
)
res = general_pool(
x, -jnp.inf, jlax.max, kernel, strides, padding, dims, dilation, ceil_mode
)
if data_format == "NCDHW":
res = jnp.transpose(res, (0, 4, 1, 2, 3))
return res
def avg_pool1d(
x: JaxArray,
kernel: Union[int, Tuple[int]],
strides: Union[int, Tuple[int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if data_format in ("NCW", "NCL"):
x = jnp.transpose(x, (0, 2, 1))
if isinstance(kernel, int):
kernel = (kernel,)
elif len(kernel) == 1:
kernel = (kernel[0],)
if isinstance(strides, int):
strides = (strides,)
elif len(strides) == 1:
strides = (strides[0],)
res = general_pool(
x, 0.0, jlax.add, kernel, strides, padding, 1, ceil_mode=ceil_mode
)
div_shape = x.shape[:-1] + (1,)
if len(div_shape) - 2 == len(kernel):
div_shape = (1,) + div_shape[1:]
res = res / general_pool(
jnp.ones(div_shape, dtype=res.dtype),
0.0,
jlax.add,
kernel,
strides,
padding,
1,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
)
if data_format in ("NCW", "NCL"):
res = jnp.transpose(res, (0, 2, 1))
if x.dtype == "float16":
res = res.astype("float16")
return res
def avg_pool2d(
x: JaxArray,
kernel: Union[int, Tuple[int], Tuple[int, int]],
strides: Union[int, Tuple[int], Tuple[int, int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if isinstance(kernel, int):
kernel = (kernel,) * 2
elif len(kernel) == 1:
kernel = (kernel[0],) * 2
if isinstance(strides, int):
strides = (strides,) * 2
elif len(strides) == 1:
strides = (strides[0],) * 2
if data_format == "NCHW":
x = jnp.transpose(x, (0, 2, 3, 1))
res = general_pool(
x, 0.0, jlax.add, kernel, strides, padding, 2, ceil_mode=ceil_mode
)
div_shape = x.shape[:-1] + (1,)
if len(div_shape) - 2 == len(kernel):
div_shape = (1,) + div_shape[1:]
if divisor_override is not None:
divisor = divisor_override
else:
divisor = general_pool(
jnp.ones(div_shape, dtype=res.dtype),
0.0,
jlax.add,
kernel,
strides,
padding,
2,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
)
res = res / divisor
if data_format == "NCHW":
return jnp.transpose(res, (0, 3, 1, 2))
return res
def avg_pool3d(
x: JaxArray,
kernel: Union[int, Tuple[int], Tuple[int, int, int]],
strides: Union[int, Tuple[int], Tuple[int, int, int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if isinstance(kernel, int):
kernel = (kernel,) * 3
elif len(kernel) == 1:
kernel = (kernel[0],) * 3
if isinstance(strides, int):
strides = (strides,) * 3
elif len(strides) == 1:
strides = (strides[0],) * 3
if data_format == "NCDHW":
x = jnp.transpose(x, (0, 2, 3, 4, 1))
res = general_pool(
x, 0.0, jlax.add, kernel, strides, padding, 3, ceil_mode=ceil_mode
)
if divisor_override is not None:
divisor = divisor_override
else:
divisor = general_pool(
jnp.ones_like(x, dtype=res.dtype),
0.0,
jlax.add,
kernel,
strides,
padding,
3,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
)
res = res / divisor
if data_format == "NCDHW":
res = jnp.transpose(res, (0, 4, 1, 2, 3))
return res
@with_supported_dtypes({"0.4.24 and below": ("float32", "float64")}, backend_version)
def dct(
x: JaxArray,
/,
*,
type: Literal[1, 2, 3, 4] = 2,
n: Optional[int] = None,
axis: int = -1,
norm: Optional[Literal["ortho"]] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if norm not in (None, "ortho"):
raise ValueError("Norm must be either None or 'ortho'")
if axis < 0:
axis += len(x.shape)
if n is not None:
signal_len = x.shape[axis]
if n <= signal_len:
local_idx = [slice(None)] * len(x.shape)
local_idx[axis] = slice(None, n)
x = x[tuple(local_idx)]
else:
pad_idx = [[0, 0] for _ in range(len(x.shape))]
pad_idx[axis][1] = n - signal_len
x = jnp.pad(x, pad_idx)
real_zero = jnp.array(0.0, dtype=x.dtype)
axis_dim = x.shape[axis]
axis_dim_float = jnp.array(axis_dim, dtype=x.dtype)
if type == 1:
if norm:
raise ValueError("Normalization not supported for type-I DCT")
axis_idx = [slice(None)] * len(x.shape)
axis_idx[axis] = slice(-2, 0, -1)
x = jnp.concatenate([x, x[tuple(axis_idx)]], axis=axis)
dct_out = jnp.real(jnp.fft.rfft(x, axis=axis))
return dct_out
elif type == 2:
dct_out = jax.scipy.fft.dct(x, type=2, n=n, axis=axis, norm=norm)
return dct_out
elif type == 3:
scale_dims = [1] * len(x.shape)
scale_dims[axis] = axis_dim
scale = 2.0 * jnp.exp(
jlax.complex(
real_zero, jnp.arange(axis_dim_float) * math.pi * 0.5 / axis_dim_float
)
).reshape(scale_dims)
if norm == "ortho":
n1 = jnp.sqrt(axis_dim_float)
n2 = n1 * jnp.sqrt(0.5)
sf = jnp.pad(jnp.expand_dims(n1, 0), (0, axis_dim - 1), constant_values=n2)
x = x * sf.reshape(scale_dims)
else:
x = x * axis_dim_float
axis_idx = [slice(None)] * len(x.shape)
axis_idx[axis] = slice(None, axis_dim)
dct_out = jnp.real(
jnp.fft.irfft(scale * jlax.complex(x, real_zero), n=2 * axis_dim, axis=axis)
)[tuple(axis_idx)]
return dct_out
elif type == 4:
dct_2 = jax.scipy.fft.dct(x, type=2, n=2 * axis_dim, axis=axis, norm=None)
axis_idx = [slice(None)] * len(x.shape)
axis_idx[axis] = slice(1, None, 2)
dct_out = dct_2[tuple(axis_idx)]
if norm == "ortho":
dct_out *= math.sqrt(0.5) * jlax.rsqrt(axis_dim_float)
return dct_out
def idct(
x: JaxArray,
/,
*,
type: Literal[1, 2, 3, 4] = 2,
n: Optional[int] = None,
axis: int = -1,
norm: Optional[Literal["ortho"]] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]
return dct(x, type=inverse_type, n=n, axis=axis, norm=norm, out=out)
def fft(
x: JaxArray,
dim: int,
/,
*,
norm: str = "backward",
n: Optional[Union[int, Tuple[int]]] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if not isinstance(dim, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(dim)}"
)
if n is None:
n = x.shape[dim]
if n < -len(x.shape):
raise ivy.utils.exceptions.IvyError(
f"Invalid dim {dim}, expecting ranging"
" from {-len(x.shape)} to {len(x.shape)-1} "
)
if not isinstance(n, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(n)}"
)
if n <= 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {n}, expecting more than 1"
)
if norm not in {"backward", "ortho", "forward"}:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
return jnp.fft.fft(x, n, dim, norm)
def dropout1d(
x: JaxArray,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NWC",
out: Optional[JaxArray] = None,
) -> JaxArray:
if training:
x_shape = x.shape
is_batched = len(x_shape) == 3
if data_format == "NCW":
perm = (0, 2, 1) if is_batched else (1, 0)
x = jnp.transpose(x, perm)
x_shape = x.shape
_, rng_input = jax.random.split(RNG.key)
mask = jax.random.bernoulli(rng_input, 1 - prob, x_shape)
res = jnp.where(mask, x / (1 - prob), 0)
if data_format == "NCW":
res = jnp.transpose(res, perm)
else:
res = x
return res
def dropout2d(
x: JaxArray,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NHWC",
out: Optional[JaxArray] = None,
) -> JaxArray:
if training:
x_shape = x.shape
is_batched = len(x.shape) == 4
if data_format == "NCHW":
perm = (0, 2, 3, 1) if is_batched else (1, 2, 0)
x = jnp.transpose(x, perm)
x_shape = x.shape
_, rng_input = jax.random.split(RNG.key)
mask = jax.random.bernoulli(rng_input, 1 - prob, x_shape)
res = jnp.where(mask, x / (1 - prob), 0)
if data_format == "NCHW":
perm = (0, 3, 1, 2) if is_batched else (2, 0, 1)
res = jnp.transpose(res, perm)
else:
res = x
return res
def dropout3d(
x: JaxArray,
prob: float,
/,
*,
training: bool = True,
data_format: str = "NDHWC",
out: Optional[JaxArray] = None,
) -> JaxArray:
if training:
x_shape = x.shape
is_batched = len(x_shape) == 5
if data_format == "NCDHW":
perm = (0, 2, 3, 4, 1) if is_batched else (1, 2, 3, 0)
x = jnp.transpose(x, perm)
x_shape = x.shape
_, rng_input = jax.random.split(RNG.key)
mask = jax.random.bernoulli(rng_input, 1 - prob, x_shape)
res = jnp.where(mask, x / (1 - prob), 0)
if data_format == "NCDHW":
perm = (0, 4, 1, 2, 3) if is_batched else (3, 0, 1, 2)
res = jnp.transpose(res, perm)
else:
res = x
return res
def ifft(
x: JaxArray,
dim: int,
*,
norm: str = "backward",
n: Optional[Union[int, Tuple[int]]] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if not isinstance(dim, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(dim)}"
)
if n is None:
n = x.shape[dim]
if n < -len(x.shape):
raise ivy.utils.exceptions.IvyError(
f"Invalid dim {dim}, expecting ranging"
" from {-len(x.shape)} to {len(x.shape)-1} "
)
if not isinstance(n, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(n)}"
)
if n <= 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {n}, expecting more than 1"
)
if norm not in {"backward", "ortho", "forward"}:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
return jnp.fft.ifft(x, n, dim, norm)
def interpolate(
x: JaxArray,
size: Union[Sequence[int], int],
/,
*,
mode: Literal[
"linear",
"bilinear",
"trilinear",
"nd",
"nearest",
"area",
"nearest_exact",
"tf_area",
"tf_bicubic",
"bicubic",
"mitchellcubic",
"lanczos3",
"lanczos5",
"gaussian",
] = "linear",
scale_factor: Optional[Union[Sequence[int], int]] = None,
recompute_scale_factor: Optional[bool] = None,
align_corners: bool = False,
antialias: bool = False,
out: Optional[JaxArray] = None,
):
input_size = ivy.shape(x)[2:]
dims = len(input_size)
size, _ = _get_size(scale_factor, size, dims, input_size)
if all(a == b for a, b in zip(size, input_size)):
ret = x
else:
mode = (
"nearest"
if mode == "nearest-exact"
else "bicubic" if mode == "tf_bicubic" else mode
)
size = [x.shape[0], *size, x.shape[1]]
x = jnp.transpose(x, (0, *range(2, dims + 2), 1))
ret = jnp.transpose(
jax.image.resize(x, shape=size, method=mode, antialias=antialias),
(0, dims + 1, *range(1, dims + 1)),
)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
interpolate.partial_mixed_handler = (
lambda *args, mode="linear", recompute_scale_factor=None, align_corners=None, **kwargs: mode # noqa: E501
not in [
"area",
"nearest",
"nd",
"tf_area",
"mitchellcubic",
"gaussian",
"bicubic",
]
and not align_corners
and recompute_scale_factor
)
def reduce_window(
operand: JaxArray,
init_value: Union[int, float],
computation: Callable,
window_dimensions: Union[int, Sequence[int]],
/,
*,
window_strides: Union[int, Sequence[int]] = 1,
padding: Union[str, int, Sequence[Tuple[int, int]]] = "VALID",
base_dilation: Union[int, Sequence[int]] = 1,
window_dilation: Union[int, Sequence[int]] = 1,
) -> JaxArray:
computation = _correct_ivy_callable(computation)
computation = output_to_native_arrays(computation)
window_dimensions, window_strides, padding, base_dilation, window_dilation = map(
lambda x: tuple([x] * len(operand.shape)) if isinstance(x, int) else x,
[window_dimensions, window_strides, padding, base_dilation, window_dilation],
)
if not isinstance(padding, str):
# for containers the padding reaches the function as a list of lists instead of
# a list of tuples, which gives an unhashable dtype error
# this is similarly a problem in the jax backend of ivy.pad
padding = _to_nested_tuple(padding)
return jlax.reduce_window(
operand,
jnp.array(init_value).astype(operand.dtype),
computation,
window_dimensions,
window_strides,
padding,
base_dilation,
window_dilation,
)
def fft2(
x: JaxArray,
*,
s: Optional[Sequence[int]] = None,
dim: Sequence[int] = (-2, -1),
norm: str = "backward",
out: Optional[JaxArray] = None,
) -> JaxArray:
ivy.utils.assertions.check_elem_in_list(
norm,
["backward", "ortho", "forward"],
message=f"Unrecognized normalization mode {norm}",
)
if not all(isinstance(j, int) for j in dim):
raise ivy.utils.exceptions.IvyError(
f"Expecting {dim} to be a sequence of integers <class integer>"
)
if s is None:
s = (x.shape[dim[0]], x.shape[dim[1]])
if all(j < -len(x.shape) for j in s):
raise ivy.utils.exceptions.IvyError(
f"Invalid dim {dim}, expecting ranging"
" from {-len(x.shape)} to {len(x.shape)-1} "
)
if not all(isinstance(j, int) for j in s):
raise ivy.utils.exceptions.IvyError(
f"Expecting {s} to be a sequence of integers <class integer>"
)
if all(j <= 1 for j in s):
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {s}, expecting s points larger than 1"
)
return jnp.fft.fft2(x, s, dim, norm).astype(jnp.complex128)
def ifftn(
x: JaxArray,
s: Optional[Union[int, Tuple[int]]] = None,
axes: Optional[Union[int, Tuple[int]]] = None,
*,
norm: str = "backward",
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.fft.ifftn(x, s, axes, norm)
@with_unsupported_dtypes(
{"0.4.24 and below": ("bfloat16", "float16", "complex")}, backend_version
)
def embedding(
weights: JaxArray,
indices: JaxArray,
/,
*,
max_norm: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
ivy.utils.assertions.check_equal(
len(weights.shape), 2, message="weights must be 2-d", as_array=False
)
embeddings = jnp.take(weights, indices, axis=0)
if max_norm is not None:
norms = jnp.linalg.norm(embeddings, axis=-1, keepdims=True)
embeddings = jnp.where(
norms > max_norm, embeddings * max_norm / norms, embeddings
)
embeddings = jnp.where(
norms < -max_norm, embeddings * -max_norm / norms, embeddings
)
return embeddings
def rfft(
x: JaxArray,
/,
*,
n: Optional[int] = None,
axis: int = -1,
norm: Literal["backward", "ortho", "forward"] = "backward",
out: Optional[JaxArray] = None,
) -> JaxArray:
x = x.real
if x.dtype == jnp.float16:
x = x.astype(jnp.float32)
ret = jnp.fft.rfft(x, n=n, axis=axis, norm=norm)
if x.dtype != jnp.float64:
ret = ret.astype(jnp.complex64)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
@with_unsupported_dtypes({"0.4.24 and below": ("float16", "complex")}, backend_version)
def rfftn(
x: JaxArray,
s: Optional[Sequence[int]] = None,
axes: Optional[Sequence[int]] = None,
*,
norm: str = "backward",
out: Optional[JaxArray] = None,
) -> JaxArray:
if not all(isinstance(j, int) for j in axes):
raise ivy.utils.exceptions.IvyError(
f"Expecting {axes} to be a sequence of integers <class integer>"
)
if s is None:
s = (x.shape[axes[0]], x.shape[axes[1]])
if all(j < -len(x.shape) for j in s):
raise ivy.utils.exceptions.IvyError(
f"Invalid dim {axes}, expecting ranging"
f" from {-len(x.shape)} to {len(x.shape)-1}"
)
if not all(isinstance(j, int) for j in s):
raise ivy.utils.exceptions.IvyError(
f"Expecting {s} to be a sequence of integers <class integer>"
)
if all(j <= 1 for j in s):
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {s}, expecting s points larger than 1"
)
if norm not in {"backward", "ortho", "forward"}:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
return jnp.fft.rfftn(x, s, axes, norm).astype(jnp.complex128)
# stft
def stft(
signals: JaxArray,
frame_length: int,
frame_step: int,
/,
*,
fft_length: Optional[int] = None,
window_fn: Optional[Callable] = None,
pad_end: Optional[bool] = False,
name: Optional[str] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if not isinstance(frame_length, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(frame_length)}"
)
if frame_length < 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {frame_length}, expecting frame_length larger than or"
" equal to 1"
)
if not isinstance(frame_step, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(frame_step)}"
)
if frame_step < 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {frame_length}, expecting frame_length larger than or"
" equal to 1"
)
if fft_length is not None:
if not isinstance(fft_length, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(fft_length)}"
)
if fft_length < 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {frame_length}, expecting frame_length larger"
" than or equal to 1"
)
input_dtype = signals.dtype
if input_dtype == jnp.float32:
dtype = jnp.complex64
elif input_dtype == jnp.float64:
dtype = jnp.complex128
def stft_1D(signals, frame_length, frame_step, fft_length, pad_end):
if fft_length is None:
fft_length = 1
while fft_length < frame_length:
fft_length *= 2
num_samples = signals.shape[-1]
if pad_end:
num_samples = signals.shape[-1]
num_frames = -(-num_samples // frame_step)
pad_length = max(
0, frame_length + frame_step * (num_frames - 1) - num_samples
)
signals = jnp.pad(signals, [(0, pad_length)])
else:
num_frames = 1 + (num_samples - frame_length) // frame_step
stft_result = []
if window_fn is None:
window = 1
else:
window = window_fn(frame_length)
for i in range(num_frames):
start = i * frame_step
end = start + frame_length
frame = signals[..., start:end]
windowed_frame = frame * window
pad_length = fft_length - frame_length
windowed_frame = jnp.pad(windowed_frame, [(0, pad_length)])
windowed_frame = jnp.asarray(windowed_frame, dtype=dtype)
fft_frame = jnp.fft.fft(windowed_frame, axis=-1)
slit = int(fft_length // 2 + 1)
stft_result.append(fft_frame[..., 0:slit])
stft = jnp.stack(stft_result, axis=0)
return stft
def stft_helper(nested_list, frame_length, frame_step, fft_length):
nested_list = nested_list
if len(jnp.shape(nested_list)) > 1:
return [
stft_helper(sublist, frame_length, frame_step, fft_length)
for sublist in nested_list
]
else:
return stft_1D(nested_list, frame_length, frame_step, fft_length, pad_end)
to_return = stft_helper(signals, frame_length, frame_step, fft_length)
return jnp.asarray(to_return, dtype=dtype)
| ivy/ivy/functional/backends/jax/experimental/layers.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/experimental/layers.py",
"repo_id": "ivy",
"token_count": 15384
} | 17 |
# global
import math
from numbers import Number
from typing import Union, Tuple, Optional, List, Sequence, Iterable
import jax.numpy as jnp
import numpy as np
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.backends.jax import JaxArray
from . import backend_version
def _flat_array_to_1_dim_array(x):
return x.reshape((1,)) if x.shape == () else x
# Array API Standard #
# -------------------#
def concat(
xs: Union[Tuple[JaxArray, ...], List[JaxArray]],
/,
*,
axis: int = 0,
out: Optional[JaxArray] = None,
) -> JaxArray:
is_tuple = type(xs) is tuple
if axis is None:
if is_tuple:
xs = list(xs)
for i in range(len(xs)):
if xs[i].shape == ():
xs[i] = jnp.ravel(xs[i])
if is_tuple:
xs = tuple(xs)
try:
return jnp.concatenate(xs, axis)
except ValueError as error:
raise ivy.utils.exceptions.IvyIndexError(error) from error
def expand_dims(
x: JaxArray,
/,
*,
copy: Optional[bool] = None,
axis: Union[int, Sequence[int]] = 0,
out: Optional[JaxArray] = None,
) -> JaxArray:
try:
ret = jnp.expand_dims(x, axis)
return ret
except ValueError as error:
raise ivy.utils.exceptions.IvyIndexError(error) from error
def flip(
x: JaxArray,
/,
*,
copy: Optional[bool] = None,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.flip(x, axis=axis)
def permute_dims(
x: JaxArray,
/,
axes: Tuple[int, ...],
*,
copy: Optional[bool] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.transpose(x, axes)
def reshape(
x: JaxArray,
/,
shape: Union[ivy.NativeShape, Sequence[int]],
*,
copy: Optional[bool] = None,
order: str = "C",
allowzero: bool = True,
out: Optional[JaxArray] = None,
) -> JaxArray:
ivy.utils.assertions.check_elem_in_list(order, ["C", "F"])
if not allowzero:
shape = [
new_s if con else old_s
for new_s, con, old_s in zip(shape, jnp.array(shape) != 0, x.shape)
]
if copy:
newarr = jnp.copy(x)
return jnp.reshape(newarr, shape, order=order)
return jnp.reshape(x, shape, order=order)
def roll(
x: JaxArray,
/,
shift: Union[int, Sequence[int]],
*,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if isinstance(axis, jnp.ndarray):
axis = axis.tolist()
return jnp.roll(x, shift, axis)
def squeeze(
x: JaxArray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
copy: Optional[bool] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if x.shape == ():
if axis is None or axis == 0 or axis == -1:
return x
raise ivy.utils.exceptions.IvyException(
f"tried to squeeze a zero-dimensional input by axis {axis}"
)
else:
ret = jnp.squeeze(x, axis=axis)
return ret
def stack(
arrays: Union[Tuple[JaxArray], List[JaxArray]],
/,
*,
axis: int = 0,
out: Optional[JaxArray] = None,
) -> JaxArray:
try:
return jnp.stack(arrays, axis=axis)
except ValueError as error:
raise ivy.utils.exceptions.IvyIndexError(error) from error
# Extra #
# ------#
def split(
x: JaxArray,
/,
*,
copy: Optional[bool] = None,
num_or_size_splits: Optional[Union[int, Sequence[int], JaxArray]] = None,
axis: int = 0,
with_remainder: bool = False,
) -> List[JaxArray]:
if x.shape == ():
if num_or_size_splits is not None and num_or_size_splits != 1:
raise ivy.utils.exceptions.IvyException(
"input array had no shape, but num_sections specified was"
f" {num_or_size_splits}"
)
return [x]
if isinstance(num_or_size_splits, jnp.ndarray):
num_or_size_splits = num_or_size_splits.tolist()
if num_or_size_splits is None:
num_or_size_splits = x.shape[axis]
elif isinstance(num_or_size_splits, int) and with_remainder:
num_chunks = x.shape[axis] / num_or_size_splits
num_chunks_int = math.floor(num_chunks)
remainder = num_chunks - num_chunks_int
if remainder != 0:
num_or_size_splits = [num_or_size_splits] * num_chunks_int + [
int(remainder * num_or_size_splits)
]
if isinstance(num_or_size_splits, (list, tuple)):
num_or_size_splits = np.cumsum(np.array(num_or_size_splits[:-1]))
return jnp.split(x, num_or_size_splits, axis)
def repeat(
x: JaxArray,
/,
repeats: Union[int, Iterable[int]],
*,
axis: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.repeat(x, repeats, axis)
def tile(
x: JaxArray, /, repeats: Iterable[int], *, out: Optional[JaxArray] = None
) -> JaxArray:
return jnp.tile(x, repeats)
def clip(
x: JaxArray,
/,
x_min: Optional[Union[Number, JaxArray]] = None,
x_max: Optional[Union[Number, JaxArray]] = None,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
if x_min is None and x_max is None:
raise ValueError("At least one of the x_min or x_max must be provided")
promoted_type = x.dtype
if x_min is not None:
if not hasattr(x_min, "dtype"):
x_min = ivy.array(x_min).data
promoted_type = ivy.as_native_dtype(ivy.promote_types(x.dtype, x_min.dtype))
x = jnp.where(x < x_min, x_min.astype(promoted_type), x.astype(promoted_type))
if x_max is not None:
if not hasattr(x_max, "dtype"):
x_max = ivy.array(x_max).data
promoted_type = ivy.as_native_dtype(
ivy.promote_types(promoted_type, x_max.dtype)
)
x = jnp.where(x > x_max, x_max.astype(promoted_type), x.astype(promoted_type))
return x
@with_unsupported_dtypes({"0.4.24 and below": ("uint64",)}, backend_version)
def constant_pad(
x: JaxArray,
/,
pad_width: List[List[int]],
*,
value: Number = 0.0,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.pad(_flat_array_to_1_dim_array(x), pad_width, constant_values=value)
def unstack(
x: JaxArray,
/,
*,
copy: Optional[bool] = None,
axis: int = 0,
keepdims: bool = False,
) -> List[JaxArray]:
if x.shape == ():
return [x]
dim_size = x.shape[axis]
# ToDo: make this faster somehow, jnp.split is VERY slow for large dim_size
x_split = jnp.split(x, dim_size, axis)
if keepdims:
return x_split
return [jnp.squeeze(item, axis) for item in x_split]
def zero_pad(
x: JaxArray, /, pad_width: List[List[int]], *, out: Optional[JaxArray] = None
):
return jnp.pad(_flat_array_to_1_dim_array(x), pad_width, constant_values=0)
def swapaxes(
x: JaxArray,
axis0: int,
axis1: int,
/,
*,
copy: Optional[bool] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.swapaxes(x, axis0, axis1)
| ivy/ivy/functional/backends/jax/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/manipulation.py",
"repo_id": "ivy",
"token_count": 3348
} | 18 |
import mxnet as mx
backend_version = {"version": mx.__version__}
from .activations import *
from .creation import *
from .data_type import *
from .device import *
from .elementwise import *
from .general import *
from .gradients import *
from .layers import *
from .linear_algebra import *
from .manipulation import *
from .norms import *
from .random import *
from .searching import *
from .set import *
from .sorting import *
from .sparse_array import *
from .statistical import *
from .utility import *
| ivy/ivy/functional/backends/mxnet/experimental/__init__.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/experimental/__init__.py",
"repo_id": "ivy",
"token_count": 152
} | 19 |
from ivy.utils.exceptions import IvyNotImplementedException
def is_native_sparse_array(x):
raise IvyNotImplementedException()
def native_sparse_array(
data=None,
*,
coo_indices=None,
crow_indices=None,
col_indices=None,
ccol_indices=None,
row_indices=None,
values=None,
dense_shape=None,
format="coo"
):
raise IvyNotImplementedException()
def native_sparse_array_to_indices_values_and_shape(x):
raise NotImplementedError(
"mxnet.native_sparse_array_to_indices_values_and_shape Not Implemented"
)
| ivy/ivy/functional/backends/mxnet/experimental/sparse_array.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/experimental/sparse_array.py",
"repo_id": "ivy",
"token_count": 237
} | 20 |
# global
import sys
import numpy as np
# local
import ivy
from ivy.func_wrapper import _dtype_from_version
backend_version = {"version": np.__version__}
# noinspection PyUnresolvedReferences
if not ivy.is_local():
_module_in_memory = sys.modules[__name__]
else:
_module_in_memory = sys.modules[ivy.import_module_path].import_cache[__name__]
use = ivy.utils.backend.ContextManager(_module_in_memory)
# wrap __array_ufunc__ method of ivy.Array to prioritize Ivy array methods when using numpu backend
def wrap__array_ufunc__(func):
def rep_method(self, ufunc, method, *inputs, **kwargs):
methods = {
"not_equal": "not_equal",
"greater": "greater",
"less": "less",
"greater_equal": "greater_equal",
"less_equal": "less_equal",
"multiply": "multiply",
"divide": "divide",
"remainder": "remainder",
"equal": "equal",
"bitwise_and": "bitwise_and",
"matmul": "matmul",
"power": "pow",
"subtract": "subtract",
"add": "add",
}
if ufunc.__name__ in methods:
return eval("ivy." + methods[ufunc.__name__] + "(*inputs, **kwargs)")
return func(self, ufunc, method, *inputs, **kwargs)
return rep_method
ivy.Array.__array_ufunc__ = wrap__array_ufunc__(ivy.Array.__array_ufunc__)
NativeArray = np.ndarray
NativeDevice = str
NativeDtype = np.dtype
NativeShape = tuple
NativeSparseArray = None
# devices
valid_devices = ("cpu",)
invalid_devices = ("gpu", "tpu")
# native data types
native_int8 = np.dtype("int8")
native_int16 = np.dtype("int16")
native_int32 = np.dtype("int32")
native_int64 = np.dtype("int64")
native_uint8 = np.dtype("uint8")
native_uint16 = np.dtype("uint16")
native_uint32 = np.dtype("uint32")
native_uint64 = np.dtype("uint64")
native_float16 = np.dtype("float16")
native_float32 = np.dtype("float32")
native_float64 = np.dtype("float64")
native_complex64 = np.dtype("complex64")
native_complex128 = np.dtype("complex128")
native_double = native_float64
native_bool = np.dtype("bool")
# valid data types
# ToDo: Add complex dtypes to valid_dtypes and fix all resulting failures.
# update these to add new dtypes
valid_dtypes = {
"1.26.3 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.uint16,
ivy.uint32,
ivy.uint64,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
ivy.bool,
)
}
valid_numeric_dtypes = {
"1.26.3 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.uint16,
ivy.uint32,
ivy.uint64,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
)
}
valid_int_dtypes = {
"1.26.3 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.uint16,
ivy.uint32,
ivy.uint64,
)
}
valid_float_dtypes = {"1.26.3 and below": (ivy.float16, ivy.float32, ivy.float64)}
valid_uint_dtypes = {
"1.26.3 and below": (ivy.uint8, ivy.uint16, ivy.uint32, ivy.uint64)
}
valid_complex_dtypes = {"1.26.3 and below": (ivy.complex64, ivy.complex128)}
# leave these untouched
valid_dtypes = _dtype_from_version(valid_dtypes, backend_version)
valid_numeric_dtypes = _dtype_from_version(valid_numeric_dtypes, backend_version)
valid_int_dtypes = _dtype_from_version(valid_int_dtypes, backend_version)
valid_float_dtypes = _dtype_from_version(valid_float_dtypes, backend_version)
valid_uint_dtypes = _dtype_from_version(valid_uint_dtypes, backend_version)
valid_complex_dtypes = _dtype_from_version(valid_complex_dtypes, backend_version)
# invalid data types
# update these to add new dtypes
invalid_dtypes = {"1.26.3 and below": (ivy.bfloat16,)}
invalid_numeric_dtypes = {"1.26.3 and below": (ivy.bfloat16,)}
invalid_int_dtypes = {"1.26.3 and below": ()}
invalid_float_dtypes = {"1.26.3 and below": (ivy.bfloat16,)}
invalid_uint_dtypes = {"1.26.3 and below": ()}
invalid_complex_dtypes = {"1.26.3 and below": ()}
# leave these untouched
invalid_dtypes = _dtype_from_version(invalid_dtypes, backend_version)
invalid_numeric_dtypes = _dtype_from_version(invalid_numeric_dtypes, backend_version)
invalid_int_dtypes = _dtype_from_version(invalid_int_dtypes, backend_version)
invalid_float_dtypes = _dtype_from_version(invalid_float_dtypes, backend_version)
invalid_uint_dtypes = _dtype_from_version(invalid_uint_dtypes, backend_version)
invalid_complex_dtypes = _dtype_from_version(invalid_complex_dtypes, backend_version)
native_inplace_support = True
supports_gradients = False
def closest_valid_dtype(type=None, /, as_native=False):
if type is None:
type = ivy.default_dtype()
elif isinstance(type, str) and type in invalid_dtypes:
type = {"bfloat16": ivy.float16}[type]
return ivy.as_ivy_dtype(type) if not as_native else ivy.as_native_dtype(type)
backend = "numpy"
# local sub-modules
from . import activations
from .activations import *
from . import creation
from .creation import *
from . import data_type
from .data_type import *
from . import device
from .device import *
from . import elementwise
from .elementwise import *
from . import general
from .general import *
from . import gradients
from .gradients import *
from . import layers
from .layers import *
from . import linear_algebra as linalg
from .linear_algebra import *
from . import manipulation
from .manipulation import *
from . import random
from .random import *
from . import searching
from .searching import *
from . import set
from .set import *
from . import sorting
from .sorting import *
from . import statistical
from .statistical import *
from . import utility
from .utility import *
from . import experimental
from .experimental import *
from . import control_flow_ops
from .control_flow_ops import *
from . import module
from .module import *
# sub-backends
from . import sub_backends
from .sub_backends import *
NativeModule = None
| ivy/ivy/functional/backends/numpy/__init__.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/__init__.py",
"repo_id": "ivy",
"token_count": 2591
} | 21 |