# global
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
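# imag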
@handle_frontend_test(
fn_tree="paddle.imag",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_imag(
*,
dtype_and_x,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=input[0],
)
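# is_complex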
@handle_frontend_test(
fn_tree="paddle.is_complex",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_is_complex(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=input[0],
)
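# is_floating_point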
@handle_frontend_test(
fn_tree="paddle.is_floating_point",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_is_floating_point(
*,
dtype_and_x,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=input[0],
)
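# is_integer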
@handle_frontend_test(
fn_tree="paddle.is_integer",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_is_integer(
*,
dtype_and_x,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=input[0],
)
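# rank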
@handle_frontend_test(
fn_tree="paddle.rank",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_rank(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
)
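# real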
@handle_frontend_test(
fn_tree="paddle.real",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_real(
*,
dtype_and_x,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=input[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_attribute.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_attribute.py",
"repo_id": "ivy",
"token_count": 1796
} | 64 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_frontends.test_torch.test_nn.test_functional import (
test_pooling_functions,
)
# adaptive_avg_pool1d
@handle_frontend_test(
fn_tree="paddle.nn.functional.adaptive_avg_pool1d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=3,
max_num_dims=3,
min_dim_size=1,
max_value=100,
min_value=-100,
),
output_size=helpers.ints(min_value=1, max_value=5),
)
def test_paddle_adaptive_avg_pool1d(
*,
dtype_and_x,
output_size,
test_flags,
frontend,
on_device,
backend_fw,
fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
on_device=on_device,
fn_tree=fn_tree,
x=x[0],
output_size=output_size,
)
# adaptive_avg_pool2d
@handle_frontend_test(
fn_tree="paddle.nn.functional.adaptive_avg_pool2d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=4,
max_num_dims=4,
min_dim_size=1,
max_value=100,
min_value=-100,
),
output_size=st.one_of(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=1, max_value=5),
),
helpers.ints(min_value=1, max_value=5),
),
)
def test_paddle_adaptive_avg_pool2d(
*,
dtype_and_x,
output_size,
test_flags,
frontend,
on_device,
backend_fw,
fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
on_device=on_device,
fn_tree=fn_tree,
x=x[0],
output_size=output_size,
)
# adaptive_avg_pool3d
@handle_frontend_test(
fn_tree="paddle.nn.functional.adaptive_avg_pool3d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=5,
max_num_dims=5,
min_dim_size=1,
max_value=100,
min_value=-100,
),
output_size=st.one_of(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=1, max_value=5),
),
helpers.ints(min_value=1, max_value=5),
),
)
def test_paddle_adaptive_avg_pool3d(
*,
dtype_and_x,
output_size,
test_flags,
frontend,
on_device,
    backend_fw,
    fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
        backend_to_test=backend_fw,
        frontend=frontend,
test_flags=test_flags,
on_device=on_device,
fn_tree=fn_tree,
x=x[0],
output_size=output_size,
)
# adaptive_max_pool2d
@handle_frontend_test(
fn_tree="paddle.nn.functional.adaptive_max_pool2d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=4,
max_num_dims=4,
min_dim_size=1,
# Setting max and min value because this operation in paddle is not
# numerically stable
max_value=100,
min_value=-100,
),
output_size=st.one_of(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=1, max_value=5),
),
helpers.ints(min_value=1, max_value=5),
),
)
def test_paddle_adaptive_max_pool2d(
*,
dtype_and_x,
output_size,
test_flags,
frontend,
on_device,
backend_fw,
fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
on_device=on_device,
fn_tree=fn_tree,
x=x[0],
output_size=output_size,
)
# avg_pool1d
@handle_frontend_test(
fn_tree="paddle.nn.functional.avg_pool1d",
x_k_s_p_df=helpers.arrays_for_pooling(
min_dims=3,
max_dims=3,
min_side=2,
max_side=4,
data_format="channel_first",
),
exclusive=st.booleans(),
ceil_mode=st.just(False),
test_with_out=st.just(False),
)
def test_paddle_avg_pool1d(
*,
x_k_s_p_df,
frontend,
test_flags,
backend_fw,
on_device,
fn_tree,
exclusive,
ceil_mode,
):
input_dtype, x, kernel_size, stride, padding = x_k_s_p_df
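    # the pooling strategy can emit the string "SAME"; convert it to explicit
    # symmetric padding for the single spatial dim, otherwise use zero padding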
if padding == "SAME":
padding = test_pooling_functions.calculate_same_padding(
kernel_size, stride, [x[0].shape[2]]
)
else:
padding = (0,)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
on_device=on_device,
fn_tree=fn_tree,
x=x[0],
kernel_size=kernel_size,
stride=stride,
padding=padding,
exclusive=exclusive,
ceil_mode=ceil_mode,
)
# avg_pool2d
@handle_frontend_test(
fn_tree="paddle.nn.functional.pooling.avg_pool2d",
dtype_x_k_s=helpers.arrays_for_pooling(
min_dims=4,
max_dims=4,
min_side=2,
max_side=4,
),
ceil_mode=st.booleans(),
exclusive=st.booleans(),
data_format=st.sampled_from(["NCHW", "NHWC"]),
)
def test_paddle_avg_pool2d(
dtype_x_k_s,
exclusive,
ceil_mode,
data_format,
*,
test_flags,
backend_fw,
frontend,
fn_tree,
on_device,
):
input_dtype, x, kernel, stride, padding = dtype_x_k_s
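    # the generated input is NHWC-shaped; reshape it into an NCHW-shaped
    # tensor when testing the channel-first layout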
if data_format == "NCHW":
x[0] = x[0].reshape(
(x[0].shape[0], x[0].shape[3], x[0].shape[1], x[0].shape[2])
)
if len(stride) == 1:
stride = (stride[0], stride[0])
if padding == "SAME":
padding = test_pooling_functions.calculate_same_padding(
kernel, stride, x[0].shape[2:]
)
else:
padding = (0, 0)
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
kernel_size=kernel,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
exclusive=exclusive,
divisor_override=None,
data_format=data_format,
)
# max_pool2d
@handle_frontend_test(
fn_tree="paddle.nn.functional.pooling.max_pool2d",
dtype_x_k_s=helpers.arrays_for_pooling(
min_dims=4, max_dims=4, min_side=2, max_side=4
),
ceil_mode=st.sampled_from([True]),
data_format=st.sampled_from(["NCHW", "NHWC"]),
)
def test_paddle_max_pool2d(
dtype_x_k_s,
ceil_mode,
data_format,
*,
test_flags,
backend_fw,
frontend,
fn_tree,
on_device,
):
input_dtype, x, kernel, stride, padding = dtype_x_k_s
if data_format == "NCHW":
x[0] = x[0].reshape(
(x[0].shape[0], x[0].shape[3], x[0].shape[1], x[0].shape[2])
)
if len(stride) == 1:
stride = (stride[0], stride[0])
    # check for "VALID" before padding is overwritten below; otherwise the
    # ceil_mode adjustment can never trigger
    if padding == "VALID" and ceil_mode:
        ceil_mode = False
    if padding == "SAME":
        padding = test_pooling_functions.calculate_same_padding(
            kernel, stride, x[0].shape[2:]
        )
    else:
        padding = (0, 0)
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
kernel_size=kernel,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
data_format=data_format,
)
# max_unpool1d
@handle_frontend_test(
fn_tree="paddle.nn.functional.max_unpool1d",
x_k_s_p=helpers.arrays_for_pooling(min_dims=3, max_dims=3, min_side=1, max_side=4),
indices=st.lists(st.integers(0, 1), min_size=1, max_size=4),
)
def test_paddle_max_unpool1d(
*,
x_k_s_p,
indices,
test_flags,
frontend,
on_device,
backend_fw,
fn_tree,
):
(input_dtype, x, kernel_size, stride, padding) = x_k_s_p
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
on_device=on_device,
fn_tree=fn_tree,
x=x[0],
indices=indices,
kernel_size=kernel_size,
stride=stride,
padding=padding,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_pooling.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_pooling.py",
"repo_id": "ivy",
"token_count": 4645
} | 65 |
# global
from hypothesis import strategies as st
import numpy as np
import pytest
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_method
CLASS_TREE = "ivy.functional.frontends.pandas.Series"
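# path of the ivy frontend Series class that handle_frontend_method instantiates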
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="pandas.Series",
method_name="abs",
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
)
def test_pandas_series_abs(
dtype_x,
frontend,
backend_fw,
frontend_method_data,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
backend_to_test=backend_fw,
on_device=on_device,
)
@pytest.mark.xfail(reason="testing pipeline fixes")
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="pandas.Series",
method_name="add",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
axis=st.sampled_from(["index", 0]),
)
def test_pandas_series_add(
dtype_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
axis,
):
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"other": x[1],
"level": None,
"axis": axis,
"fill_value": None,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
backend_to_test=backend_fw,
)
@pytest.mark.xfail(reason="testing pipeline fixes")
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="pandas.Series",
method_name="mean",
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
skipna=st.booleans(),
axis=st.sampled_from([None, 0]),
)
def test_pandas_series_mean(
dtype_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
skipna,
axis,
):
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"skipna": skipna, "axis": axis},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
backend_to_test=backend_fw,
)
@pytest.mark.xfail(reason="testing pipeline fixes")
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="pandas.Series",
method_name="sum",
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
skipna=st.booleans(),
axis=st.sampled_from([None, 0]),
min_count=st.integers(min_value=0, max_value=5),
)
def test_pandas_series_sum(
dtype_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
skipna,
axis,
min_count,
):
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"skipna": skipna,
"axis": axis,
"min_count": min_count,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
backend_to_test=backend_fw,
)
@pytest.mark.xfail(reason="testing pipeline fixes")
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="pandas.Series",
method_name="to_numpy",
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
na_value=st.sampled_from([None, np.nan, np.inf, -np.inf]),
copy=st.booleans(),
)
def test_pandas_series_to_numpy(
dtype_x,
na_value,
copy,
frontend_method_data,
init_flags,
method_flags,
on_device,
frontend,
backend_fw,
):
# todo add castable dtypes for output
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"na_value": na_value,
"copy": copy,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
frontend=frontend,
backend_to_test=backend_fw,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_pandas/test_series.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_pandas/test_series.py",
"repo_id": "ivy",
"token_count": 2608
} | 66 |
from hypothesis import strategies as st
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test, handle_frontend_method
CLASS_TREE = "ivy.functional.frontends.sklearn.model_selection"
@handle_frontend_method(
class_tree=CLASS_TREE + ".KFold",
init_tree="sklearn.model_selection.KFold",
method_name="get_n_splits",
dtype_x=helpers.dtype_and_values(),
)
def test_sklearn_kfold_get_n_split(
dtype_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
):
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"n_splits": 2, # todo test for shuffle
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"X": x[0], # this arg only for compatibility
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
backend_to_test=backend_fw,
)
@handle_frontend_method(
class_tree=CLASS_TREE + ".KFold",
init_tree="sklearn.model_selection.KFold",
method_name="split",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shared_dtype=True,
num_arrays=2,
),
)
def test_sklearn_kfold_split(
dtype_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
):
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"n_splits": 2, # todo test for shuffle
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"X": x[0],
"y": x[1],
"groups": None,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
backend_to_test=backend_fw,
)
@handle_frontend_method(
class_tree=CLASS_TREE + ".StratifiedKFold",
init_tree="sklearn.model_selection.StratifiedKFold",
method_name="split",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
shared_dtype=True,
num_arrays=2,
max_num_dims=2,
min_num_dims=1,
),
)
def test_sklearn_stratifiedkfold_split(
dtype_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
):
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"n_splits": 2,
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"X": x[0],
"y": x[1],
"groups": None,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
backend_to_test=backend_fw,
)
@handle_frontend_method(
class_tree=CLASS_TREE + ".StratifiedKFold",
init_tree="sklearn.model_selection.StratifiedKFold",
method_name="get_n_splits",
dtype_x=helpers.dtype_and_values(),
)
def test_sklearn_stratifiedkfold_get_n_split(
dtype_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
):
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"n_splits": 2,
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"X": x[0], # for compatibility
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
backend_to_test=backend_fw,
)
@handle_frontend_test(
fn_tree="sklearn.model_selection.train_test_split",
arrays_and_dtypes=helpers.dtype_and_values(
num_arrays=helpers.ints(min_value=2, max_value=4),
shape=helpers.lists(
x=helpers.ints(min_value=2, max_value=5),
min_size=2,
max_size=3,
),
),
shuffle=st.booleans(),
)
def test_sklearn_test_train_split(
arrays_and_dtypes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
shuffle,
):
dtypes, values = arrays_and_dtypes
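    # train_test_split accepts a variable number of arrays, so pass them as
    # x0, x1, ... and treat them all as positional arguments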
kw = {}
for i, x_ in enumerate(values):
kw[f"x{i}"] = x_
test_flags.num_positional_args = len(values)
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
frontend=frontend,
on_device=on_device,
test_values=False,
**kw,
shuffle=shuffle,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_sklearn/test_model_selection/test_split.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_sklearn/test_model_selection/test_split.py",
"repo_id": "ivy",
"token_count": 2545
} | 67 |
# global
from hypothesis import strategies as st, assume
import numpy as np
from tensorflow import errors as tf_errors
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy.functional.frontends.tensorflow.general_functions import _num_to_bit_list
from ivy_tests.test_ivy.test_frontends.test_numpy.test_creation_routines.test_from_shape_or_value import ( # noqa : E501
_input_fill_and_dtype,
)
from ivy_tests.test_ivy.test_frontends.test_tensorflow.test_tensor import (
_array_and_shape,
) # noqa : E501
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_linalg import _matrix_rank_helper
from ivy_tests.test_ivy.test_functional.test_core.test_manipulation import ( # noqa
_get_splits,
)
# --- Helpers --- #
# --------------- #
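# boolean_mask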
@st.composite
def _boolean_mask_helper(draw):
tensor_shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=3,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
)
dtype = draw(st.sampled_from(["float32", "float64"]))
# Param: tensor
# tensor = draw(
# helpers.array_values(
# dtype=dtype, shape=tensor_shape, min_value=-5.0, max_value=5.0
# ),
# )
dtype, tensor, axis = draw(
helpers.dtype_values_axis(
available_dtypes=[dtype],
shape=tensor_shape,
min_value=-5.0,
max_value=5.0,
force_int_axis=True,
valid_axis=True,
)
)
mask_dim = draw(helpers.ints(min_value=1, max_value=len(tensor_shape) - axis))
mask_shape = tensor_shape[axis : mask_dim + axis]
    # Param: mask
mask = draw(
helpers.array_values(
allow_nan=False,
dtype="bool",
shape=mask_shape,
),
)
return [dtype[0], "bool"], tensor, mask, axis
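# clip_by_value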
@st.composite
def _get_clip_inputs(draw):
shape = draw(
helpers.get_shape(
min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10
)
)
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=shape,
)
)
min = draw(
helpers.array_values(dtype=x_dtype[0], shape=shape, min_value=-50, max_value=5)
)
max = draw(
helpers.array_values(dtype=x_dtype[0], shape=shape, min_value=6, max_value=50)
)
return x_dtype, x, min, max
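# clip_by_global_norm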
@st.composite
def _get_global_norm_clip_inputs(draw):
t_list_dtype, t_list = draw(
helpers.dtype_and_values(
num_arrays=2,
min_num_dims=1,
shared_dtype=True,
min_value=-100,
max_value=100,
dtype=["float32"] * 2,
)
)
norm_dtype, norm = draw(
helpers.dtype_and_values(
shape=(1,),
shared_dtype=True,
min_value=0,
exclude_min=True,
max_value=100,
dtype=["float32"],
)
)
global_norm_dtype, global_norm = draw(
helpers.dtype_and_values(
shape=(1,),
shared_dtype=True,
min_value=0,
exclude_min=True,
max_value=100,
dtype=["float32"],
)
)
include_global = draw(st.booleans())
if not include_global:
global_norm_dtype, global_norm = None, None
return t_list_dtype, t_list, norm_dtype, norm, global_norm_dtype, global_norm
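# clip_by_norm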
@st.composite
def _get_norm_clip_inputs(draw):
dtype = draw(helpers.get_dtypes("numeric", full=False))
x_dtype, x, axis = draw(
helpers.dtype_values_axis(
available_dtypes=dtype,
min_num_dims=1,
min_value=-100,
max_value=100,
force_int_axis=True,
valid_axis=True,
)
)
norm_dtype, norm = draw(
helpers.dtype_and_values(available_dtypes=dtype, shape=(1,))
)
return x_dtype[0], x, axis, norm
# transpose
@st.composite
def _get_perm_helper(draw):
shape = draw(st.shared(helpers.get_shape(min_num_dims=1), key="shape"))
dimensions = [x for x in range(len(shape))]
perm = draw(st.permutations(dimensions))
return perm
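# linspace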
@st.composite
def _linspace_helper(draw):
shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
)
dtype = draw(st.sampled_from(["float32", "float64"]))
# Param: start
start = draw(
helpers.array_values(
dtype=dtype,
shape=shape,
min_value=-5.0,
max_value=5.0,
),
)
    # Param: stop
stop = draw(
helpers.array_values(
dtype=dtype,
shape=shape,
min_value=-4.0,
max_value=10.0,
),
)
return [dtype] * 2, start, stop
# tile
@st.composite
def _multiple_shape_helper(draw):
input_dtype, input_array, input_shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), ret_shape=True
)
)
input_dims = len(input_shape)
dt_n_multiples = draw(
helpers.dtype_and_values(
available_dtypes=["int32", "int64"],
min_value=0,
max_value=10,
shape=draw(
helpers.get_shape(
min_num_dims=1,
max_num_dims=1,
min_dim_size=input_dims,
max_dim_size=input_dims,
)
),
)
)
return input_dtype, input_array, dt_n_multiples
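# pad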
@st.composite
def _pad_helper(draw):
mode = draw(
st.sampled_from(
[
"CONSTANT",
"REFLECT",
"SYMMETRIC",
]
)
)
dtype, input, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
ret_shape=True,
min_num_dims=1,
min_value=-100,
max_value=100,
)
)
ndim = len(shape)
min_dim = min(shape)
paddings = draw(
st.lists(
st.tuples(
st.integers(min_value=0, max_value=min_dim - 1),
st.integers(min_value=0, max_value=min_dim - 1),
),
min_size=ndim,
max_size=ndim,
)
)
constant_values = draw(st.integers(min_value=0, max_value=4))
return dtype, input[0], paddings, mode, constant_values
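# reshape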
@st.composite
def _reshape_helper(draw):
shape = draw(helpers.get_shape(min_num_dims=1))
reshape_shape = draw(helpers.reshape_shapes(shape=shape))
dtype = draw(helpers.array_dtypes(num_arrays=1))
x = draw(helpers.array_values(dtype=dtype[0], shape=shape))
return x, dtype, reshape_shape
# sequence_mask
@st.composite
def _sequence_mask_helper(draw):
max_val = draw(st.integers(min_value=1, max_value=100000))
in_dtype, lens = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
min_value=-max_val,
max_value=max_val,
)
)
max_len = draw(st.integers(min_value=max_val, max_value=max_val))
dtype = draw(
st.sampled_from(
[
"float16",
"uint8",
"complex128",
"bool",
"float64",
"int8",
"int16",
"complex64",
"float32",
"int32",
"int64",
]
)
)
return in_dtype, lens, max_len, dtype
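# slice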
@st.composite
def _slice_helper(draw):
dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
ret_shape=True,
),
)
begin, size = [], []
for i in shape:
begin += [draw(st.integers(min_value=0, max_value=i - 1))]
size += [draw(st.integers(min_value=0, max_value=i - begin[-1]))]
return dtype, x, np.array(begin), np.array(size)
# Squeeze
@st.composite
def _squeeze_helper(draw):
shape = draw(st.shared(helpers.get_shape(), key="value_shape"))
valid_axes = []
for index, axis in enumerate(shape):
if axis == 1:
valid_axes.append(index)
valid_axes.insert(0, None)
return draw(st.sampled_from(valid_axes))
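# strided_slice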
@st.composite
def _strided_slice_helper(draw):
dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
ret_shape=True,
),
)
ndims = len(shape)
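    # draw the five bit masks (begin, end, ellipsis, new_axis, shrink_axis);
    # the filter keeps at most one ellipsis bit, and none for 1-D inputs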
masks = draw(
st.lists(
st.integers(min_value=0, max_value=2**ndims - 1), min_size=5, max_size=5
).filter(lambda x: bin(x[2])[2:].count("1") <= min(len(shape) - 1, 1))
)
begin, end, strides = [], [], []
for i in shape:
begin += [draw(st.integers(min_value=0, max_value=i - 1))]
end += [draw(st.integers(min_value=0, max_value=i - 1))]
if begin[-1] < end[-1]:
strides += [draw(st.integers(min_value=1, max_value=i))]
else:
strides += [draw(st.integers(max_value=-1, min_value=-i))]
ellipsis_mask = _num_to_bit_list(masks[2], ndims)
for i, v in enumerate(ellipsis_mask):
if v == 1:
skip = draw(st.integers(min_value=0, max_value=ndims))
begin, end, strides = map(
lambda x: x[:i] + x[i + skip :] if i + skip < ndims else x[:i],
[begin, end, strides],
)
break
return dtype, x, np.array(begin), np.array(end), np.array(strides), masks
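# constant / convert_to_tensor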
@st.composite
def _x_cast_dtype_shape(draw):
x_dtype = draw(helpers.get_dtypes("valid", full=False))
x_dtype, x = draw(
helpers.dtype_and_values(
dtype=x_dtype,
shape=st.shared(helpers.get_shape(), key="value_shape"),
large_abs_safety_factor=10,
small_abs_safety_factor=10,
safety_factor_scale="log",
),
)
to_shape = draw(
helpers.reshape_shapes(shape=st.shared(helpers.get_shape(), key="value_shape")),
)
cast_dtype = x_dtype[0]
# known tensorflow bug when trying to cast to a different type
# https://github.com/tensorflow/tensorflow/issues/39554
# cast_dtype = draw(
# helpers.get_dtypes("valid", full=False)
# .map(lambda t: t[0])
# .filter(lambda t: ivy.can_cast(x_dtype[0], t))
# )
return x_dtype, x, cast_dtype, to_shape
# reverse
@st.composite
def reverse_helper(draw):
dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=8,
ret_shape=True,
)
)
axis_dtype, axis = draw(
helpers.dtype_and_values(
available_dtypes=["int32", "int64"],
min_num_dims=1,
max_num_dims=1,
min_value=-(len(shape) - 1),
max_value=len(shape) - 1,
shape=(1,),
)
)
return dtype, x, axis_dtype, axis
# --- Main --- #
# ------------ #
# argsort
@handle_frontend_test(
fn_tree="tensorflow.argsort",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
min_axis=-1,
max_axis=0,
),
direction=st.sampled_from(["ASCENDING", "DESCENDING"]),
)
def test_tensorflow_argsort(
*,
dtype_input_axis,
direction,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, input, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
values=input[0],
axis=axis,
direction=direction,
)
# boolean_mask
@handle_frontend_test(
fn_tree="tensorflow.boolean_mask",
dtype_and_values=_boolean_mask_helper(),
)
def test_tensorflow_boolean_mask(
*,
dtype_and_values,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, tensor, mask, axis = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
tensor=tensor[0],
mask=mask,
axis=axis,
)
# clip_by_global_norm
@handle_frontend_test(
fn_tree="tensorflow.clip_by_global_norm",
input_and_norm=_get_global_norm_clip_inputs(),
test_with_out=st.just(False),
)
def test_tensorflow_clip_by_global_norm(
*,
input_and_norm,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
(
t_list_dtype,
t_list,
norm_dtype,
norm,
global_norm_dtype,
global_norm,
) = input_and_norm
input_dtypes = [t_list_dtype[0], norm_dtype[0]]
use_norm = None
if global_norm_dtype:
input_dtypes.append(global_norm_dtype[0])
use_norm = global_norm[0]
helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
t_list=t_list,
clip_norm=norm[0],
use_norm=use_norm,
)
# clip_by_norm
@handle_frontend_test(
fn_tree="tensorflow.clip_by_norm",
input_and_norm=_get_norm_clip_inputs(),
test_with_out=st.just(False),
)
def test_tensorflow_clip_by_norm(
*,
input_and_norm,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
x_dtype, x, axis, norm = input_and_norm
helpers.test_frontend_function(
input_dtypes=[x_dtype, x_dtype],
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
t=x[0],
clip_norm=norm[0],
axes=axis,
)
# clip_by_value
@handle_frontend_test(
fn_tree="tensorflow.clip_by_value",
input_and_ranges=_get_clip_inputs(),
test_with_out=st.just(False),
)
def test_tensorflow_clip_by_value(
*,
input_and_ranges,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
):
x_dtype, x, min, max = input_and_ranges
helpers.test_frontend_function(
input_dtypes=x_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
t=x[0],
clip_value_min=min,
clip_value_max=max,
)
# concat
@handle_frontend_test(
fn_tree="tensorflow.concat",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=st.integers(min_value=1, max_value=4),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_concat(
*,
dtype_input_axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
values=x,
axis=axis,
)
# cond
@handle_frontend_test(
fn_tree="tensorflow.cond",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=1,
),
pred_cond=st.booleans(),
var=st.integers(min_value=1, max_value=100),
test_with_out=st.just(False),
)
def test_tensorflow_cond(
*,
dtype_and_x,
pred_cond,
var,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
def _test_true_fn():
return var + var
def _test_false_fn():
return var * var
input_dtype, _ = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
pred=pred_cond,
true_fn=_test_true_fn,
false_fn=_test_false_fn,
)
# constant
@handle_frontend_test(
fn_tree="tensorflow.constant",
all_args=_x_cast_dtype_shape(),
test_with_out=st.just(False),
)
def test_tensorflow_constant(
*,
all_args,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
x_dtype, x, cast_dtype, to_shape = all_args
helpers.test_frontend_function(
input_dtypes=x_dtype,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
value=x[0].tolist() if x[0].ndim > 0 else x[0].item(),
dtype=cast_dtype,
shape=to_shape,
)
# convert_to_tensor
@handle_frontend_test(
fn_tree="tensorflow.convert_to_tensor",
dtype_x_cast=_x_cast_dtype_shape(),
dtype_hint=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_tensorflow_convert_to_tensor(
*,
dtype_x_cast,
dtype_hint,
backend_fw,
on_device,
fn_tree,
frontend,
test_flags,
):
x_dtype, x, cast_dtype, _ = dtype_x_cast
helpers.test_frontend_function(
input_dtypes=x_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
value=x[0],
dtype=cast_dtype,
dtype_hint=dtype_hint[0],
)
# einsum
@handle_frontend_test(
fn_tree="tensorflow.einsum",
eq_n_op_n_shp=st.sampled_from(
[
("ii", (np.arange(25).reshape(5, 5),), ()),
("ii->i", (np.arange(25).reshape(5, 5),), (5,)),
("ij,j", (np.arange(25).reshape(5, 5), np.arange(5)), (5,)),
]
),
dtype=helpers.get_dtypes("float", full=False),
)
def test_tensorflow_einsum(
*,
eq_n_op_n_shp,
dtype,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
eq, operands, _ = eq_n_op_n_shp
kw = {}
i = 0
for x_ in operands:
kw[f"x{i}"] = x_
i += 1
# len(operands) + 1 because of the equation
test_flags.num_positional_args = len(operands) + 1
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
equation=eq,
**kw,
)
@handle_frontend_test(
fn_tree="tensorflow.ensure_shape",
dtype_and_x=_array_and_shape(min_num_dims=0, max_num_dims=5),
)
def test_tensorflow_ensure_shape(
*,
dtype_and_x,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
x=x[0],
shape=x[1],
)
# expand_dims
@handle_frontend_test(
fn_tree="tensorflow.expand_dims",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="shape"),
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(), key="shape"),
allow_neg=True,
force_int=True,
),
)
def test_tensorflow_expand_dims(
*,
dtype_value,
axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
axis=axis,
)
# eye
@handle_frontend_test(
fn_tree="tensorflow.linalg.eye",
gt_fn_tree="tensorflow.eye",
n_rows=helpers.ints(min_value=0, max_value=10),
n_cols=st.none() | helpers.ints(min_value=0, max_value=10),
batch_shape=st.lists(
helpers.ints(min_value=1, max_value=10), min_size=1, max_size=2
),
dtype=helpers.get_dtypes("valid", full=False),
)
def test_tensorflow_eye(
*,
n_rows,
n_cols,
batch_shape,
dtype,
frontend,
backend_fw,
test_flags,
fn_tree,
gt_fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
gt_fn_tree=gt_fn_tree,
on_device=on_device,
num_rows=n_rows,
num_columns=n_cols,
batch_shape=batch_shape,
dtype=dtype[0],
)
# fill
@handle_frontend_test(
fn_tree="tensorflow.fill",
shape=helpers.get_shape(),
input_fill_dtype=_input_fill_and_dtype(),
)
def test_tensorflow_fill(
shape,
input_fill_dtype,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, _, fill, dtype_to_cast = input_fill_dtype
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-05,
dims=shape,
value=fill,
)
# foldl
@handle_frontend_test(
fn_tree="tensorflow.foldl",
fn=st.sampled_from(
[
lambda a, b: a + b,
lambda a, b: a - b,
lambda a, b: a * b,
],
),
initializer=st.one_of(st.none(), st.floats(min_value=-1000, max_value=1000)),
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", full=False),
min_value=-1000,
max_value=1000,
max_dim_size=10,
max_num_dims=4,
min_dim_size=1,
min_num_dims=1,
),
parallel_iterations=st.just(10),
swap_memory=st.booleans(),
name=st.none(),
)
def test_tensorflow_foldl(
*,
fn,
initializer,
dtype_and_values,
frontend,
backend_fw,
fn_tree,
test_flags,
parallel_iterations,
swap_memory,
name,
):
dtype, elems = dtype_and_values
elems = np.atleast_1d(elems)
helpers.test_frontend_function(
input_dtypes=dtype,
fn=fn,
elems=elems,
initializer=initializer,
backend_to_test=backend_fw,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
name=name,
frontend=frontend,
fn_tree=fn_tree,
test_flags=test_flags,
)
# foldr
@handle_frontend_test(
fn_tree="tensorflow.foldr",
fn=st.sampled_from(
[
lambda a, b: a + b,
lambda a, b: a - b,
lambda a, b: a * b,
],
),
initializer=st.one_of(st.none(), st.floats(min_value=-1e3, max_value=1e3)),
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", full=False),
min_value=-1e3,
max_value=1e3,
max_dim_size=10,
max_num_dims=4,
min_dim_size=1,
min_num_dims=1,
),
parallel_iterations=st.just(10),
swap_memory=st.booleans(),
name=st.none(),
)
def test_tensorflow_foldr(
*,
fn,
initializer,
dtype_and_values,
frontend,
backend_fw,
fn_tree,
test_flags,
parallel_iterations,
swap_memory,
name,
):
dtype, elems = dtype_and_values
elems = np.atleast_1d(elems)
helpers.test_frontend_function(
input_dtypes=dtype,
fn=fn,
elems=elems,
initializer=initializer,
backend_to_test=backend_fw,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
name=name,
frontend=frontend,
fn_tree=fn_tree,
test_flags=test_flags,
)
# gather
@handle_frontend_test(
fn_tree="tensorflow.gather",
params_indices_axis_batch_dims=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("valid"),
indices_dtypes=["int64"],
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
indices_same_dims=True,
),
)
def test_tensorflow_gather(
*,
params_indices_axis_batch_dims,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtypes, params, indices, axis, batch_dims = params_indices_axis_batch_dims
helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
params=params,
indices=indices,
axis=axis,
batch_dims=batch_dims,
)
# gather_nd
@handle_frontend_test(
fn_tree="tensorflow.gather_nd",
params_indices_axis_batch_dims=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("valid"),
indices_dtypes=["int64"],
min_num_dims=5,
max_num_dims=10,
min_dim_size=1,
max_dim_size=5,
indices_same_dims=False,
),
)
def test_tensorflow_gather_nd(
*,
params_indices_axis_batch_dims,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtypes, params, indices, axis, batch_dims = params_indices_axis_batch_dims
helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
params=params,
indices=indices,
batch_dims=batch_dims,
)
# identity
@handle_frontend_test(
fn_tree="tensorflow.identity",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_tensorflow_identity(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# identity_n
@handle_frontend_test(
fn_tree="tensorflow.identity_n",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), max_num_dims=5
),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_tensorflow_identity_n(
dtype_and_x,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
input=x,
)
# is_tensor
@handle_frontend_test(
fn_tree="tensorflow.is_tensor",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
)
def test_tensorflow_is_tensor(
*,
dtype_and_x,
backend_fw,
frontend,
test_flags,
fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
x=x[0],
)
# linspace
@handle_frontend_test(
fn_tree="tensorflow.linspace",
dtype_and_params=_linspace_helper(),
num=helpers.ints(min_value=2, max_value=10),
axis=helpers.ints(min_value=-1, max_value=0),
)
def test_tensorflow_linspace(
*,
dtype_and_params,
num,
axis,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
dtype, start, stop = dtype_and_params
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
start=start,
stop=stop,
num=num,
axis=axis,
on_device=on_device,
)
# meshgrid
@handle_frontend_test(
fn_tree="tensorflow.meshgrid",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
max_num_dims=2,
min_num_dims=2,
min_dim_size=2,
max_dim_size=5,
),
indexing=st.sampled_from(["xy", "ij"]),
test_with_out=st.just(False),
)
def test_tensorflow_meshgrid(
*,
dtype_and_values,
indexing,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
dtype, arrays = dtype_and_values
arrays = arrays[0]
kwargs = {}
for i, array in enumerate(arrays):
kwargs[f"a{i}"] = array
kwargs["indexing"] = indexing
test_flags.num_positional_args = len(arrays)
test_flags.generate_frontend_arrays = False
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**kwargs,
)
# no_op
@handle_frontend_test(
fn_tree="tensorflow.no_op",
dtype=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_no_op(
*,
dtype,
frontend,
backend_fw,
test_flags,
fn_tree,
):
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
)
@handle_frontend_test(
fn_tree="tensorflow.norm",
aliases=["tensorflow.norm"],
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=3,
max_num_dims=5,
min_dim_size=1,
max_dim_size=4,
min_axis=-3,
max_axis=2,
),
ord=st.sampled_from([1, 2, np.inf]),
keepdims=st.booleans(),
)
def test_tensorflow_norm(
*,
dtype_values_axis,
ord,
keepdims,
backend_fw,
frontend,
test_flags,
fn_tree,
on_device,
):
input_dtype, x, axis = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensor=x[0],
ord=ord,
axis=axis,
keepdims=keepdims,
)
# one_hot
@handle_frontend_test(
fn_tree="tensorflow.one_hot",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=1,
min_value=0,
max_value=10,
),
)
def test_tensorflow_one_hot(
*,
dtype_and_x,
frontend,
backend_fw,
fn_tree,
test_flags,
on_device,
):
input_dtype, x = dtype_and_x
depth = 10
helpers.test_frontend_function(
input_dtypes=["uint8", "int32", "int64"],
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
indices=x[0],
depth=depth,
)
# ones
@handle_frontend_test(
fn_tree="tensorflow.ones",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_tensorflow_ones(
shape,
dtype,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
shape=shape,
dtype=dtype[0],
)
# ones_like
@handle_frontend_test(
fn_tree="tensorflow.ones_like",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_tensorflow_ones_like(
dtype_and_x,
dtype,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dtype=dtype[0],
)
# pad
@handle_frontend_test(
fn_tree="tensorflow.pad",
aliases=["tensorflow.compat.v1.pad"],
dtype_and_values_and_other=_pad_helper(),
test_with_out=st.just(False),
)
def test_tensorflow_pad(
*,
dtype_and_values_and_other,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, tensor, paddings, mode, constant_values = dtype_and_values_and_other
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensor=tensor,
paddings=paddings,
mode=mode,
constant_values=constant_values,
)
# range
@handle_frontend_test(
fn_tree="tensorflow.range",
start=helpers.ints(min_value=-50, max_value=0),
limit=helpers.ints(min_value=1, max_value=50),
delta=helpers.ints(min_value=1, max_value=5),
dtype=helpers.get_dtypes("float"),
test_with_out=st.just(False),
)
def test_tensorflow_range(
*,
start,
limit,
delta,
dtype,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
helpers.test_frontend_function(
input_dtypes=[],
on_device=on_device,
fn_tree=fn_tree,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
start=start,
limit=limit,
delta=delta,
dtype=dtype[0],
)
# rank
@handle_frontend_test(
fn_tree="tensorflow.rank",
dtype_and_x=_matrix_rank_helper(),
test_with_out=st.just(False),
)
def test_tensorflow_rank(
*,
dtype_and_x,
backend_fw,
on_device,
fn_tree,
frontend,
test_flags,
):
dtype, x, _ = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# realdiv
@handle_frontend_test(
fn_tree="tensorflow.realdiv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
num_arrays=2,
min_value=-20,
max_value=20,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_realdiv(
*,
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# repeat
@handle_frontend_test(
fn_tree="tensorflow.repeat",
dtypes_and_value_and_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
max_dim_size=10,
valid_axis=True,
force_int_axis=True,
),
repeats=helpers.ints(min_value=1, max_value=5),
)
def test_tensorflow_repeat(
*,
dtypes_and_value_and_axis,
repeats,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtypes, x, axis = dtypes_and_value_and_axis
helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
repeats=repeats,
axis=axis,
)
# reshape
@handle_frontend_test(
fn_tree="tensorflow.reshape",
input_x_shape=_reshape_helper(),
test_with_out=st.just(False),
)
def test_tensorflow_reshape(
*,
input_x_shape,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
x, x_dtype, shape = input_x_shape
helpers.test_frontend_function(
input_dtypes=x_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensor=x,
shape=shape,
)
@handle_frontend_test(
fn_tree="tensorflow.reverse",
dtype_x_axis=reverse_helper(),
)
def test_tensorflow_reverse(
*,
dtype_x_axis,
frontend,
backend_fw,
fn_tree,
test_flags,
on_device,
):
dtype, x, axis_dtype, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype + axis_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
tensor=x[0],
axis=axis[0],
)
# roll
@handle_frontend_test(
fn_tree="tensorflow.roll",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
),
shift=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
force_tuple=True,
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
force_tuple=True,
),
)
def test_tensorflow_roll(
*,
dtype_and_values,
shift,
axis,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, value = dtype_and_values
if isinstance(shift, int) and isinstance(axis, tuple):
axis = axis[0]
if isinstance(shift, tuple) and isinstance(axis, tuple):
if len(shift) != len(axis):
mn = min(len(shift), len(axis))
shift = shift[:mn]
axis = axis[:mn]
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
shift=shift,
axis=axis,
)
# scan
@handle_frontend_test(
fn_tree="tensorflow.scan",
dtypes_values=helpers.dtype_and_values(
available_dtypes=["float32"], num_arrays=1, min_num_dims=2, max_dim_size=3
),
test_with_out=st.just(False),
)
def test_tensorflow_scan(
*,
dtypes_values,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
def _test_fn(a, x):
return a + x
x_dtype, elems = dtypes_values
helpers.test_frontend_function(
input_dtypes=x_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
fn=_test_fn,
elems=elems[0],
)
# searchsorted
@handle_frontend_test(
fn_tree="tensorflow.searchsorted",
dtype_x_v=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shared_dtype=True,
min_num_dims=1,
max_num_dims=1,
num_arrays=2,
),
side=st.sampled_from(["left", "right"]),
out_type=st.sampled_from(["int32", "int64"]),
)
def test_tensorflow_searchsorted(
dtype_x_v,
side,
out_type,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtypes, xs = dtype_x_v
helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
sorted_sequence=np.sort(xs[0]),
values=xs[1],
side=side,
out_type=out_type,
)
@handle_frontend_test(
fn_tree="tensorflow.sequence_mask",
dtype_lens_maxlen=_sequence_mask_helper(),
test_with_out=st.just(False),
)
def test_tensorflow_sequence_mask(
*,
dtype_lens_maxlen,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, lens, max_len, dtype = dtype_lens_maxlen
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
lengths=lens[0],
maxlen=max_len,
dtype=dtype,
)
# shape
@handle_frontend_test(
fn_tree="tensorflow.shape",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
output_dtype=st.sampled_from(["int32", "int64"]),
)
def test_tensorflow_shape(
*,
dtype_and_x,
output_dtype,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
(
input_dtype,
x,
) = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
out_type=output_dtype,
)
@handle_frontend_test(
fn_tree="tensorflow.shape_n",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, max_num_dims=5
),
output_dtype=st.sampled_from(["int32", "int64"]),
)
def test_tensorflow_shape_n(
*,
dtype_and_x,
output_dtype,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input,
out_type=output_dtype,
)
# size
# output_dtype not generated as tf only accepts tf dtypes
@handle_frontend_test(
fn_tree="tensorflow.size",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), max_num_dims=4
),
# output_dtype=st.sampled_from(["int32", "int64"]),
test_with_out=st.just(False),
)
def test_tensorflow_size(
*,
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device, # output_dtype
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
# out_type=output_dtype,
)
# slice
@handle_frontend_test(
fn_tree="tensorflow.slice",
dtype_x_params=_slice_helper(),
test_with_out=st.just(False),
)
def test_tensorflow_slice(
*,
dtype_x_params,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
dtype, x, begin, size = dtype_x_params
helpers.test_frontend_function(
input_dtypes=dtype + 3 * ["int64"],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input_=x[0],
begin=begin,
size=size,
)
# sort
@handle_frontend_test(
fn_tree="tensorflow.sort",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
min_axis=-1,
max_axis=0,
),
descending=st.sampled_from(["ASCENDING", "DESCENDING"]),
)
def test_tensorflow_sort(
*,
dtype_input_axis,
descending,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, input, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
values=input[0],
axis=axis,
direction=descending,
)
# split
@handle_frontend_test(
fn_tree="tensorflow.split",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
num_or_size_splits=_get_splits(allow_none=False, min_num_dims=1),
axis=st.shared(
helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
force_int=True,
),
key="target_axis",
),
test_with_out=st.just(False),
)
def test_tensorflow_split(
*,
dtype_value,
num_or_size_splits,
axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
value=value[0],
num_or_size_splits=num_or_size_splits,
axis=axis,
)
@handle_frontend_test(
fn_tree="tensorflow.squeeze",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="value_shape"),
),
axis=_squeeze_helper(),
)
def test_tensorflow_squeeze_general(
*,
dtype_value,
axis,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
dtype, xs = dtype_value
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=xs[0],
axis=axis,
)
# stack
@handle_frontend_test(
fn_tree="tensorflow.stack",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays"),
shape=helpers.get_shape(min_num_dims=1),
shared_dtype=True,
valid_axis=True,
allow_neg_axes=True,
force_int_axis=True,
),
)
def test_tensorflow_stack(
dtype_values_axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, values, axis = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
values=values,
axis=axis,
)
# stop_gradient
@handle_frontend_test(
fn_tree="tensorflow.stop_gradient",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
)
def test_tensorflow_stop_gradient(
*, dtype_and_x, test_flags, backend_fw, fn_tree, frontend, on_device
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# strided_slice
@handle_frontend_test(
fn_tree="tensorflow.strided_slice",
dtype_x_params=_strided_slice_helper(),
test_with_out=st.just(False),
)
def test_tensorflow_strided_slice(
*,
dtype_x_params,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
dtype, x, begin, end, strides, masks = dtype_x_params
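    # masks unpack as (begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)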
try:
helpers.test_frontend_function(
input_dtypes=dtype + 3 * ["int64"] + 5 * ["int32"],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input_=x[0],
begin=begin,
end=end,
strides=strides,
begin_mask=masks[0],
end_mask=masks[1],
ellipsis_mask=masks[2],
new_axis_mask=masks[3],
shrink_axis_mask=masks[4],
)
except tf_errors.InvalidArgumentError:
assume(False)
except Exception as e:
if (
hasattr(e, "message")
and "only stride 1 allowed on non-range indexing" in e.message
):
assume(False)
raise e
# tensor_scatter_nd_add
@handle_frontend_test(
fn_tree="tensorflow.tensor_scatter_nd_add",
all_arguments=_multiple_shape_helper(),
tensor=helpers.array_values(
dtype=helpers.get_dtypes("numeric"), shape=(8,), min_value=2, max_value=49
),
indices=helpers.array_values(
dtype=helpers.get_dtypes("integer"), shape=(4, 1), min_value=0, max_value=7
),
updates=helpers.array_values(
dtype=helpers.get_dtypes("integer"),
shape=(4,),
min_value=9,
max_value=12,
),
)
def test_tensorflow_tensor_scatter_nd_add(
*,
all_arguments,
tensor,
indices,
updates,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, input_matrix, dt_and_multiples = all_arguments
dt_mul, multiples = dt_and_multiples
helpers.test_frontend_function(
input_dtypes=input_dtype + dt_mul,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensor=tensor[0],
indices=indices[0],
updates=updates[0],
)
@handle_frontend_test(fn_tree="tensorflow.tile", all_arguments=_multiple_shape_helper())
def test_tensorflow_tile(
*,
all_arguments,
test_flags,
frontend,
fn_tree,
on_device,
backend_fw,
):
input_dtype, input_matrix, dt_and_multiples = all_arguments
dt_mul, multiples = dt_and_multiples
helpers.test_frontend_function(
input_dtypes=input_dtype + dt_mul,
input=input_matrix[0],
multiples=multiples[0],
test_flags=test_flags,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
)
@handle_frontend_test(
fn_tree="tensorflow.transpose",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
),
perm=_get_perm_helper(),
conjugate=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_transpose(
*,
dtype_and_x,
perm,
conjugate,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
perm=perm,
conjugate=conjugate,
)
# truncatediv
@handle_frontend_test(
fn_tree="tensorflow.truncatediv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=-20,
max_value=20,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_truncatediv(
*,
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# truncatemod
@handle_frontend_test(
fn_tree="tensorflow.truncatemod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_truncatemod(
*,
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[0], 0)))
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="tensorflow.unique",
dtype_x=helpers.dtype_and_values(
available_dtypes=["int64", "int32"],
min_value=1,
max_value=100,
min_dim_size=1,
max_dim_size=10,
min_num_dims=1,
max_num_dims=1,
),
    test_with_out=st.just(False),
)
def test_tensorflow_unique(
*,
dtype_x,
frontend,
backend_fw,
fn_tree,
test_flags,
on_device,
):
dtype, x = dtype_x
helpers.test_frontend_function(
input_dtypes=dtype,
x=x[0],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
)
@handle_frontend_test(
fn_tree="tensorflow.unique_with_counts",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=1,
)
),
dtype=["int32", "int64"],
),
output_dtype=st.sampled_from(["int32", "int64"]),
)
def test_tensorflow_unique_with_counts(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
output_dtype,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
x=x[0],
input_dtypes=input_dtype,
out_idx=output_dtype,
backend_to_test=backend_fw,
fn_tree=fn_tree,
frontend=frontend,
test_flags=test_flags,
on_device=on_device,
)
@handle_frontend_test(
fn_tree="tensorflow.unravel_index",
indices=helpers.array_values(
dtype=helpers.get_dtypes("integer"), shape=(1, 2), min_value=0, max_value=49
),
dims=helpers.array_values(
dtype=helpers.get_dtypes("integer"), shape=(1, 2), min_value=50
),
)
def test_tensorflow_unravel_index(
*, indices, dims, frontend, test_flags, fn_tree, on_device, backend_fw
):
helpers.test_frontend_function(
input_dtypes=["int32"],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
indices=indices[0],
dims=dims[0],
)
# unstack
@handle_frontend_test(
fn_tree="tensorflow.unstack",
dtypes_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=2,
max_dim_size=1,
),
number_positional_args=st.just(1),
axis=st.integers(-1, 0),
test_with_out=st.just(False),
)
def test_tensorflow_unstack(
*,
dtypes_values,
axis,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
    x_dtype, x = dtypes_values
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
value=x[0],
axis=axis,
)
# where
@handle_frontend_test(
fn_tree="tensorflow.where",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=1,
min_value=0,
max_value=10,
min_num_dims=1,
),
)
def test_tensorflow_where_no_xy(
*,
dtype_and_input,
frontend,
backend_fw,
fn_tree,
test_flags,
on_device,
):
input_dtype, [condition] = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
condition=condition,
)
# where
@handle_frontend_test(
fn_tree="tensorflow.where",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("bool"),
num_arrays=3,
min_value=0,
max_value=10,
min_num_dims=1,
),
dim_remove_from_x=st.integers(),
dim_remove_from_y=st.integers(),
)
def test_tensorflow_where_with_xy(
*,
dtype_and_input,
dim_remove_from_x,
dim_remove_from_y,
frontend,
backend_fw,
fn_tree,
test_flags,
on_device,
):
input_dtype, [condition, x, y] = dtype_and_input
if input_dtype != ["bool", "bool", "bool"]:
return
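    # optionally strip leading dims from x and y so they broadcast against `condition`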
for _ in range(min(len(x.shape) - 1, dim_remove_from_x)):
x = x[0]
for _ in range(min(len(y.shape) - 1, dim_remove_from_y)):
y = y[0]
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
condition=condition,
x=x,
y=y,
)
@handle_frontend_test(
fn_tree="tensorflow.while_loop",
dtype_and_x=helpers.dtype_and_values(
num_arrays=1,
min_num_dims=1,
max_num_dims=3,
min_dim_size=2,
max_dim_size=10,
shared_dtype=True,
min_value=-100,
max_value=100,
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_tensorflow_while_loop(
*,
dtype_and_x,
test_flags,
on_device,
backend_fw,
fn_tree,
frontend,
):
def _test_cond_fn(x):
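        # keep iterating while the input still contains a negative real-valued element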
def any_negative_real(arr):
for elem in arr:
if isinstance(elem, (int, float)) and elem < 0:
return True
elif isinstance(elem, complex):
return False
elif isinstance(elem, (list, tuple)):
if any_negative_real(elem):
return True
return False
return any_negative_real(x)
def _test_body_fn(x):
return x + 1
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
cond=_test_cond_fn,
body=_test_body_fn,
loop_vars=(x[0],),
)
# zeros
@handle_frontend_test(
fn_tree="tensorflow.zeros",
input=helpers.get_shape(
allow_none=False,
min_num_dims=0,
max_num_dims=10,
min_dim_size=0,
max_dim_size=10,
),
dtype=helpers.get_dtypes("valid", full=False),
)
def test_tensorflow_zeros(
*,
input,
dtype,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
helpers.test_frontend_function(
shape=input,
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
)
# zeros_like
@handle_frontend_test(
fn_tree="tensorflow.zeros_like",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
dtype=helpers.get_dtypes("numeric", full=False),
test_with_out=st.just(False),
)
def test_tensorflow_zeros_like(
dtype_and_x,
dtype,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dtype=dtype[0],
)
# End of file: ivy_tests/test_ivy/test_frontends/test_tensorflow/test_general_functions.py
# Next file: ivy_tests/test_ivy/test_frontends/test_torch/test_miscellaneous_ops.py
# global
import math
import numpy as np
from hypothesis import assume, strategies as st
import hypothesis.extra.numpy as nph
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers.hypothesis_helpers.general_helpers import sizes_
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_linalg import (
_get_dtype_value1_value2_axis_for_tensordot,
)
# --- Helpers --- #
# --------------- #
@st.composite
def _get_dtype_value1_value2_cov(
draw,
available_dtypes,
min_num_dims,
max_num_dims,
min_dim_size,
max_dim_size,
abs_smallest_val=None,
min_value=None,
max_value=None,
allow_inf=False,
exclude_min=False,
exclude_max=False,
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
):
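    """
    Draw inputs for torch.cov tests.

    Returns ([dtype], value_matrix, correction, fweights, aweights), where the
    weight vectors are 1-D with length equal to the number of observations
    (the second dimension of the drawn shape).
    """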
shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
dtype = draw(st.sampled_from(draw(available_dtypes)))
values = []
for i in range(1):
values.append(
draw(
helpers.array_values(
dtype=dtype,
shape=shape,
abs_smallest_val=abs_smallest_val,
min_value=min_value,
max_value=max_value,
allow_inf=allow_inf,
exclude_min=exclude_min,
exclude_max=exclude_max,
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
)
)
)
value1 = values[0]
correction = draw(helpers.ints(min_value=0, max_value=1))
fweights = draw(
helpers.array_values(
dtype="int64",
shape=shape[1],
abs_smallest_val=1,
min_value=1,
max_value=10,
allow_inf=False,
)
)
aweights = draw(
helpers.array_values(
dtype="float64",
shape=shape[1],
abs_smallest_val=1,
min_value=1,
max_value=10,
allow_inf=False,
small_abs_safety_factor=1,
)
)
return [dtype], value1, correction, fweights, aweights
@st.composite
def _get_input_and_broadcast_shape(draw):
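    """
    Draw a tensor (0-D to 3-D) together with a shape it can broadcast to.

    The broadcast target keeps the drawn dimensions as its trailing dims and
    prepends extra leading dims when needed, e.g. an input of shape (2, 3)
    may be paired with a target shape of (4, 2, 3).
    """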
# Determine the dimensionality of the tensor, ranging from scalar (0D) to 3D.
num_dims = draw(st.integers(min_value=0, max_value=3))
# Generate the dimensions of the tensor.
dims = [draw(st.integers(min_value=1, max_value=5)) for _ in range(num_dims)]
# Make Tensor.
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), shape=dims
)
)
# Define the broadcast shape dimension
broadcast_num_dims = draw(st.integers(min_value=num_dims, max_value=3))
# Construct the broadcast shape.
if broadcast_num_dims == num_dims:
shape = tuple(dims)
else:
shape_components = [
draw(st.integers(min_value=1, max_value=5))
for _ in range(broadcast_num_dims - num_dims)
]
shape = tuple(shape_components) + tuple(dims)
return x_dtype, x, shape
# helpers
@st.composite
def _get_repeat_interleaves_args(
draw, *, available_dtypes, valid_axis, max_num_dims, max_dim_size
):
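    """
    Draw arguments for torch.repeat_interleave tests.

    Returns ([values_dtype, repeats_dtype], values, repeats, axis, output_size).
    When no axis is drawn, `repeats` is either a single value or one entry per
    element of the flattened input; otherwise it has one entry per slice along
    `axis`. `output_size` is the sum of the repeats, or None.
    """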
values_dtype, values, axis, shape = draw(
helpers.dtype_values_axis(
available_dtypes=available_dtypes,
valid_axis=valid_axis,
force_int_axis=True,
shape=draw(
helpers.get_shape(
allow_none=False,
min_num_dims=0,
max_num_dims=max_num_dims,
min_dim_size=1,
max_dim_size=max_dim_size,
)
),
ret_shape=True,
)
)
if axis is None:
generate_repeats_as_integer = draw(st.booleans())
num_repeats = 1 if generate_repeats_as_integer else math.prod(tuple(shape))
else:
num_repeats = shape[axis]
repeats_dtype, repeats = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_value=0,
max_value=10,
shape=[num_repeats],
)
)
# Output size is an optional parameter accepted by Torch for optimisation
use_output_size = draw(st.booleans())
output_size = np.sum(repeats) if use_output_size else None
return [values_dtype, repeats_dtype], values, repeats, axis, output_size
@st.composite
def complex_strategy(
draw, min_num_dims=0, max_num_dims=5, min_dim_size=1, max_dim_size=10
):
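    """
    Draw a shape whose last dimension is 2, e.g. (3, 5, 2), as required by
    torch.view_as_complex where the trailing pair holds the real and
    imaginary parts.
    """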
shape = draw(
st.lists(
helpers.ints(min_value=min_dim_size, max_value=max_dim_size),
min_size=min_num_dims,
max_size=max_num_dims,
)
)
shape = list(shape)
shape.append(2)
return tuple(shape)
# cross
@st.composite
def dtype_value1_value2_axis(
draw,
available_dtypes,
abs_smallest_val=None,
min_value=None,
max_value=None,
allow_inf=False,
exclude_min=False,
exclude_max=False,
min_num_dims=1,
max_num_dims=10,
min_dim_size=1,
max_dim_size=10,
specific_dim_size=3,
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
):
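    """
    Draw two same-shaped arrays and an axis for cross-product style tests.

    A dimension of size `specific_dim_size` (3 by default) is inserted at the
    drawn axis so that torch.cross always has a valid vector dimension.
    """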
# For cross product, a dim with size 3 is required
shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
axis = draw(helpers.ints(min_value=0, max_value=len(shape)))
# make sure there is a dim with specific dim size
shape = list(shape)
shape = shape[:axis] + [specific_dim_size] + shape[axis:]
shape = tuple(shape)
dtype = draw(st.sampled_from(draw(available_dtypes)))
values = []
for i in range(2):
values.append(
draw(
helpers.array_values(
dtype=dtype,
shape=shape,
abs_smallest_val=abs_smallest_val,
min_value=min_value,
max_value=max_value,
allow_inf=allow_inf,
exclude_min=exclude_min,
exclude_max=exclude_max,
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
)
)
)
value1, value2 = values[0], values[1]
return [dtype], value1, value2, axis
# --- Main --- #
# ------------ #
# atleast_1d
@handle_frontend_test(
fn_tree="torch.atleast_1d",
dtype_and_tensors=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=st.integers(min_value=1, max_value=5),
),
test_with_out=st.just(False),
)
def test_torch_atleast_1d(
*,
dtype_and_tensors,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, tensors = dtype_and_tensors
if isinstance(dtypes, list): # If more than one value was generated
args = {
f"x{i}": np.array(tensor, dtype=dtypes[i])
for i, tensor in enumerate(tensors)
}
else: # If exactly one value was generated
args = {"x0": np.array(tensors, dtype=dtypes)}
test_flags.num_positional_args = len(tensors)
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**args,
)
# atleast_2d
@handle_frontend_test(
fn_tree="torch.atleast_2d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=helpers.ints(min_value=1, max_value=10),
),
test_with_out=st.just(False),
)
def test_torch_atleast_2d(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, arrays = dtype_and_x
arys = {}
    for i, array in enumerate(arrays):
arys[f"arrs{i}"] = array
test_flags.num_positional_args = len(arys)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**arys,
)
# atleast_3d
@handle_frontend_test(
fn_tree="torch.atleast_3d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=helpers.ints(min_value=1, max_value=10),
),
test_with_out=st.just(False),
)
def test_torch_atleast_3d(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, arrays = dtype_and_x
arys = {}
for i, array in enumerate(arrays):
arys[f"arrs{i}"] = array
test_flags.num_positional_args = len(arys)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**arys,
)
@handle_frontend_test(
fn_tree="torch.block_diag",
dtype_and_tensors=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=st.integers(min_value=1, max_value=10),
min_num_dims=0,
max_num_dims=2,
allow_inf=True,
),
test_with_out=st.just(False),
)
def test_torch_block_diag(
*,
dtype_and_tensors,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, tensors = dtype_and_tensors
if isinstance(dtypes, list): # If more than one value was generated
args = {f"x{i}": np.array(t, dtype=dtypes[i]) for i, t in enumerate(tensors)}
else: # If exactly one value was generated
args = {"x0": np.array(tensors, dtype=dtypes)}
test_flags.num_positional_args = len(tensors)
helpers.test_frontend_function(
input_dtypes=dtypes,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
backend_to_test=backend_fw,
**args,
)
@handle_frontend_test(
fn_tree="torch.broadcast_shapes",
shapes=nph.mutually_broadcastable_shapes(
num_shapes=4, min_dims=1, max_dims=5, min_side=1, max_side=5
),
test_with_out=st.just(False),
)
def test_torch_broadcast_shapes(
*,
shapes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
shape, _ = shapes
shapes = {f"shape{i}": shape[i] for i in range(len(shape))}
test_flags.num_positional_args = len(shapes)
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=["int64"],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**shapes,
test_values=False,
)
assert ret == frontend_ret
@handle_frontend_test(
fn_tree="torch.broadcast_to",
array_and_shape=_get_input_and_broadcast_shape(),
test_with_out=st.just(False),
)
def test_torch_broadcast_to(
*,
array_and_shape,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, array, shape = array_and_shape
test_flags.num_positional_args = 2
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
array=array[0],
shape=shape,
)
@handle_frontend_test(
fn_tree="torch.cartesian_prod",
dtype_and_tensors=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=st.integers(min_value=1, max_value=5),
min_num_dims=1,
max_num_dims=1,
max_dim_size=5,
shared_dtype=True,
),
)
def test_torch_cartesian_prod(
*,
dtype_and_tensors,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, tensors = dtype_and_tensors
if isinstance(dtypes, list): # If more than one value was generated
args = {
f"x{i}": np.array(tensor, dtype=dtypes[i])
for i, tensor in enumerate(tensors)
}
else: # If exactly one value was generated
args = {"x0": np.array(tensors, dtype=dtypes)}
test_flags.num_positional_args = len(tensors)
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**args,
)
@handle_frontend_test(
fn_tree="torch.cdist",
dtypes_and_x=helpers.dtype_and_values(
shape=st.shared(helpers.get_shape(min_num_dims=2, max_num_dims=3), key="shape"),
shared_dtype=True,
num_arrays=2,
allow_inf=False,
available_dtypes=["float32", "float64"],
),
p=st.integers(min_value=0, max_value=1000000),
compute_mode=st.sampled_from(
[
"use_mm_for_euclid_dist_if_necessary",
"use_mm_for_euclid_dist",
"donot_use_mm_for_euclid_dist",
]
),
)
def test_torch_cdist(
*,
dtypes_and_x,
p,
compute_mode,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs = dtypes_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
p=p,
compute_mode=compute_mode,
)
# clone
@handle_frontend_test(
fn_tree="torch.clone",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
test_with_copy=st.just(True),
)
def test_torch_clone(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# corrcoef
@handle_frontend_test(
fn_tree="torch.corrcoef",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
max_dim_size=2,
min_value=1,
),
test_with_out=st.just(False),
)
def test_torch_corrcoef(
dtypes_and_x,
frontend,
fn_tree,
on_device,
test_flags,
backend_fw,
):
input_dtypes, x = dtypes_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
input=x[0],
)
# cov
@handle_frontend_test(
fn_tree="torch.cov",
dtype_x1_corr_cov=_get_dtype_value1_value2_cov(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
max_dim_size=5,
min_value=1,
max_value=1e10,
abs_smallest_val=0.01,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_torch_cov(
dtype_x1_corr_cov,
test_flags,
frontend,
fn_tree,
on_device,
backend_fw,
):
dtype, x1, correction, fweights, aweights = dtype_x1_corr_cov
helpers.test_frontend_function(
input_dtypes=["float64", "int64", "float64"],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
input=x1,
correction=correction,
fweights=fweights,
aweights=aweights,
)
@handle_frontend_test(
fn_tree="torch.cross",
dtype_input_other_dim=dtype_value1_value2_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=10,
min_dim_size=3,
max_dim_size=3,
min_value=-1e5,
max_value=1e5,
abs_smallest_val=0.01,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
)
def test_torch_cross(
dtype_input_other_dim,
frontend,
test_flags,
fn_tree,
backend_fw,
):
dtype, input, other, dim = dtype_input_other_dim
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
rtol=1e-2,
atol=1e-2,
input=input,
other=other,
dim=dim,
)
# cummax
@handle_frontend_test(
fn_tree="torch.cummax",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=2,
min_value=-100,
max_value=100,
valid_axis=True,
allow_neg_axes=False,
max_axes_size=1,
force_int_axis=True,
),
dtype=helpers.get_dtypes("float", none=True, full=False),
)
def test_torch_cummax(
*,
dtype_x_axis,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
if ivy.current_backend_str() == "torch":
test_flags.as_variable = [False]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
)
# cumprod
@handle_frontend_test(
fn_tree="torch.cumprod",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=5,
min_value=-100,
max_value=100,
valid_axis=True,
allow_neg_axes=False,
max_axes_size=1,
force_int_axis=True,
),
dtype=helpers.get_dtypes("numeric", none=True, full=False),
)
def test_torch_cumprod(
*,
dtype_x_axis,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
# ToDo: set as_variable_flags as the parameter generated by test_torch_cumsum once
# this issue is marked as completed https://github.com/pytorch/pytorch/issues/75733
if ivy.current_backend_str() == "torch":
test_flags.as_variable = [False]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
dtype=dtype[0],
)
# cumsum
@handle_frontend_test(
fn_tree="torch.cumsum",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=5,
valid_axis=True,
allow_neg_axes=False,
max_axes_size=1,
force_int_axis=True,
),
dtype=helpers.get_dtypes("numeric", none=True, full=False),
)
def test_torch_cumsum(
*,
dtype_x_axis,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
# ToDo: set as_variable_flags as the parameter generated by test_torch_cumsum once
# this issue is marked as completed https://github.com/pytorch/pytorch/issues/75733
if ivy.current_backend_str() == "torch":
test_flags.as_variable = [False]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
dtype=dtype[0],
)
# diag
@handle_frontend_test(
fn_tree="torch.diag",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(min_num_dims=1, max_num_dims=2), key="shape"),
),
diagonal=st.integers(min_value=-100, max_value=100),
)
def test_torch_diag(
*,
dtype_and_values,
diagonal,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, values = dtype_and_values
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=values[0],
diagonal=diagonal,
)
# diagflat
@handle_frontend_test(
fn_tree="torch.diagflat",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
offset=st.integers(min_value=-4, max_value=4),
test_with_out=st.just(False),
)
def test_torch_diagflat(
dtype_and_values,
offset,
test_flags,
backend_fw,
frontend,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
x=x[0],
offset=offset,
)
@handle_frontend_test(
fn_tree="torch.diagonal",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
),
dims_and_offset=helpers.dims_and_offset(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape")
),
)
def test_torch_diagonal(
*,
dtype_and_values,
dims_and_offset,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
dim1, dim2, offset = dims_and_offset
input = value[0]
num_dims = len(np.shape(input))
assume(dim1 != dim2)
if dim1 < 0:
assume(dim1 + num_dims != dim2)
if dim2 < 0:
assume(dim1 != dim2 + num_dims)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input,
offset=offset,
dim1=dim1,
dim2=dim2,
)
# diff
@handle_frontend_test(
fn_tree="torch.diff",
dtype_n_x_n_axis=helpers.dtype_values_axis(
available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
),
n=st.integers(min_value=0, max_value=5),
dtype_prepend=helpers.dtype_and_values(
available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"),
min_num_dims=1,
max_num_dims=1,
),
dtype_append=helpers.dtype_and_values(
available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"),
min_num_dims=1,
max_num_dims=1,
),
)
def test_torch_diff(
*,
dtype_n_x_n_axis,
n,
dtype_prepend,
dtype_append,
test_flags,
frontend,
backend_fw,
fn_tree,
):
input_dtype, x, axis = dtype_n_x_n_axis
_, prepend = dtype_prepend
_, append = dtype_append
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
input=x[0],
n=n,
dim=axis,
prepend=prepend[0],
append=append[0],
)
# einsum
@handle_frontend_test(
fn_tree="torch.einsum",
eq_n_op_n_shp=helpers.einsum_helper(),
)
def test_torch_einsum(
*,
eq_n_op_n_shp,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
eq, operands, dtypes = eq_n_op_n_shp
kw = {}
for i, x_ in enumerate(operands):
dtype = dtypes[i]
kw[f"x{i}"] = np.array(x_).astype(dtype)
test_flags.num_positional_args = len(operands) + 1
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
equation=eq,
**kw,
)
# erfinv
@handle_frontend_test(
fn_tree="torch.special.erfinv",
aliases=["torch.erfinv"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-1,
max_value=1,
abs_smallest_val=1e-05,
),
)
def test_torch_erfinv(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
@handle_frontend_test(
fn_tree="torch.flatten",
dtype_input_axes=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
valid_axis=True,
min_num_dims=1,
min_axes_size=2,
max_axes_size=2,
),
)
def test_torch_flatten(
*,
dtype_input_axes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input, axes = dtype_input_axes
if isinstance(axes, int):
start_dim = axes
end_dim = -1
else:
start_dim = axes[0]
end_dim = axes[1]
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
start_dim=start_dim,
end_dim=end_dim,
)
# flip
@handle_frontend_test(
fn_tree="torch.flip",
dtype_and_values=helpers.dtype_and_values(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
available_dtypes=helpers.get_dtypes("float"),
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
force_tuple=True,
),
test_with_copy=st.just(True),
)
def test_torch_flip(
*,
dtype_and_values,
axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
dims=axis,
)
# fliplr
@handle_frontend_test(
fn_tree="torch.fliplr",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=helpers.get_shape(min_num_dims=2),
),
test_with_copy=st.just(True),
)
def test_torch_fliplr(
*,
dtype_and_values,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
)
# flipud
@handle_frontend_test(
fn_tree="torch.flipud",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=helpers.get_shape(min_num_dims=1),
),
test_with_copy=st.just(True),
)
def test_torch_flipud(
*,
dtype_and_values,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
)
# gcd
@handle_frontend_test(
fn_tree="torch.gcd",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
num_arrays=2,
shared_dtype=True,
),
)
def test_torch_gcd(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
# kron
@handle_frontend_test(
fn_tree="torch.kron",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), num_arrays=2
),
)
def test_torch_kron(
dtype_and_x,
frontend,
fn_tree,
test_flags,
backend_fw,
on_device,
):
input_dtypes, x = dtype_and_x
input, label = x[0], x[1]
helpers.test_frontend_function(
input_dtypes=["float32"],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input,
other=label,
)
# lcm
@handle_frontend_test(
fn_tree="torch.lcm",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
allow_nan=False,
),
)
def test_torch_lcm(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
# logcumsumexp
@handle_frontend_test(
fn_tree="torch.logcumsumexp",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=st.shared(helpers.get_shape(), key="shape"),
max_value=100,
min_value=-100,
),
dim=helpers.get_axis(
shape=st.shared(helpers.get_shape(), key="shape"), force_int=True
),
)
def test_torch_logcumsumexp(
*,
dtype_and_input,
dim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input = dtype_and_input
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
input=input[0],
dim=dim,
)
# meshgrid
@handle_frontend_test(
fn_tree="torch.meshgrid",
dtypes_and_tensors=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=st.integers(min_value=2, max_value=5),
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
max_dim_size=5,
shared_dtype=True,
),
indexing=st.sampled_from(["ij", "xy"]),
)
def test_torch_meshgrid(
*,
dtypes_and_tensors,
indexing,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, tensors = dtypes_and_tensors
kwargs = {
f"tensor{i}": np.array(tensor, dtype=dtypes[i])
for i, tensor in enumerate(tensors)
}
kwargs["indexing"] = indexing
test_flags.num_positional_args = len(tensors)
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**kwargs,
)
# ravel
@handle_frontend_test(
fn_tree="torch.ravel",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
),
)
def test_torch_ravel(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=np.asarray(x[0], dtype=input_dtype[0]),
)
# renorm
@handle_frontend_test(
fn_tree="torch.renorm",
dtype_and_values=helpers.dtype_and_values(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
available_dtypes=helpers.get_dtypes("numeric"),
max_value=1e4,
min_value=-1e4,
),
dim=helpers.get_axis(
shape=st.shared(helpers.get_shape(), key="shape"),
force_int=True,
),
p=st.floats(
min_value=0.5,
exclude_min=True,
max_value=5,
), # Non-positive norms aren't supported in backends.
# Small positive norms cause issues due to finite-precision.
maxnorm=st.floats(min_value=0), # Norms are positive semi-definite
)
def test_torch_renorm(
*,
dtype_and_values,
p,
dim,
maxnorm,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, values = dtype_and_values
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-02,
input=values[0],
p=p,
dim=dim,
maxnorm=maxnorm,
)
# repeat_interleave
@handle_frontend_test(
fn_tree="torch.repeat_interleave",
dtype_values_repeats_axis_output_size=_get_repeat_interleaves_args(
available_dtypes=helpers.get_dtypes("valid"),
valid_axis=True,
max_num_dims=4,
max_dim_size=4,
),
)
def test_torch_repeat_interleave(
*,
dtype_values_repeats_axis_output_size,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, values, repeats, axis, output_size = dtype_values_repeats_axis_output_size
helpers.test_frontend_function(
input_dtypes=dtype[0],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=values[0],
repeats=repeats[0],
dim=axis,
output_size=output_size,
)
# roll
@handle_frontend_test(
fn_tree="torch.roll",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
),
shift=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
force_tuple=True,
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
force_tuple=True,
),
)
def test_torch_roll(
*,
dtype_and_values,
shift,
axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
if isinstance(shift, int) and isinstance(axis, tuple):
axis = axis[0]
if isinstance(shift, tuple) and isinstance(axis, tuple):
if len(shift) != len(axis):
mn = min(len(shift), len(axis))
shift = shift[:mn]
axis = axis[:mn]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
shifts=shift,
dims=axis,
)
# rot90
@handle_frontend_test(
fn_tree="torch.rot90",
dtype_and_x=helpers.dtype_and_values(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
available_dtypes=helpers.get_dtypes("numeric"),
),
dims=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
min_size=2,
max_size=2,
unique=True,
allow_neg=False,
force_tuple=True,
),
k=st.integers(min_value=-10, max_value=10),
)
def test_torch_rot90(
*,
dtype_and_x,
dims,
k,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
k=k,
dims=dims,
)
@handle_frontend_test(
fn_tree="torch.searchsorted",
dtype_x_v=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shared_dtype=True,
min_num_dims=1,
max_num_dims=1,
num_arrays=2,
),
side=st.sampled_from(["left", "right", None]),
out_int32=st.booleans(),
right=st.sampled_from([True, False, None]),
    use_sorter=st.booleans(),
    test_with_out=st.booleans(),
)
def test_torch_searchsorted(
dtype_x_v,
side,
out_int32,
    right,
    use_sorter,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
potential_kwargs = {}
if side == "left" and right:
right = None # this combo will cause an exception
if side is not None:
potential_kwargs["side"] = side
if right is not None:
potential_kwargs["right"] = right
input_dtypes, xs = dtype_x_v
    if use_sorter:
        # pass an explicit argsort-based sorter instead of pre-sorting the sequence
sorter = np.argsort(xs[0])
sorter = np.array(sorter, dtype=np.int64)
else:
xs[0] = np.sort(xs[0])
sorter = None
helpers.test_frontend_function(
input_dtypes=input_dtypes + ["int64"],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
sorted_sequence=xs[0],
values=xs[1],
out_int32=out_int32,
sorter=sorter,
**potential_kwargs,
)
@handle_frontend_test(
fn_tree="torch.tensordot",
dtype_values_and_axes=_get_dtype_value1_value2_axis_for_tensordot(
helpers.get_dtypes(kind="float"),
min_value=-10,
max_value=10,
),
)
def test_torch_tensordot(
dtype_values_and_axes,
test_flags,
frontend,
backend_fw,
fn_tree,
):
dtype, a, b, dims = dtype_values_and_axes
if ivy.current_backend_str() == "paddle":
# Paddle only supports ndim from 0 to 9
assume(a.shape[0] < 10)
assume(b.shape[0] < 10)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
a=a,
b=b,
rtol=1e-2,
atol=1e-2,
dims=dims,
)
# trace
@handle_frontend_test(
fn_tree="torch.trace",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=st.shared(helpers.get_shape(min_num_dims=2, max_num_dims=2), key="shape"),
),
)
def test_torch_trace(
*,
dtype_and_values,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, value = dtype_and_values
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
)
# tril
@handle_frontend_test(
fn_tree="torch.tril",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2, # Torch requires this.
),
diagonal=st.integers(min_value=-100, max_value=100),
)
def test_torch_tril(
*,
dtype_and_values,
diagonal,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, values = dtype_and_values
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=values[0],
diagonal=diagonal,
)
# tril_indices
@handle_frontend_test(
fn_tree="torch.tril_indices",
row=st.integers(min_value=1, max_value=10),
col=st.integers(min_value=1, max_value=10),
offset=st.integers(min_value=-8, max_value=8),
dtype=helpers.get_dtypes("integer", full=False),
)
def test_torch_tril_indices(
*,
row,
col,
offset,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=[ivy.int32],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
row=row,
col=col,
offset=offset,
dtype=dtype[0],
)
@handle_frontend_test(
fn_tree="torch.triu",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2, # Torch requires this.
),
diagonal=st.integers(min_value=-100, max_value=100),
)
def test_torch_triu(
*,
dtype_and_values,
diagonal,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, values = dtype_and_values
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=values[0],
diagonal=diagonal,
)
@handle_frontend_test(
fn_tree="torch.triu_indices",
row=st.integers(min_value=1, max_value=100),
col=st.integers(min_value=1, max_value=100),
offset=st.integers(min_value=-10, max_value=10),
)
def test_torch_triu_indices(
*,
row,
col,
offset,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=["int32"],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
row=row,
col=col,
offset=offset,
)
# unflatten
@handle_frontend_test(
fn_tree="torch.unflatten",
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
shape_key="shape",
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
force_int=True,
),
)
def test_torch_unflatten(
*,
dtype_and_values,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
shape,
axis,
):
dtype, x = dtype_and_values
sizes = sizes_(shape, axis)
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
input=x[0],
dim=axis,
sizes=sizes,
)
# vander
@handle_frontend_test(
fn_tree="torch.vander",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.tuples(
st.integers(min_value=1, max_value=5),
),
min_num_dims=0,
max_num_dims=5,
),
N=st.integers(min_value=1, max_value=10) | st.none(),
increasing=st.booleans(),
)
def test_torch_vander(
*,
dtype_and_x,
N,
increasing,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=np.asarray(x[0], dtype=input_dtype[0]),
N=N,
increasing=increasing,
)
# view_as_complex
@handle_frontend_test(
fn_tree="torch.view_as_complex",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(complex_strategy()),
),
)
def test_torch_view_as_complex(
*,
dtype_and_values,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, value = dtype_and_values
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
)
# view_as_real
@handle_frontend_test(
fn_tree="torch.view_as_real",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
),
)
def test_torch_view_as_real(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=np.asarray(x[0], dtype=input_dtype[0]),
)
# End of file: ivy_tests/test_ivy/test_frontends/test_torch/test_miscellaneous_ops.py
# global
from hypothesis import strategies as st
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_statistical import (
_statistical_dtype_values,
_get_castable_dtype,
)
from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_statistical import ( # noqa
_quantile_helper,
)
# --- Helpers --- #
# --------------- #
@st.composite
def _get_axis_and_p(draw, kind="valid"):
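    """
    Draw a norm order `p`, an input array with a reduction axis, and an output
    dtype.

    Matrix norms ("fro", "nuc") force exactly two reduction axes, while vector
    norms allow one to five; the returned dtype is promoted from the input
    dtype whenever the cast is allowed.
    """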
p = draw(st.sampled_from(["fro", "nuc", 1, 2, -1, -2, float("inf"), -float("inf")]))
if p in ["fro", "nuc"]:
max_axes_size = 2
min_axes_size = 2
else:
min_axes_size = 1
max_axes_size = 5
dtype_x_axis = draw(
helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes(kind),
min_num_dims=2,
valid_axis=True,
min_value=-1e04,
max_value=1e04,
min_axes_size=min_axes_size,
max_axes_size=max_axes_size,
large_abs_safety_factor=2,
safety_factor_scale="log",
force_int_axis=True,
)
)
input_dtype, x, axis = dtype_x_axis
if type(input_dtype[0]) == str: # noqa: E721
if "complex" in input_dtype[0]:
kind = "complex"
if "float" in input_dtype[0]:
kind = "float"
else:
if input_dtype[0].is_complex_dtype:
kind = "complex"
if input_dtype[0].is_float_dtype:
kind = "float"
dtype = draw(helpers.get_dtypes(kind, full=False))
dtype = dtype[0]
if ivy.can_cast(input_dtype[0], dtype):
dtype = ivy.promote_types(input_dtype[0], dtype)
else:
dtype = input_dtype[0]
return p, input_dtype, x, axis, dtype
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="torch.all",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
allow_inf=False,
),
keepdims=st.booleans(),
)
def test_torch_all(
*,
dtype_input_axis,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
)
@handle_frontend_test(
fn_tree="torch.amax",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
min_axis=-1,
max_axis=0,
),
keepdims=st.booleans(),
)
def test_torch_amax(
*,
dtype_input_axis,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
)
@handle_frontend_test(
fn_tree="torch.amin",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
min_axis=-1,
max_axis=0,
),
keepdims=st.booleans(),
)
def test_torch_amin(
*,
dtype_input_axis,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
)
@handle_frontend_test(
fn_tree="torch.aminmax",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
min_axis=-1,
max_axis=0,
),
keepdims=st.booleans(),
)
def test_torch_aminmax(
*,
dtype_input_axis,
keepdims,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
)
@handle_frontend_test(
fn_tree="torch.any",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
safety_factor_scale="log",
small_abs_safety_factor=8,
large_abs_safety_factor=8,
min_axis=-1,
max_axis=0,
min_num_dims=1,
allow_inf=False,
),
keepdims=st.booleans(),
)
def test_torch_any(
*,
dtype_input_axis,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
)
@handle_frontend_test(
fn_tree="torch.argmax",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
force_int_axis=True,
valid_axis=True,
),
keepdims=st.booleans(),
)
def test_torch_argmax(
*,
dtype_input_axis,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
)
@handle_frontend_test(
fn_tree="torch.argmin",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
force_int_axis=True,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
min_value=1,
max_value=5,
valid_axis=True,
allow_neg_axes=True,
),
keepdims=st.booleans(),
)
def test_torch_argmin(
*,
dtype_input_axis,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
)
@handle_frontend_test(
fn_tree="torch.count_nonzero",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
force_int_axis=True,
min_num_dims=1,
min_axis=-1,
max_axis=0,
),
)
def test_torch_count_nonzero(
*,
dtype_input_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
)
@handle_frontend_test(
fn_tree="torch.dist",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_value=-1e04,
max_value=1e04,
allow_inf=False,
),
p=helpers.floats(min_value=1.0, max_value=10.0),
)
def test_torch_dist(
*,
dtype_and_input,
p,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
other=input[1],
p=p,
)
@handle_frontend_test(
fn_tree="torch.logsumexp",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=-50,
max_value=50,
min_num_dims=1,
max_num_dims=5,
valid_axis=True,
force_int_axis=True,
),
keepdims=st.booleans(),
)
def test_torch_logsumexp(
*,
dtype_input_axis,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
)
@handle_frontend_test(
fn_tree="torch.max",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
num_arrays=st.integers(min_value=1, max_value=2),
),
keepdim=st.booleans(),
)
def test_torch_max(
*,
dtype_input_axis,
keepdim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input, axis = dtype_input_axis
inputs = {f"input{i}": input[i] for i in range(len(input))}
kwargs = {"dim": axis, "keepdim": keepdim} if len(inputs) == 1 else {}
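    # with two tensors torch.max is the elementwise maximum, so dim/keepdim only
    # apply to the single-input reduction form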
test_flags.num_positional_args = len(input)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**inputs,
**kwargs,
)
@handle_frontend_test(
fn_tree="torch.mean",
dtype_and_x=_statistical_dtype_values(
function="mean",
min_value=-1e04,
max_value=1e04,
),
keepdims=st.booleans(),
dtypes=helpers.get_dtypes("float_and_complex", none=True, full=False),
)
def test_torch_mean(
*,
dtype_and_x,
keepdims,
dtypes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis, *_ = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
dtype=dtypes[0],
atol=1e-2,
)
@handle_frontend_test(
fn_tree="torch.median",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
),
keepdim=st.booleans(),
)
def test_torch_median(
*,
dtype_input_axis,
keepdim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input, dim = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
dim=dim,
keepdim=keepdim,
)
# min
@handle_frontend_test(
fn_tree="torch.min",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
num_arrays=st.integers(min_value=1, max_value=2),
),
keepdim=st.booleans(),
)
def test_torch_min(
*,
dtype_input_axis,
keepdim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input, axis = dtype_input_axis
inputs = {f"input{i}": input[i] for i in range(len(input))}
kwargs = {"dim": axis, "keepdim": keepdim} if len(inputs) == 1 else {}
test_flags.num_positional_args = len(input)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**inputs,
**kwargs,
)
# moveaxis
@handle_frontend_test(
fn_tree="torch.moveaxis",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-100,
max_value=100,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
),
source=helpers.get_axis(
allow_none=False,
unique=True,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
min_size=1,
force_int=True,
),
destination=helpers.get_axis(
allow_none=False,
unique=True,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
min_size=1,
force_int=True,
),
)
def test_torch_moveaxis(
*,
dtype_and_a,
source,
destination,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=a[0],
source=source,
destination=destination,
)
@handle_frontend_test(
fn_tree="torch.nanmean",
dtype_and_x=_statistical_dtype_values(
function="nanmean",
min_value=-1e04,
max_value=1e04,
),
keepdims=st.booleans(),
)
def test_torch_nanmean(
*,
dtype_and_x,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis, *_ = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
)
@handle_frontend_test(
fn_tree="torch.nanmedian",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
),
keepdim=st.booleans(),
)
def test_torch_nanmedian(
*,
dtype_input_axis,
keepdim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input, dim = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
dim=dim,
keepdim=keepdim,
)
@handle_frontend_test(
fn_tree="torch.nansum",
dtype_and_x=_get_castable_dtype(
min_value=-1e04,
max_value=1e04,
),
keepdims=st.booleans(),
)
def test_torch_nansum(
*,
dtype_and_x,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis, castable_dtype = dtype_and_x
if test_flags.as_variable:
castable_dtype = input_dtype
input_dtype = [input_dtype]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
dtype=castable_dtype,
)
# norm
@handle_frontend_test(
fn_tree="torch.norm",
p_dtype_x_axis=_get_axis_and_p(),
keepdim=st.booleans(),
)
def test_torch_norm(
*,
p_dtype_x_axis,
keepdim,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
p, x_dtype, x, axis, dtype = p_dtype_x_axis
helpers.test_frontend_function(
backend_to_test=backend_fw,
input_dtypes=x_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
atol=1e-08,
input=x[0],
p=p,
dim=axis,
keepdim=keepdim,
out=None,
dtype=dtype,
)
# prod
@handle_frontend_test(
fn_tree="torch.prod",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=5,
valid_axis=True,
allow_neg_axes=False,
max_axes_size=1,
force_int_axis=True,
large_abs_safety_factor=10,
small_abs_safety_factor=10,
safety_factor_scale="log",
),
dtype=helpers.get_dtypes("numeric", none=True, full=False),
keepdims=st.booleans(),
)
def test_torch_prod(
*,
dtype_x_axis,
dtype,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
# ToDo: set as_variable_flags as the parameter generated by test_torch_prod once
# this issue is marked as completed https://github.com/pytorch/pytorch/issues/75733
if ivy.current_backend_str() == "torch":
test_flags.as_variable = [False]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
dtype=dtype[0],
)
@handle_frontend_test(
fn_tree="torch.quantile",
dtype_and_x=_quantile_helper(),
keepdims=st.booleans(),
)
def test_torch_quantile(
*,
dtype_and_x,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis, interpolation, q = dtype_and_x
if type(axis) is tuple:
axis = axis[0]
if interpolation == "nearest_jax":
interpolation = "nearest"
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
q=q,
dim=axis,
keepdim=keepdims,
interpolation=interpolation[0],
)
@handle_frontend_test(
fn_tree="torch.std",
dtype_and_x=_statistical_dtype_values(function="std"),
keepdims=st.booleans(),
)
def test_torch_std(
*,
dtype_and_x,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis, correction = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
unbiased=bool(correction),
keepdim=keepdims,
)
@handle_frontend_test(
fn_tree="torch.std_mean",
dtype_and_x=_statistical_dtype_values(
function="std_mean",
min_value=-1e04,
max_value=1e04,
),
keepdims=st.booleans(),
)
def test_torch_std_mean(
*,
dtype_and_x,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis, correction = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
unbiased=bool(correction),
keepdim=keepdims,
)
@handle_frontend_test(
fn_tree="torch.sum",
dtype_and_x=_get_castable_dtype(
min_value=-1e04,
max_value=1e04,
),
keepdims=st.booleans(),
)
def test_torch_sum(
*,
dtype_and_x,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis, castable_dtype = dtype_and_x
if test_flags.as_variable:
castable_dtype = input_dtype
input_dtype = [input_dtype]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
keepdim=keepdims,
dtype=castable_dtype,
)
@handle_frontend_test(
fn_tree="torch.unique",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
force_int_axis=True,
valid_axis=True,
),
return_inverse=st.booleans(),
return_counts=st.booleans(),
sorted=st.booleans(),
)
def test_torch_unique(
*,
dtype_x_axis,
return_inverse,
return_counts,
sorted,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
sorted=sorted,
return_inverse=return_inverse,
return_counts=return_counts,
dim=axis,
)
# known bug of returning empty tensors when ret_inv or ret_counts is passed positionally
# https://github.com/pytorch/pytorch/issues/68610
# ToDo: activate test_values when this is resolved
@handle_frontend_test(
fn_tree="torch.unique_consecutive",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=2,
force_int_axis=True,
valid_axis=True,
),
ret_inv=st.booleans(),
ret_counts=st.booleans(),
)
def test_torch_unique_consecutive(
*,
dtype_x_axis,
ret_inv,
ret_counts,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
return_inverse=ret_inv,
return_counts=ret_counts,
dim=axis,
test_values=False,
)
@handle_frontend_test(
fn_tree="torch.var",
dtype_and_x=_statistical_dtype_values(
function="var",
min_value=-1e04,
max_value=1e04,
),
keepdims=st.booleans(),
)
def test_torch_var(
*,
dtype_and_x,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis, correction = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
unbiased=bool(correction),
keepdim=keepdims,
)
@handle_frontend_test(
fn_tree="torch.var_mean",
dtype_and_x=_statistical_dtype_values(
function="var_mean",
min_value=-1e04,
max_value=1e04,
),
keepdims=st.booleans(),
)
def test_torch_var_mean(
*,
dtype_and_x,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis, correction = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=axis,
unbiased=bool(correction),
keepdim=keepdims,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_reduction_ops.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_reduction_ops.py",
"repo_id": "ivy",
"token_count": 13297
} | 70 |
"""Collection of tests for unified dtype functions."""
# global
import numpy as np
from hypothesis import strategies as st
import typing
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test, BackendHandler
# for data generation in multiple tests
dtype_shared = helpers.get_dtypes("valid", full=False, key="dtype")
# --- Helpers --- #
# --------------- #
@st.composite
def _array_or_type(draw, float_or_int):
valid_dtypes = {
"float": draw(helpers.get_dtypes("float")),
"int": draw(helpers.get_dtypes("integer")),
}[float_or_int]
return draw(
st.sampled_from(
(
draw(
helpers.dtype_and_values(
available_dtypes=valid_dtypes,
)
),
draw(st.sampled_from(valid_dtypes)),
)
)
)
def _composition_1():
return ivy.relu().argmax()
def _composition_2():
a = ivy.floor
return ivy.ceil() or a
# Array API Standard Function Tests #
# --------------------------------- #
@st.composite
def astype_helper(draw):
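    # draw an array plus a second dtype that its values can safely be cast to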
dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
small_abs_safety_factor=4,
large_abs_safety_factor=4,
safety_factor_scale="log",
)
)
cast_dtype = draw(
helpers.get_castable_dtype(draw(helpers.get_dtypes("valid")), dtype[0], x)
)
return dtype, x, cast_dtype
# broadcast arrays
@st.composite
def broadcastable_arrays(draw, dtypes):
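    # draw between 2 and 5 mutually broadcastable shapes and build one array per shape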
num_arrays = st.shared(helpers.ints(min_value=2, max_value=5), key="num_arrays")
shapes = draw(num_arrays.flatmap(helpers.mutually_broadcastable_shapes))
dtypes = draw(dtypes)
arrays = []
for c, (shape, dtype) in enumerate(zip(shapes, dtypes), 1):
x = draw(helpers.array_values(dtype=dtype, shape=shape), label=f"x{c}").tolist()
arrays.append(x)
return arrays
@st.composite
def dtypes_list(draw):
num = draw(st.one_of(helpers.ints(min_value=1, max_value=5)))
return draw(
st.lists(
st.sampled_from(ivy.valid_dtypes),
min_size=num,
max_size=num,
)
)
@st.composite
def dtypes_shared(draw, num_dtypes):
if isinstance(num_dtypes, str):
num_dtypes = draw(st.shared(helpers.ints(), key=num_dtypes))
return draw(
st.shared(
st.lists(
st.sampled_from(draw(helpers.get_dtypes("valid"))),
min_size=num_dtypes,
max_size=num_dtypes,
),
key="dtypes",
)
)
# --- Main --- #
# ------------ #
# as_ivy_dtype
@handle_test(
fn_tree="functional.ivy.as_ivy_dtype",
input_dtype=helpers.get_dtypes("valid", full=False),
)
def test_as_ivy_dtype(
*,
input_dtype,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
input_dtype = input_dtype[0]
res = ivy_backend.as_ivy_dtype(input_dtype)
if isinstance(input_dtype, str):
assert isinstance(res, str)
return
assert isinstance(
input_dtype, (ivy_backend.Dtype, str)
), f"input_dtype={input_dtype!r}, but should be str or ivy.Dtype"
assert isinstance(res, str), f"result={res!r}, but should be str"
# as_native_dtype
@handle_test(
fn_tree="functional.ivy.as_native_dtype",
input_dtype=helpers.get_dtypes("valid", full=False),
)
def test_as_native_dtype(
*,
input_dtype,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
input_dtype = input_dtype[0]
res = ivy_backend.as_native_dtype(input_dtype)
if isinstance(input_dtype, ivy_backend.NativeDtype):
assert isinstance(res, ivy_backend.NativeDtype)
return
assert isinstance(
input_dtype, (ivy_backend.Dtype, str)
), f"input_dtype={input_dtype!r}, but should be str or ivy.Dtype"
assert isinstance(
res, ivy_backend.NativeDtype
), f"result={res!r}, but should be ivy.NativeDtype"
# astype
@handle_test(
fn_tree="functional.ivy.astype",
dtype_and_x_and_cast_dtype=astype_helper(),
test_gradients=st.just(False),
test_with_copy=st.just(True),
)
def test_astype(
*, dtype_and_x_and_cast_dtype, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x, cast_dtype = dtype_and_x_and_cast_dtype
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-3,
atol_=1e-3,
x=x[0],
dtype=cast_dtype[0],
)
@handle_test(
fn_tree="functional.ivy.broadcast_arrays",
arrays=broadcastable_arrays(dtypes_shared("num_arrays")),
input_dtypes=dtypes_shared("num_arrays"),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_broadcast_arrays(
*, arrays, input_dtypes, test_flags, backend_fw, fn_name, on_device
):
if backend_fw == "torch":
for input_dtype in input_dtypes:
if input_dtype == "bfloat16" or (
"uint" in input_dtype and "uint8" not in input_dtype
):
# Torch has no inference strategy for bfloat16
# Torch has no support for uint above uint8
return
kw = {}
for i, (array, dtype) in enumerate(zip(arrays, input_dtypes)):
kw[f"x{i}"] = np.asarray(array, dtype=dtype)
test_flags.num_positional_args = len(kw)
helpers.test_function(
input_dtypes=input_dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
on_device=on_device,
fn_name=fn_name,
**kw,
)
@handle_test(
fn_tree="functional.ivy.broadcast_to",
array_and_shape=helpers.array_and_broadcastable_shape(dtype_shared),
input_dtype=dtype_shared,
test_gradients=st.just(False),
)
def test_broadcast_to(
*, array_and_shape, input_dtype, test_flags, backend_fw, fn_name, on_device
):
array, to_shape = array_and_shape
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=array,
shape=to_shape,
)
# can_cast
@handle_test(
fn_tree="functional.ivy.can_cast",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=1
),
to_dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_can_cast(*, dtype_and_x, to_dtype, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
from_=x[0],
to=to_dtype[0],
)
# closest_valid_dtypes
@handle_test(
fn_tree="functional.ivy.closest_valid_dtype",
input_dtype=helpers.get_dtypes("valid", full=False),
)
def test_closest_valid_dtype(
*, input_dtype, test_flags, backend_fw, fn_name, on_device
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
input_dtype = input_dtype[0]
res = ivy_backend.closest_valid_dtype(input_dtype)
assert isinstance(input_dtype, (ivy_backend.Dtype, str))
assert isinstance(
res, (ivy_backend.Dtype, str)
), f"result={res!r}, but should be str or ivy.Dtype"
# default_complex_dtype
@handle_test(
fn_tree="functional.ivy.default_complex_dtype",
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("complex")),
as_native=st.booleans(),
test_gradients=st.just(False),
)
def test_default_complex_dtype(
*,
dtype_x,
as_native,
backend_fw,
):
complex_dtype, x = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
res = ivy_backend.default_complex_dtype(
            input=x[0],
complex_dtype=complex_dtype[0],
as_native=as_native,
)
assert isinstance(
res,
(
ivy_backend.Dtype,
typing.get_args(ivy_backend.NativeDtype),
ivy_backend.NativeDtype,
str,
),
)
assert (
ivy_backend.default_complex_dtype(
input=None, complex_dtype=None, as_native=False
)
== ivy_backend.complex64
)
assert (
ivy_backend.default_complex_dtype(complex_dtype=ivy_backend.complex64)
== ivy_backend.complex64
)
assert ivy_backend.default_complex_dtype() == ivy_backend.complex64
# default_dtype
@handle_test(
fn_tree="functional.ivy.default_dtype",
input_dtype=helpers.get_dtypes("valid", full=False),
as_native=st.booleans(),
)
def test_default_dtype(
*,
input_dtype,
as_native,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
input_dtype = input_dtype[0]
res = ivy_backend.default_dtype(dtype=input_dtype, as_native=as_native)
assert isinstance(
input_dtype, (ivy_backend.Dtype, str, ivy_backend.NativeDtype)
)
assert isinstance(res, ivy_backend.Dtype) or isinstance(
input_dtype, str
), f"input_dtype={input_dtype!r}, but should be str or ivy.Dtype"
# default_float_dtype
@handle_test(
fn_tree="functional.ivy.default_float_dtype",
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
as_native=st.booleans(),
test_gradients=st.just(False),
)
def test_default_float_dtype(
*,
dtype_x,
as_native,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
float_dtype, x = dtype_x
res = ivy_backend.default_float_dtype(
            input=x[0],
float_dtype=float_dtype[0],
as_native=as_native,
)
assert isinstance(
res,
(
ivy_backend.Dtype,
typing.get_args(ivy_backend.NativeDtype),
ivy_backend.NativeDtype,
str,
),
)
assert (
ivy_backend.default_float_dtype(
input=None, float_dtype=None, as_native=False
)
== ivy_backend.float32
)
assert (
ivy_backend.default_float_dtype(float_dtype=ivy_backend.float16)
== ivy_backend.float16
)
assert ivy_backend.default_float_dtype() == ivy_backend.float32
# default_int_dtype
@handle_test(
fn_tree="functional.ivy.default_int_dtype",
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("integer")),
as_native=st.booleans(),
test_gradients=st.just(False),
)
def test_default_int_dtype(
*,
dtype_x,
as_native,
backend_fw,
):
int_dtype, x = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
res = ivy_backend.default_int_dtype(
            input=x[0],
int_dtype=int_dtype[0],
as_native=as_native,
)
assert isinstance(
res,
(
ivy_backend.Dtype,
typing.get_args(ivy_backend.NativeDtype),
ivy_backend.NativeDtype,
str,
),
)
assert (
ivy_backend.default_int_dtype(input=None, int_dtype=None, as_native=False)
== ivy_backend.int32
)
assert (
ivy_backend.default_int_dtype(int_dtype=ivy_backend.int16)
== ivy_backend.int16
)
assert ivy_backend.default_int_dtype() == ivy_backend.int32
# dtype
# TODO: fix instance method
@handle_test(
fn_tree="functional.ivy.dtype",
array=helpers.array_values(
dtype=dtype_shared,
shape=helpers.lists(
x=helpers.ints(min_value=1, max_value=5),
min_size="num_dims",
max_size="num_dims",
size_bounds=[1, 5],
),
),
input_dtype=dtype_shared,
as_native=st.booleans(),
test_with_out=st.just(False),
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_dtype(
*, array, input_dtype, as_native, test_flags, backend_fw, fn_name, on_device
):
if backend_fw == "torch":
if input_dtype == "bfloat16" or (
"uint" in input_dtype and "uint8" not in input_dtype
):
# Torch has no inference strategy for bfloat16
# Torch has no support for uint above uint8
return
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=array,
as_native=as_native,
test_values=False,
)
# dtype_bits
# TODO: fix instance method
@handle_test(
fn_tree="functional.ivy.dtype_bits",
input_dtype=helpers.get_dtypes("valid", full=False),
test_instance_method=st.just(False),
container_flags=st.just([False]),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_dtype_bits(*, input_dtype, test_flags, backend_fw, fn_name, on_device):
ret = helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
dtype_in=input_dtype[0],
test_values=False,
)
if not ivy.exists(ret):
return
num_bits, num_bits_np = ret
assert num_bits == num_bits_np
# dtype objects
@handle_test(fn_tree="functional.ivy.exists") # dummy fn_tree
def test_dtype_instances(backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
assert ivy_backend.exists(ivy_backend.int8)
assert ivy_backend.exists(ivy_backend.int16)
assert ivy_backend.exists(ivy_backend.int32)
assert ivy_backend.exists(ivy_backend.int64)
assert ivy_backend.exists(ivy_backend.uint8)
if backend_fw not in ["torch", "paddle", "mxnet"]:
assert ivy_backend.exists(ivy_backend.uint16)
assert ivy_backend.exists(ivy_backend.uint32)
assert ivy_backend.exists(ivy_backend.uint64)
assert ivy_backend.exists(ivy_backend.float32)
assert ivy_backend.exists(ivy_backend.float64)
assert ivy_backend.exists(ivy_backend.complex64)
assert ivy_backend.exists(ivy_backend.complex128)
assert ivy_backend.exists(ivy_backend.bool)
# finfo
@handle_test(
fn_tree="functional.ivy.finfo",
type=_array_or_type("float"),
test_with_out=st.just(False),
as_variable_flags=st.just([False]),
native_array_flags=st.just([False]),
container_flags=st.just([False]),
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_finfo(*, type, test_flags, backend_fw, fn_name, on_device):
if isinstance(type, str):
input_dtype = [type]
else:
input_dtype, x = type
type = x[0]
ret = helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
type=type,
test_values=False,
)
if not ivy.exists(ret):
return
mach_lims, mach_lims_np = ret
assert np.allclose(mach_lims.min, mach_lims_np.min, rtol=1e-2, atol=1e-2)
assert np.allclose(mach_lims.max, mach_lims_np.max, rtol=1e-2, atol=1e-2)
assert np.allclose(mach_lims.eps, mach_lims_np.eps, rtol=1e-2, atol=1e-2)
assert mach_lims.bits == mach_lims_np.bits
# function_dtype_versioning
@handle_test(
fn_tree="functional.ivy.function_unsupported_dtypes", # dummy fn_tree
func_and_version=st.just(
[
{
"torch": {
"cumsum": {
"2.0.1": {"bfloat16", "uint8", "float16"},
"1.12.1": set(),
}
}
},
],
),
)
def test_function_dtype_versioning(
*,
func_and_version,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
for key in func_and_version:
if key != backend_fw:
continue
var = ivy_backend.backend_version
# key --> framework
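            # key1 --> function name, key2 --> backend version string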
for key1 in func_and_version[key]:
for key2 in func_and_version[key][key1]:
var["version"] = key2
fn = getattr(ivy_backend, key1)
expected = func_and_version[key][key1][key2]
res = fn.unsupported_dtypes
if res is None:
res = set()
else:
res = set(res)
if res != expected:
raise Exception
return True
# function_dtype_versioning_frontend
@handle_test(
fn_tree="functional.ivy.function_unsupported_dtypes", # dummy fn_tree
func_and_version=st.just(
[
{
"torch": {
"cumsum": {
"2.0.1": {"bfloat16", "uint8", "float16"},
"1.12.1": set(),
}
}
},
],
),
)
def test_function_dtype_versioning_frontend(
*,
func_and_version,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
_import_mod = ivy_backend.utils.dynamic_import
for key in func_and_version:
if key != backend_fw:
continue
frontend = _import_mod.import_module("ivy.functional.frontends")
var = frontend.versions
for key1 in func_and_version[key]:
for key2 in func_and_version[key][key1]:
var[backend_fw] = key2
fn = getattr(
_import_mod.import_module(
f"ivy.functional.frontends.{backend_fw}"
),
key1,
)
expected = func_and_version[key][key1][key2]
res = fn.unsupported_dtypes
if res is None:
res = set()
else:
res = set(res)
if res != expected:
raise Exception
return True
# function_supported_dtypes
@handle_test(
fn_tree="functional.ivy.function_supported_dtypes",
func=st.sampled_from([_composition_1, _composition_2]),
)
def test_function_supported_dtypes(*, func, backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
res = ivy_backend.function_supported_dtypes(func)
exp = set(ivy_backend.all_dtypes).difference(
set(func.test_unsupported_dtypes[backend_fw])
)
assert set(exp) == set(res)
# function_unsupported_dtypes
@handle_test(
fn_tree="functional.ivy.function_unsupported_dtypes",
func=st.sampled_from([_composition_2]),
)
def test_function_unsupported_dtypes(*, func, backend_fw):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
res = ivy_backend.function_unsupported_dtypes(func)
exp = func.test_unsupported_dtypes[backend_fw]
assert set(exp) == set(res)
# iinfo
@handle_test(
fn_tree="functional.ivy.iinfo",
type=_array_or_type("int"),
test_with_out=st.just(False),
as_variable_flags=st.just([False]),
native_array_flags=st.just([False]),
container_flags=st.just([False]),
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_iinfo(*, type, test_flags, backend_fw, fn_name, on_device):
if isinstance(type, str):
input_dtype = [type]
else:
input_dtype, x = type
type = x[0]
ret = helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
type=type,
test_values=False,
)
if not ivy.exists(ret):
return
mach_lims, mach_lims_np = ret
assert mach_lims.min == mach_lims_np.min
assert mach_lims.max == mach_lims_np.max
assert mach_lims.bits == mach_lims_np.bits
# invalid_dtype
@handle_test(
fn_tree="functional.ivy.invalid_dtype",
dtype_in=helpers.get_dtypes("valid", full=False),
)
def test_invalid_dtype(
*,
dtype_in,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
dtype_in = dtype_in[0]
res = ivy_backend.invalid_dtype(dtype_in)
invalid_dtypes = ivy_backend.invalid_dtypes
if dtype_in in invalid_dtypes:
assert res is True, (
f"fDtype = {dtype_in!r} is a valid dtype for {backend_fw}, butresult ="
f" {res}"
)
else:
assert res is False, (
f"fDtype = {dtype_in!r} is not a valid dtype for {backend_fw},"
f" butresult = {res}"
)
# is_bool_dtype
@handle_test(
fn_tree="functional.ivy.is_bool_dtype",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=False)
),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_is_bool_dtype(*, dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
dtype_in=x[0],
)
# is_complex_dtype
@handle_test(
fn_tree="functional.ivy.is_complex_dtype",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=False)
),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_is_complex_dtype(*, dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
dtype_in=x[0],
)
# is_float_dtype
@handle_test(
fn_tree="functional.ivy.is_float_dtype",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=False)
),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_is_float_dtype(*, dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
dtype_in=x[0],
)
# Extra Ivy Function Tests #
# ------------------------ #
# is_hashable_dtype
@handle_test(
fn_tree="functional.ivy.is_hashable_dtype",
input_dtype=helpers.get_dtypes("valid", full=False),
)
def test_is_hashable_dtype(
*,
input_dtype,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
input_dtype = input_dtype[0]
res = ivy_backend.is_hashable_dtype(input_dtype)
assert res
# is_int_dtype
@handle_test(
fn_tree="functional.ivy.is_int_dtype",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=False)
),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_is_int_dtype(*, dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
dtype_in=x[0],
)
# is_native_dtype
@handle_test(
fn_tree="functional.ivy.is_native_dtype",
input_dtype=helpers.get_dtypes("valid", full=False),
)
def test_is_native_dtype(
input_dtype,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
input_dtype = input_dtype[0]
if isinstance(input_dtype, str):
assert ivy_backend.is_native_dtype(input_dtype) is False
assert (
ivy_backend.is_native_dtype(ivy_backend.as_native_dtype(input_dtype))
is True
)
# is_uint_dtype
@handle_test(
fn_tree="functional.ivy.is_uint_dtype",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=False)
),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_is_uint_dtype(*, dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
dtype_in=x[0],
)
# promote_types
# TODO: fix instance method
@handle_test(
fn_tree="functional.ivy.promote_types",
type1=helpers.get_dtypes("valid", full=False),
type2=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_promote_types(*, type1, type2, test_flags, backend_fw, fn_name, on_device):
helpers.test_function(
input_dtypes=[],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
type1=type1[0],
type2=type2[0],
test_values=False,
)
# result_type
@handle_test(
fn_tree="functional.ivy.result_type",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=st.shared(helpers.ints(min_value=2, max_value=5), key="num_arrays"),
shared_dtype=False,
),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_result_type(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = helpers.as_lists(*dtype_and_x)
kw = {}
for i, (dtype_, x_) in enumerate(zip(dtype, x)):
kw[f"x{i}"] = x_
test_flags.num_positional_args = len(kw)
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
on_device=on_device,
fn_name=fn_name,
**kw,
)
# type_promote_arrays
# TODO: fix container method
@handle_test(
fn_tree="functional.ivy.type_promote_arrays",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=False,
),
test_with_out=st.just(False),
container_flags=st.just([False]),
test_gradients=st.just(False),
)
def test_type_promote_arrays(
*, dtype_and_values, test_flags, backend_fw, fn_name, on_device
):
types, arrays = dtype_and_values
helpers.test_function(
input_dtypes=types,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=arrays[0],
x2=arrays[1],
test_values=True,
)
# unset_default_complex_dtype
@handle_test(
fn_tree="functional.ivy.unset_default_complex_dtype",
dtype=helpers.get_dtypes("complex", full=False),
)
def test_unset_default_complex_dtype(
*,
dtype,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
dtype = dtype[0]
stack_size_before = len(ivy_backend.default_complex_dtype_stack)
ivy_backend.set_default_complex_dtype(dtype)
ivy_backend.unset_default_complex_dtype()
stack_size_after = len(ivy_backend.default_complex_dtype_stack)
assert (
stack_size_before == stack_size_after
), f"Default float dtype not unset. Stack size= {stack_size_after!r}"
# unset_default_dtype
@handle_test(
fn_tree="functional.ivy.unset_default_dtype",
dtype=helpers.get_dtypes("valid", full=False),
)
def test_unset_default_dtype(
*,
dtype,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
dtype = dtype[0]
stack_size_before = len(ivy_backend.default_dtype_stack)
ivy_backend.set_default_dtype(dtype)
ivy_backend.unset_default_dtype()
stack_size_after = len(ivy_backend.default_dtype_stack)
assert (
stack_size_before == stack_size_after
), f"Default dtype not unset. Stack size= {stack_size_after!r}"
# unset_default_float_dtype
@handle_test(
fn_tree="functional.ivy.unset_default_float_dtype",
dtype=helpers.get_dtypes("float", full=False),
)
def test_unset_default_float_dtype(
*,
dtype,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
dtype = dtype[0]
stack_size_before = len(ivy_backend.default_float_dtype_stack)
ivy_backend.set_default_float_dtype(dtype)
ivy_backend.unset_default_float_dtype()
stack_size_after = len(ivy_backend.default_float_dtype_stack)
assert (
stack_size_before == stack_size_after
), f"Default float dtype not unset. Stack size= {stack_size_after!r}"
# unset_default_int_dtype
@handle_test(
fn_tree="functional.ivy.unset_default_int_dtype",
dtype=helpers.get_dtypes("integer", full=False),
)
def test_unset_default_int_dtype(
*,
dtype,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
dtype = dtype[0]
stack_size_before = len(ivy_backend.default_int_dtype_stack)
ivy_backend.set_default_int_dtype(dtype)
ivy_backend.unset_default_int_dtype()
stack_size_after = len(ivy_backend.default_int_dtype_stack)
assert (
stack_size_before == stack_size_after
), f"Default int dtype not unset. Stack size= {stack_size_after!r}"
# valid_dtype
@handle_test(
fn_tree="functional.ivy.valid_dtype",
dtype_in=helpers.get_dtypes("valid", full=False),
)
def test_valid_dtype(
*,
dtype_in,
backend_fw,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
dtype_in = dtype_in[0]
res = ivy_backend.valid_dtype(dtype_in)
valid_dtypes = ivy_backend.valid_dtypes
if dtype_in in valid_dtypes:
assert res is True, (
f"fDtype = {dtype_in!r} is not a valid dtype for {backend_fw},"
f" butresult = {res}"
)
else:
assert res is False, (
f"fDtype = {dtype_in!r} is a valid dtype for {backend_fw}, butresult ="
f" {res}"
)
_composition_1.test_unsupported_dtypes = {
"numpy": ("bfloat16",),
"jax": ("complex64", "complex128"),
"tensorflow": ("complex64", "complex128"),
"torch": (
"uint16",
"uint32",
"uint64",
"float16",
"complex64",
"complex128",
),
"paddle": (
"uint16",
"uint32",
"uint64",
"bfloat16",
"complex64",
"complex128",
),
"mxnet": ("uint16", "uint32", "uint64", "complex64", "complex128"),
}
_composition_2.test_unsupported_dtypes = {
"numpy": ("bfloat16", "complex64", "complex128"),
"jax": ("complex64", "complex128"),
"tensorflow": ("complex64", "complex128"),
"torch": ("uint16", "uint32", "uint64", "float16", "complex64", "complex128"),
"paddle": (
"uint16",
"uint32",
"uint64",
"bfloat16",
),
}
| ivy/ivy_tests/test_ivy/test_functional/test_core/test_dtype.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_dtype.py",
"repo_id": "ivy",
"token_count": 15995
} | 71 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
@handle_test(
fn_tree="functional.ivy.experimental.optional_get_element",
dtype_and_x=helpers.dtype_and_values(),
input_tensor=st.booleans(),
)
def test_optional_get_element(
*,
dtype_and_x,
input_tensor,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, x = dtype_and_x
fn_input = x[0] if input_tensor else x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=fn_input,
)
| ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_utility.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_utility.py",
"repo_id": "ivy",
"token_count": 335
} | 72 |
# Global
import pytest
import itertools
from hypothesis import strategies as st, given, settings, HealthCheck
# Local
import ivy
import numpy as np
from ivy.utils.backend.handler import _backend_dict
def test_is_local(backend_fw):
local_ivy = ivy.with_backend(backend_fw)
assert local_ivy.is_local()
@settings(
# To be able to share traced_backends between examples
suppress_health_check=[HealthCheck(9)]
)
@given(name=st.sampled_from(["add", "Array", "Container", "globals_vars"]))
def test_memory_id(name, traced_backends):
for b in traced_backends:
assert id(getattr(ivy, name)) != id(
getattr(b, name)
), f"Shared object {name} between global Ivy and backend {b.backend}"
for comb in itertools.combinations(traced_backends, 2):
assert id(getattr(comb[0], name)) != id(getattr(comb[1], name)), (
f"Shared object {name} between {comb[0].backend} and backend "
f"{comb[1].backend}"
)
def test_prevent_access(backend_fw):
local_ivy = ivy.with_backend(backend_fw)
with pytest.raises(RuntimeError):
local_ivy.with_backend(backend_fw)
with pytest.raises(RuntimeError):
local_ivy.set_backend(backend_fw)
def test_with_backend_array(backend_fw):
local_ivy = ivy.with_backend(backend_fw)
local_x = local_ivy.array([1, 2, 3, 4])
ivy.set_backend(backend_fw)
x = ivy.array([1, 2, 3, 4])
assert np.allclose(x._data, local_x._data)
def test_with_backend_cached(backend_fw):
non_cached_local_ivy = ivy.with_backend(backend_fw)
cached_local_ivy = ivy.with_backend(backend_fw)
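    # with_backend caches the constructed backend, so repeated calls should return equal (cached) modules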
assert non_cached_local_ivy == cached_local_ivy
@pytest.fixture
def traced_backends():
traced_backends = []
for b in _backend_dict:
_b = ivy.with_backend(b)
traced_backends.append(_b)
return traced_backends
| ivy/ivy_tests/test_ivy/test_misc/test_backend_utils/test_with_backend.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_backend_utils/test_with_backend.py",
"repo_id": "ivy",
"token_count": 794
} | 73 |
# Run Array API Tests for PRs
import os
import subprocess
import sys
BACKENDS = ["numpy", "jax", "tensorflow", "torch"]
def main():
failed = False
k_flag = {}
subprocess.run(
["python3", "ivy_tests/array_api_testing/write_array_api_tests_k_flag.py"],
check=True,
)
for backend in BACKENDS:
k_flag_file = f"ivy_tests/array_api_testing/.array_api_tests_k_flag_{backend}"
with open(k_flag_file, "r") as f:
array_api_tests_k_flag = f.read().strip()
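        # torch has no support for uint dtypes above uint8, so exclude those test cases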
if backend == "torch":
array_api_tests_k_flag += " and not (uint16 or uint32 or uint64)"
k_flag[backend] = array_api_tests_k_flag
with open(sys.argv[1], "w") as f_write:
with open("tests_to_run", "r") as f:
for line in f:
test, backend = line.split(",")
backend = backend.strip("\n")
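                # run the test inside the unifyai/ivy container for the given backend, with a 30 minute timeout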
command = f'docker run --rm --env IVY_BACKEND={backend} --env ARRAY_API_TESTS_MODULE="ivy" -v "$(pwd)":/ivy -v "$(pwd)"/.hypothesis:/.hypothesis unifyai/ivy:latest timeout 30m python3 -m pytest {test} -k "{k_flag[backend]}" --tb=short -vv' # noqa
print(f"\n{'*' * 100}")
print(f"{line[:-1]}")
print(f"{'*' * 100}\n")
sys.stdout.flush()
ret = os.system(command)
if ret != 0:
failed = True
f_write.write(line)
if failed:
sys.exit(1)
if __name__ == "__main__":
main()
| ivy/scripts/run_tests/array_api_run_tests_pr.py/0 | {
"file_path": "ivy/scripts/run_tests/array_api_run_tests_pr.py",
"repo_id": "ivy",
"token_count": 783
} | 74 |
from pymongo import MongoClient
from get_all_tests import BACKENDS
def main():
# connect to the database
cluster = MongoClient(
"mongodb+srv://readonly-user:hvpwV5yVeZdgyTTm@cluster0.qdvf8q3.mongodb.net"
)
ci_dashboard_db = cluster["ci_dashboard"]
ivy_tests_collection = ci_dashboard_db["ivy_tests"]
frontend_tests_collection = ci_dashboard_db["frontend_tests"]
# iterate over demos and collect ivy and frontend functions used
ivy_test_docs = ivy_tests_collection.find()
frontend_test_docs = frontend_tests_collection.find()
ivy_functions = [
ivy_test_doc["_id"]
for ivy_test_doc in ivy_test_docs
if ivy_test_doc.get("demos", None)
]
frontend_functions = [
frontend_test_doc["_id"]
for frontend_test_doc in frontend_test_docs
if frontend_test_doc.get("demos", None)
]
ivy_functions = sorted(list(set(ivy_functions)))
frontend_functions = sorted(list(set(frontend_functions)))
# find corresponding test paths for those functions
ivy_test_paths = []
frontend_test_paths = []
for function in ivy_functions:
result = ivy_tests_collection.find_one({"_id": function})
if result:
ivy_test_paths.append(result["test_path"])
for function in frontend_functions:
result = frontend_tests_collection.find_one({"_id": function})
if result:
frontend_test_paths.append(result["test_path"])
# add those paths to the tests_to_run
with open("tests_to_run", "w") as write_file:
for test_path in ivy_test_paths + frontend_test_paths:
test_path = test_path.strip()
for backend in BACKENDS:
write_file.write(f"{test_path},{backend}\n")
if __name__ == "__main__":
main()
| ivy/scripts/setup_tests/setup_priority_tests.py/0 | {
"file_path": "ivy/scripts/setup_tests/setup_priority_tests.py",
"repo_id": "ivy",
"token_count": 783
} | 75 |
# installs multiple versions of cuda and cudnn and then installs the
# latest frameworks and the requirements
FROM debian:buster
WORKDIR /ivy
# arguments
ARG fw
ARG pycon=3.10
# environment variables
ENV DEBIAN_FRONTEND=noninteractive
ENV TZ=Europe/Moscow
ENV CONDA_DIR /opt/miniconda/
# install base libraries
RUN grep security /etc/apt/sources.list | tee /etc/apt/security.sources.list && \
apt-get update && \
apt-get upgrade -o Dir::Etc::SourceList=/etc/apt/security.sources.list -y &&\
apt-get -y update && \
apt-get install -y gnupg \
curl \
wget \
software-properties-common \
gcc \
nano \
procps
# install miniconda
RUN apt clean && \
rm -rf /var/lib/apt/lists/* && \
apt-get update && \
apt-get install -y wget && \
apt-get install -y jq && \
apt-get install git -y && \
wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
/bin/bash ~/miniconda.sh -b -p /opt/miniconda
# create conda environment
ENV PATH=$CONDA_DIR/bin:$PATH
RUN conda create --name multienv python==$pycon -y
# fix protobuf conflicts
ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION python
ENV PATH=/opt/miniconda/envs/multienv/bin:$PATH
RUN apt-get update && \
apt-get install -y python3-pip python3-tk && \
apt-get install -y libsm6 libxext6 libxrender-dev libgl1-mesa-glx && \
apt-get install -y git && \
apt-get install -y rsync && \
apt-get install -y libusb-1.0-0 && \
apt-get install -y libglib2.0-0 && \
pip3 install --upgrade pip && \
pip3 install setuptools==58.5.3
# install Ivy Upstream
RUN git clone --progress --recurse-submodules https://github.com/unifyai/ivy --depth 1 && \
cd ivy && \
cd ivy_tests/array_api_testing/test_array_api && \
pip3 install --no-cache-dir -r requirements.txt
# copy library files to workdir
COPY docker/gpu_framework_directory.py .
COPY requirements/optional_gpu.txt .
COPY requirements/requirements.txt .
# setting torch path early on because torch-scatter needs it
ENV PYTHONPATH "/opt/fw/torch:/opt/miniconda/envs/multienv/bin"
# requirement mappings directs which dependency to be installed and where
COPY /docker/requirement_mappings_gpu.json .
SHELL ["/bin/bash", "-c"]
# install all libraries based on the mappings
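# each key in requirement_mappings_gpu.json names a framework directory under /opt/fw/ and each value lists the
# packages to install into that directory (e.g. {"torch": ["torch-scatter"]}; values here are illustrative only)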
RUN python3 gpu_framework_directory.py $fw &&\
jq -r 'to_entries[] | select(.value != [""]) | .key as $dir | .value[] | @sh "/opt/fw/\($dir) \(.)"' requirement_mappings_gpu.json | xargs -I {} sh -c 'printf "Installing %s\n" $2 && pip install --ignore-installed --target $1 $2 --extra-index-url https://download.pytorch.org/whl/cu118' sh {}
RUN sed -i '/numpy/d' requirements.txt &&\
pip install -r requirements.txt &&\
cp ./optional_gpu.txt tmp.txt &&\
jq -r 'to_entries[] | [.key] + .value | select(length > 0 or (. == "")) | .[]' requirement_mappings_gpu.json | sort -u | xargs -I {} sed -i '/{}/d;/jaxlib/d' tmp.txt && pip install -r tmp.txt
# add all the directories to environment path so that python knows where to find them
ENV PYTHONPATH "/opt/fw/mxnet:/opt/fw/numpy:/opt/fw/tensorflow:/opt/fw/jax:/opt/fw/torch:/opt/fw/paddle:/opt/miniconda/envs/multienv/bin"
# test dependencies
COPY scripts/test_dependencies.py .
RUN python3 test_dependencies.py -fp requirements.txt,optional_gpu.txt && \
rm -rf requirements.txt && \
rm -rf tmp.txt && \
rm -rf optional_gpu.txt && \
rm -rf test_dependencies.py
| ivy/docker/DockerfileGPU/0 | {
"file_path": "ivy/docker/DockerfileGPU",
"repo_id": "ivy",
"token_count": 1321
} | 0 |
{% extends "top_level_toc.rst" %}
{% block name %}{{"Data classes" | escape | underline}}{% endblock %}
{% block template %}top_data_module.rst{% endblock %}
{% block options %}{{ super() }} :hide-table:
{% endblock %}
{#
As this toc generates files a little differently, we added this to fix linking
issues
#}
{% block custom_content %}
.. autosummary::
{% for submodule in modules %}
{{ submodule }}.{{ submodule.split('.')[-1] }}
{%- endfor %}
{% endblock %}
| ivy/docs/_templates/top_data_toc.rst/0 | {
"file_path": "ivy/docs/_templates/top_data_toc.rst",
"repo_id": "ivy",
"token_count": 182
} | 1 |
Array API Tests
===============
.. _`Array API Standard`: https://data-apis.org/array-api/latest/
.. _`test suite`: https://github.com/data-apis/array-api-tests
.. _`test_array_api`: https://github.com/unifyai/ivy/tree/20d07d7887766bb0d1707afdabe6e88df55f27a5/ivy_tests
.. _`for each backend`: https://github.com/unifyai/ivy/tree/20d07d7887766bb0d1707afdabe6e88df55f27a5/.github/workflows
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`array api tests thread`: https://discord.com/channels/799879767196958751/1189907361494741073
.. _`scripts/shell/test_array_api.sh`: https://github.com/unifyai/ivy/blob/bcddc79978afe447958dfa3ea660716845c85846/scripts/shell/test_array_api.sh
.. _`array-api test repository`: https://github.com/data-apis/array-api/tree/main
.. _`issue`: https://github.com/numpy/numpy/issues/21213
.. _`ivy_tests/array_api_testing/test_array_api/array_api_tests/test_special_cases.py`: https://github.com/data-apis/array-api-tests/blob/ddd3b7a278cd0c0b68c0e4666b2c9f4e67b7b284/array_api_tests/test_special_cases.py
.. _`git website`: https://www.git-scm.com/book/en/v2/Git-Tools-Submodules
.. _`hypothesis`: https://hypothesis.readthedocs.io/en/latest/
.. _`ivy tests`: ivy_tests.rst
.. _`CI Pipeline`: continuous_integration.html
In conjunction with our own ivy unit tests, we import the array-api `test suite`_.
These tests check that all ivy backend libraries behave according to the `Array API Standard`_ which was established in May 2020 by a group of maintainers.
It was intended to bring some consistency and completeness to the various python libraries that have gained popularity in the last 5-10 years.
Since Ivy aims to unify machine learning frameworks, it makes sense that we value consistency in behaviour across each of the backend libraries in our code too.
The test suite is included in the ivy repository as a submodule in the folder `test_array_api`_, which we keep updated with the upstream test suite.
The array-api tests repository is maintained by a group of developers unconnected to Ivy.
We have made the decision to import the test suite directly from this repository rather than having our own fork.
This means that the test suite you see in the ivy source code cannot be modified in the usual way of pushing to the ivy main branch.
Instead, the change must be made to the array-api repository directly and then our submodule must be updated with the commands:
.. code-block:: none
# to initialise local config file and fetch + checkout submodule (not needed every time)
git submodule update --init --recursive
# pulls changes from the upstream remote repo and merges them
git submodule update --recursive --remote --merge
Sometimes you will face strange behaviour when staging changes from Ivy's main repo which include submodule updates.
This is caused by your local submodule being out of date, because we update the submodule iteratively. You can get around this by running the following command:
.. code-block:: none
# Updating your submodule to the latest commit
git submodule update --remote
and only *then* can changes to the submodule be pushed to ivy-main, i.e. only when these changes exist in the source array-api repository.
See the `git website`_ for further information on working with submodules.
Running the Tests
-----------------
The entire test suite is run independently `for each backend`_ on every push to the repo.
Therefore, every function which exists in the standard is thoroughly tested for adherence to the standard on a continuous basis.
You will need to make sure the Array API tests are passing for each backend framework if/when making any changes to Ivy functions which are part of the standard.
If a test fails on the CI, you can see details about the failure under `Details -> Run [backend] Tests` as shown in `CI Pipeline`_.
You can also run the tests locally before making a PR.
There are two ways to do this: using the terminal or using your IDE.
Using Terminal
**************
Using the terminal, you can run all array-api tests in a given file for a certain backend using the bash file `scripts/shell/test_array_api.sh`_:
.. code-block:: none
# /ivy
    /bin/bash -e ./scripts/shell/test_array_api.sh jax test_linalg
You can replace the first argument with any of our supported frameworks - tensorflow, numpy, torch, or jax - and the second with any of the individual test function categories in :code:`ivy/ivy_tests/array_api_testing/test_array_api/array_api_tests`, e.g. *test_set_functions*, *test_signatures* etc.
You can also run a specific test, as often running *all* tests in a file is excessive.
To make this work, you should set the backend explicitly in the `_array_module.py` file, which you can find in the `array_api_tests` submodule.
At the beginning of the file, you will see the following line of code :code:`array_module = None`.
You need to comment out that line and add the following:
.. code-block:: none
import ivy as array_module
array_module.set_backend("jax") # or numpy, tensorflow, torch
You should now be able to run the following commands via terminal:
.. code-block:: none
# run all tests in a file
pytest -vv ivy_tests/array_api_testing/test_array_api/array_api_tests/test_manipulation_functions.py
# run a single test
pytest -vv ivy_tests/array_api_testing/test_array_api/array_api_tests/test_manipulation_functions.py -k "test_concat"
Using the IDE
*************
You can also run a specific test or test file by using your IDE.
To make this work, you should set the backend explicitly in the `_array_module.py` file as explained in the previous subsection.
After that, you can run the API test files as you typically would with other tests.
See :ref:`here <overview/contributing/the_basics:Running Tests Locally>` for instructions on how to run tests in ivy more generally.
*NB*: make sure not to include any changes to the array-api files in your commit.
Regenerating Test Failures
--------------------------
Array-API tests are written using `hypothesis`_ to perform property-based testing, just like the `ivy tests`_.
However, unlike the ivy tests, the Array-API tests make liberal use of :code:`data.draw` in the main body of the test function instead of generating the data in the :code:`@given` decorator that wraps it.
This means that failed tests cannot be re-run with the :code:`@example` decorator, as explained in the :ref:`final section <overview/deep_dive/ivy_tests:Re-Running Failed Ivy Tests>` of the ivy tests deep dive.
Fortunately, it is possible to regenerate test failures using a unique decorator that appears in the final line of the falsifying example in the error stack trace:
.. code-block:: none
=================================== FAILURES ===================================
______________________ test_remainder[remainder(x1, x2)] _______________________
ivy_tests/array_api_testing/test_array_api/array_api_tests/test_operators_and_elementwise_functions.py:1264: in test_remainder
@given(data=st.data())
ivy_tests/array_api_testing/test_array_api/array_api_tests/test_operators_and_elementwise_functions.py:1277: in test_remainder
binary_param_assert_against_refimpl(ctx, left, right, res, "%", operator.mod)
ivy_tests/array_api_testing/test_array_api/array_api_tests/test_operators_and_elementwise_functions.py:620: in binary_param_assert_against_refimpl
binary_assert_against_refimpl(
ivy_tests/array_api_testing/test_array_api/array_api_tests/test_operators_and_elementwise_functions.py:324: in binary_assert_against_refimpl
assert isclose(scalar_o, expected), (
E AssertionError: out=-2.0, but should be roughly (x1 % x2)=1.0 [remainder()]
E x1=17304064.0, x2=3.0
E assert False
E + where False = isclose(-2.0, 1.0)
E Falsifying example: test_remainder(
E data=data(...), ctx=BinaryParamContext(<remainder(x1, x2)>),
E )
E Draw 1 (x1): ivy.array(17304064.)
E Draw 2 (x2): ivy.array(3.)
E
E You can reproduce this example by temporarily adding @reproduce_failure('6.55.0', b'AXic42BAAowcnP+RuMwMABAeAR0=') as a decorator on your test case
Copy the :code:`@reproduce_failure` decorator and paste it after the usual decorators of `test_remainder`.
You may also need to include the hypothesis import of `reproduce_failure` as shown below.
.. code-block:: none
from hypothesis import reproduce_failure
@pytest.mark.parametrize("ctx", make_binary_params("remainder", dh.numeric_dtypes))
@given(data=st.data())
@reproduce_failure('6.55.0', b'AXic42BAAowcnP+RuMwMABAeAR0=')
def test_remainder(ctx, data):
left = data.draw(ctx.left_strat, label=ctx.left_sym)
right = data.draw(ctx.right_strat, label=ctx.right_sym)
if ctx.right_is_scalar:
assume(right != 0)
else:
assume(not xp.any(right == 0))
res = ctx.func(left, right)
binary_param_assert_dtype(ctx, left, right, res)
binary_param_assert_shape(ctx, left, right, res)
binary_param_assert_against_refimpl(ctx, left, right, res, "%", operator.mod)
The test should then include the inputs which led to the previous failure and recreate it.
If you are taking the :code:`@reproduce_failure` decorator from a CI stack trace and trying to reproduce it locally, you may find that sometimes the local test unexpectedly passes.
This is usually caused by a discrepancy between your local source code and ivy-main, so try pulling from main to sync the behaviour.
Test Skipping
-------------
Certain tests may need to be skipped when running the array-api test suite.
This could be due to a variety of reasons:
#. the test function has a known issue which the `array-api test repository`_ developers are working on (e.g. :code:`test_asarray_arrays`)
#. the function itself deviates from the standard (e.g. :code:`test_floor_divide`)
#. there is an issue with the hypothesis test data generation i.e. a failed 'health check' (e.g. :code:`test_iop[__imod__(x1_i < 0 and x2_i is +0) -> NaN]`)
#. tolerance issues when asserting output :code:`isequal()` (e.g. :code:`test_matrix_norm`)
All the examples in this list except point 3 (which only occurs with tensorflow) refer to numpy functions, and the first two are skipped in the `array-api test repository`_ also.
The data generation and tolerance issues are not skipped in the array-api repo and are difficult for Ivy developers to solve as we cannot alter the tests directly.
Currently, we import the test suite and run it; we do not have our own fork that we can tweak at will.
These issues have been raised in the array-api test repo and will be addressed in due course.
There are currently two ways to skip array-api tests:
#. in :code:`ivy_tests/array_api_methods_to_test/<submodule>.txt` and
#. in :code:`ivy_tests/skips.txt`
The first method was implemented before the second.
Each :code:`<submodule>.txt` file contains a comprehensive list of functions which belong to that submodule, some of which are commented out.
The commented-out functions are being skipped *only* for the backend(s) that is/are causing the failure, not all the backends.
This is done by identifying any references to a backend in the commented-out line e.g. :code:`#trace # failing for jax, numpy due to issues with dtypes in output in test: https://github.com/data-apis/array-api/issues/202` will cause :code:`test_trace` to be skipped on the jax and numpy backends.
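For illustration, a hypothetical snippet from one of these :code:`<submodule>.txt` files might look like the following, where only the commented-out entry is skipped, and only on the backends named in its comment:

.. code-block:: none

    matmul
    #trace # failing for jax, numpy due to issues with dtypes in output in test: https://github.com/data-apis/array-api/issues/202
    tensordot
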
The latter method, on the other hand, skips a test on *all* backends, even if it is just failing on one.
The :code:`ivy_tests/skips.txt` scheme was implemented to skip *specific test cases*.
The array-api test suite contains a set of special tests which aim to cover edge-case input and particular data type promotion rules (see :code:`ivy_tests/array_api_testing/test_array_api/array_api_tests/test_special_cases.py`).
In :code:`ivy_tests/skips.txt`, tests are skipped by writing the filepath + conditions on the input of the test e.g.,
.. code-block:: bash
ivy_tests/array_api_testing/test_array_api/array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity]
is skipping the in-place operations test on the :code:`pow` instance method when x1 is -infinity and x2 is a positive float that is not an odd integer.
The result should be +infinity; however, there is a known problem with the numpy instance method and an `issue`_ has been raised on the numpy repository.
Tests are categorised in :code:`ivy_tests/skips.txt` according to the backend they are failing on and the reason for the failure.
The fact that the skip instruction itself contains the exact input conditions that are failing makes it easier to keep track of and revisit failing tests to try and fix them.
**Round Up**
This should have hopefully given you a good understanding of how the Array API test suite is used for testing Ivy.
If you have any questions, please feel free to reach out on `discord`_ in the `array api tests thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/R1XuYwzhxWw" class="video">
</iframe>
| ivy/docs/overview/deep_dive/array_api_tests.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/array_api_tests.rst",
"repo_id": "ivy",
"token_count": 4205
} | 2 |
Gradients
=========
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`gradients thread`: https://discord.com/channels/799879767196958751/1190235826806853672
Overview
--------
Gradients are a crucial aspect of all modern deep learning workflows.
Different frameworks provide different APIs for gradient computation and there were a few considerations to be made while building a unified gradients API in Ivy.
There are a number of functions added in ivy to allow gradient computation, but we'll mainly focus on the most commonly used and the most general function :func:`ivy.execute_with_gradients`.
This is because the other gradient functions such as :func:`ivy.value_and_grad` and :func:`ivy.grad` can be considered as providing a subset of the functionality that :func:`ivy.execute_with_gradients` provides.
Example Usage of the Gradient API
---------------------------------
The :func:`ivy.execute_with_gradients` function signature
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Following is the pseudo function signature for the :func:`ivy.execute_with_gradients` function,
.. code-block:: python
    def execute_with_gradients(
        func: Callable,
        xs: Any arbitrary nest,
        xs_grad_idxs: Input indices,
        ret_grad_idxs: Output indices,
    ):
        return func_ret, grads
The :code:`func` in the input can be any user-defined function that returns a single scalar or any arbitrary nest of scalars.
By scalars, we are referring to zero-dimensional arrays.
So for example, the following are some valid outputs by the :code:`func`,
.. code-block:: python
ivy.array(12.)
# OR
ivy.Container(
a=ivy.array(12.),
b=ivy.Container(
c=ivy.array(15.),
d=ivy.array(32.)
)
)
# OR
[ivy.array(25.), {'x': (ivy.array(21.), ivy.array(11.))}, (ivy.array(9.),)]
:code:`xs` can be any arbitrary nest of arrays and refers to the inputs passed to the :code:`func`, so we suggest designing your :code:`func` based on what inputs you pass in :code:`xs`.
The arrays in :code:`xs` can contain any arbitrary number of dimensions, the only constraint is on the output of the :code:`func` as explained above.
The :code:`xs_grad_idxs` and :code:`ret_grad_idxs` are intended to provide more control over the arrays gradients are computed with.
:code:`xs_grad_idxs` accepts the indices of the input arrays to compute gradients for, and :code:`ret_grad_idxs` accepts the indices of the output arrays to compute gradients with respect to.
An example using :func:`ivy.execute_with_gradients`
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: python
    def func(xs):
return ivy.mean(xs[0] + xs[1].b)
x = ivy.array([1., 2., 3.])
x = ivy.Container(a=x, b=x)
y = ivy.array([4., 5., 6.])
y = ivy.Container(b=y, c=x)
xs = [x, y]
ret, grads = ivy.execute_with_gradients(
func,
xs,
xs_grad_idxs=[[0]],
ret_grad_idxs=[["a"]]
)
Custom Gradient Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
There are various scenarios where users may want to define custom gradient computation rules for their functions.
Some of these are numerical stability, smoothing, and clipping of the computed gradients.
Ivy provides the :func:`ivy.bind_custom_gradient_function` function to allow users to bind custom gradient computation logic to their functions.
Following is an example of usage of :func:`ivy.bind_custom_gradient_function`,
.. code-block:: python
import ivy
ivy.set_backend("torch")
x = ivy.array(50.0)
inter_func = lambda x: ivy.log1p(ivy.exp(x))
    # args -> ((xs, ret), upstream)
    def custom_grad_fn(*args):
        args1 = (1 - 10 / (1 + args[0][0]))
        return args[1] * args1
inter_func = ivy.bind_custom_gradient_function(
inter_func, custom_grad_fn
)
func = lambda x: ivy.sum(inter_func(x) ** 2)
ret, grad = ivy.execute_with_gradients(func, x)
The :code:`custom_grad_fn` here accepts :code:`*args` which has the structure :code:`((xs, ret), upstream)` where,
* :code:`xs` is the input similar to the one accepted in :func:`ivy.execute_with_gradients`
* :code:`ret` is the output of the forward pass of the :func:`inter_func`
* :code:`upstream` refers to the previously computed gradients while back-propagating
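Concretely, for the example above, the packed argument received by :code:`custom_grad_fn` could be unpacked roughly as follows (an illustrative sketch of the structure only, not exact runtime values):

.. code-block:: python

    def custom_grad_fn(*args):
        (xs, ret), upstream = args
        # xs       -> the input x that was passed to inter_func
        # ret      -> the forward output of inter_func, i.e. log1p(exp(x))
        # upstream -> the previously computed gradients flowing in from the
        #             outer function, here roughly d(sum(ret ** 2))/d(ret) = 2 * ret
        ...
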
Design of the Gradient API
--------------------------
Our policy on gradients
^^^^^^^^^^^^^^^^^^^^^^^
* The gradient API is fully-functional in ivy.
* There is no explicit variable class or any public-facing function for adding gradient support to an ivy.Array.
* The gradient functions in ivy implicitly convert all arrays to support gradient computation before computing gradients and detach all arrays after computing gradients.
* We don't retain any computations previously tracked in arrays by frameworks such as torch.
* This makes our gradient API unambiguous, flexible, and easy to debug.
* Any framework-specific tracking of computations or variable classes should be handled in the corresponding frontends.
Gradient APIs of frameworks
^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. list-table:: Frameworks and their gradient functions
   :widths: 50 50
   :header-rows: 1
* - Framework
- Common ways to Gradient Computation
* - JAX
- `jax.grad`, `jax.value_and_grad`, `jax.jacfwd`, `jax.jacrev`
* - PyTorch
- `torch.autograd.grad`, `torch.autograd.backward`
* - TensorFlow
- `tf.GradientTape`, `tf.gradients` (only in graph-mode)
General Structure of Backend-specific implementations
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here's a high-level description of the steps followed by the backend-specific implementations of :func:`ivy.execute_with_gradients` (a simplified pseudo-code sketch follows the list):
#. Get Duplicate Index Chains : indices of arrays that share the same :code:`id`
#. Convert integer arrays to floats : only for ease of use; it's *not* recommended to pass integer arrays to gradient functions
#. Get relevant inputs : based on the :code:`xs_grad_idxs`, we collect the relevant inputs for gradient computation
#. Enable gradient support : we implicitly make use of framework-specific APIs to enable gradients in arrays. Ivy doesn't need to have an explicit variable class as the gradient API is fully functional
#. Compute Results : we do the forward pass by passing the input as it is to the function
#. Get relevant outputs : based on the :code:`ret_grad_idxs`, we collect the relevant outputs for gradient computation
#. Compute gradients : we make use of the framework-specific APIs to compute the gradients for the relevant outputs with respect to the relevant inputs
#. Handle duplicates : we explicitly handle duplicate instances using the index chains captured above as different frameworks treat duplicates differently
#. Post process and detach : finally, all computed gradients are updated to deal with :code:`NaN` and :code:`inf` and the input arrays are detached (i.e. gradient propagation is stopped)
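Putting these steps together, a heavily simplified pseudo-code sketch of a backend-specific implementation might look something like the following (the helper names are purely illustrative and are not the actual Ivy internals):

.. code-block:: python

    # illustrative pseudo-code only, not the real implementation
    def execute_with_gradients(func, xs, xs_grad_idxs, ret_grad_idxs):
        duplicate_index_chains = get_duplicate_index_chains(xs)        # 1. track shared arrays
        xs = cast_int_arrays_to_float(xs)                              # 2. ints -> floats
        relevant_inputs = index_nest(xs, xs_grad_idxs)                 # 3. select inputs
        relevant_inputs = enable_gradient_tracking(relevant_inputs)    # 4. backend-specific
        func_ret = func(xs)                                            # 5. forward pass
        relevant_outputs = index_nest(func_ret, ret_grad_idxs)         # 6. select outputs
        grads = backend_grad(relevant_outputs, relevant_inputs)        # 7. backend-specific autodiff
        grads = spread_over_duplicates(grads, duplicate_index_chains)  # 8. handle duplicates
        return func_ret, detach(nan_and_inf_to_zero(grads))            # 9. post-process and detach
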
Framework-specific Considerations
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* JAX treats duplicate arrays as distinct while computing gradients, so we need additional logic to replicate gradients computed w.r.t one array over all its duplicates.
* Gradients computed for functions with undefined results are inconsistent across backends (NaN, Inf, 0). We handle all these inconsistencies by returning 0 for all backends. So if you're debugging gradients and find a 0, there's a possibility that it was a NaN or an Inf before this post-processing.
**Round Up**
This should have hopefully given you a good feel for how the gradient API is implemented in Ivy.
If you have any questions, please feel free to reach out on `discord`_ in the `gradients thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/riNddnTgDdk" class="video">
</iframe>
| ivy/docs/overview/deep_dive/gradients.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/gradients.rst",
"repo_id": "ivy",
"token_count": 2437
} | 3 |
FAQ
===
.. _`dex`: https://github.com/dexidp/dex
.. _`API for distributed training`: https://github.com/unifyai/ivy/blob/a2f37b1bae232b7ba5257e59f8b46a0374cca9f1/ivy/functional/ivy/device.py#L660
.. _`fully support these`: https://pytorch.org/tutorials/prototype/vmap_recipe.html
.. _`README`: https://github.com/unifyai/ivy
These are some of the most common technical questions that continue to arise when we're discussing Ivy with developers in the community.
As Ivy becomes more mature and we continue these discussions, then many more questions and answers will no doubt be added!
We are all incredibly grateful to everyone in the community who has put in the time and effort to ask so many important and probing questions!
We hope these Q&As are a useful reference!
Maintaining Backend Versions
----------------------------
**Q:** Isn't it complex to maintain support for all backend versions, particularly as they undergo constant changes?
How are you going to handle this, will you have an option to select any version for any backend?
**A:** Ivy **only** wraps the functional APIs of each backend framework.
The last 2 years of Ivy development have shown us how remarkably stable the functional APIs are for each backend framework.
**Not once** have we needed to change an implementation or a unit test as a result of a version update of a backend framework.
This is not entirely surprising, each framework has strong backward compatibility requirements, and the functional API is generally one of the lower level building blocks upon which everything else in the framework depends.
Our CI always tests against the latest version available on PyPI, and this has been the case since we started development.
We do not lock-in any versions during our continuous testing, and we will continue to always pull the latest version.
In the future, we hope to add explicit testing also for previous versions, so we can guarantee backward compatibility for each backend.
We will also add an option to select backend versions for the small minority of cases where changes in the backend functional APIs do cause breaking changes for Ivy.
Dynamic Sizes
-------------
**Q:** Assuming a static computation graph, can tensors have sizes that dynamically change?
XLA does not support dynamic sizes, because it JIT-compiles the graph, and pre-allocates all buffers in memory before the graph runs.
TensorFlow and PyTorch do allow dynamic sizes, but only on certain backends.
Dynamic sizes require a dynamic memory manager, which CPUs/GPUs have, but XLA currently doesn't.
How does Ivy deal with all of this?
**A:** Ivy assumes that dynamic shapes are supported, and an error will be thrown if/when a function is traced with dynamic shapes enabled but the backend does not support dynamic shapes in the traced graph.
For now, fully framework-agnostic traced graphs are only possible for static graphs.
Type and Shape Checking
-----------------------
**Q:** What kind of type system does Ivy use? Does it do shape-checking of tensors? If so, how does it handle dynamic sizes? The gold standard here is a fully dependent type system, but this is very rare, with the exception of `dex`_.
**A:** The checks performed during compiling will remain backend-specific.
The function :func:`ivy.compile` wraps the backend tracing functions, for example :func:`jax.jit`, :func:`tf.function`, :func:`torch.jit.script` and :func:`torch.jit.trace`.
For some backends, shape-checking will be performed during the tracing phase and for others it will not.
GPU handling
------------
**Q:** How does Ivy handle GPU usage?
**A:** Ivy handles GPU usage by simply wrapping the backend frameworks, so Ivy will use GPUs in the same manner as the backend framework does.
E.g. When using a torch backend, torch will be a dependency of Ivy, and its handling of GPU functionalities will be inherited and extended upon by Ivy.
Model Deployment
----------------
**Q:** Does Ivy support model deployment?
**A:** Yes, Ivy will support efficient model deployment.
However, this feature is not yet supported, as the tracer module is still under development; it will be released soon with Ivy version 1.2.0.
Dynamic Control Flow
--------------------
**Q:** Tensorflow has dynamic control-flow primitives (loops, branches) even within a static computation graph.
Jax also has dynamic control-flow (:code:`lax.scan`, :code:`lax.while`), but support is limited; only :code:`lax.scan` is differentiable in reverse mode.
Branching is also tricky, and is backend-dependent.
CPUs have branch predictors and can execute tight loops, GPUs don't, but have drivers that can schedule kernels dynamically, and some other architectures do static scheduling, which limits the kinds of algorithms that can run effectively.
TensorFlow eager and PyTorch allow you to use full python control flow, (loops, branches, function calls, dynamic dispatch, recursion) but there is no static computation graph.
How will Ivy handle dynamic control flow?
Will Ivy parse python ASTs?
**A:** For now, Ivy will not support dynamic control flow by parsing ASTs.
The dynamism of :code:`for` loops and :code:`while` loops will be ignored during tracing, and just the static trace which chains the array operations performed during the forward pass at tracing time will be preserved.
However, Ivy will support the tracing of looping and branching methods such as :code:`lax.scan`, :code:`lax.while`, :code:`tf.while`, :code:`tf.cond` etc.
In cases where there is not an associated compilable method in other backends, we will strive to implement this as a composition of existing compilable operations.
If such a composition is not possible, then we will instead convert these to compositions of pure Python :code:`for`, :code:`while` and :code:`if` statements (when using a PyTorch backend for example).
The reverse conversions will not be possible without parsing ASTs though.
This means that, for example, TensorFlow (with loops + branches) → PyTorch (with for, while + if statements) is possible, but the reverse direction will not preserve the loops and branches: PyTorch (with for, while + if statements) → TensorFlow (static, no loops or branches).
Auto-Differentiation
--------------------
**Q:** How do you handle reverse mode, forward mode, and Jacobians? How about stop gradients, and gradient checkpointing, and custom gradients? What about autodiff for control-flow operators like :code:`lax.scan`?
This is where JAX really shines, and unless you are implementing your own autodiff framework, you are at the mercy of whatever the backend supports.
**A:** Ivy will implement all of the general methods that JAX supports, and will provide errors if/when the backend does not support this.
In general, Ivy will support the superset of functionality, and not just the lowest common denominator.
Ivy takes a fully functional approach like JAX, and the API enables arbitrary nested :code:`execute_with_gradients` calls up to an arbitrary gradient order.
Again, if a backend does not support this then an error will be thrown.
This means Ivy code is not 100% framework-agnostic, and is indeed at the mercy of what the backend autograd package supports in these cases.
Replicas, and Data vs Model Parallelism
---------------------------------------
**Q:** Big models don't run on just one device, and the major frameworks have *very* different ways of splitting a model up so that it runs on a cluster.
There are multiple competing paradigms for parallelisation -- e.g. SPMD vs mixture of experts.
JAX and Tensorflow are very sophisticated in this department, and routinely run models on hundreds or thousands of devices.
How will Ivy support multi-device training, if at all?
**A:** This is not something we’re diving into too deeply at the moment.
However, we have written our own `API for distributed training`_, which broadly follows PyTorch’s approach using a CUDA-enabled multiprocessing module.
If heavily distributed training is important, then Ivy can be supplementary for the time being, rather than a total replacement.
For example, someone can use TensorFlow’s distributed training tools, and just use Ivy to copy over a PyTorch model into their TF pipeline.
We are not trying to encourage anyone to drop any existing tools and just use Ivy instead.
Projects can use 1% Ivy code or 100%.
We’re very happy in either case!
Support for Functions
---------------------
**Q:** Is it possible to trace tensor code into a reusable and differentiable function? If you can't, then it will be difficult to apply any fancy kernel fusion algorithms, and you can expect to lose a lot of performance.
What about higher-order operations, like :code:`jax.vmap` and :code:`jax.pmap`?
**A:** Most functions in Ivy are *primary* functions, which are generally implemented as light wrapping around a near-identical backend-specific function, which itself will likely map to an efficient kernel.
*Compositional* functions on the other hand are implemented as a composition of other Ivy functions, meaning there will not be a one-to-one mapping to a single backend kernel.
However, our experiments (to be published soon!) show this does not lead to a significant run-time overhead, even when a composition of operations is required.
For methods like :code:`jax.vmap` and :code:`jax.pmap`, we will need to implement these as (possibly inefficient) compositions in other frameworks, until those frameworks support them natively.
However, it seems as though other frameworks such as PyTorch are seeing the benefit in these functions, and will eventually `fully support these`_.
Alternative Data Structures
---------------------------
**Q:** Will Ivy support data structures such as tuples, dictionaries, lists etc.? For example, JAX code is full of them.
**A:** We will of course support these structures in pure python code, but we will not support backend-specific alternative compilable data structures.
While Ivy will not provide an interface to these data structures directly, Ivy code can easily supplement JAX code which does contain these data structures, and both can be traced together without issue.
Ivy can act as a supplementary framework if/when some of the more unique backend-specific data structures are required.
Custom Operations
-----------------
**Q:** Most frameworks have a backdoor for user-defined ops, implemented in C++/CUDA, or some kind of host callback mechanism.
Will Ivy support this ability also?
**A:** We will not attempt to provide a unified back-door for all possible backend kernel customizations, but of course users can still use the backend-specific backdoors which already exist when using Ivy.
The Pipeline
------------
**Q:** How will Ivy manage the training loop and input pipeline? What about loading and saving models, recording of scalar metrics, visualization, etc.? These are often also somewhat framework-dependent.
**A:** We are not advocating to replace all code with Ivy.
We would encourage users to continue using whatever data loaders they want to, and perhaps just use an Ivy model, or use Ivy to convert a model, or even just a single function from a library.
State
-----
**Q:** Tensorflow handles state as part of the static graph.
JAX is purely functional and so outsources it to one of several third-party libraries, like Flax.
How will Ivy handle state?
**A:** Ivy has a fully functional backend.
When using a TensorFlow or PyTorch backend, we pass all of the variables and gradients explicitly as function inputs and outputs.
This is not actually required for the stateful back-ends, but we still return the values such that JAX is also supported.
Ivy will remain fully functional in design, and we therefore assume behavior similar to JAX.
Our simple example on the `README`_ trains correctly for all back-ends, which passes everything explicitly in a functional manner.
| ivy/docs/overview/faq.rst/0 | {
"file_path": "ivy/docs/overview/faq.rst",
"repo_id": "ivy",
"token_count": 2823
} | 4 |
.. _`RWorks Frameworks`:
Frameworks
==========
.. _`MATLAB`: https://mathworks.com/products/matlab.html
.. _`MathWorks`: https://mathworks.com/
.. _`MuPAD`: https://mathworks.com/discovery/mupad.html
.. _`Simulink`: https://mathworks.com/products/simulink.html
.. _`SciPy`: https://scipy.org/
.. _`NumPy`: https://numpy.org/
.. _`Torch`: http://torch.ch/
.. _`IDIAP`: https://www.idiap.ch/
.. _`EPFL`: https://www.epfl.ch/
.. _`Numarray`: https://wiki.python.org/moin/NumArray
.. _`Numeric`: https://people.csail.mit.edu/jrennie/python/numeric/
.. _`CPython`: https://github.com/python/cpython
.. _`Scikit-learn`: https://scikit-learn.org/
.. _`Theano`: https://github.com/Theano/Theano
.. _`Aesara`: https://github.com/aesara-devs/aesara
.. _`Pandas`: https://pandas.pydata.org/
.. _`Julia`: https://julialang.org/
.. _`NASA`: https://www.nasa.gov/
.. _`CERN`: https://home.cern/
.. _`Climate Modeling Alliance`: https://clima.caltech.edu/
.. _`Apache Spark`: https://spark.apache.org/
.. _`MLlib`: https://spark.apache.org/mllib/
.. _`Caffe`: https://caffe.berkeleyvision.org/
.. _`PyTorch`: https://pytorch.org/
.. _`Chainer`: https://chainer.org/
.. _`CuPy`: https://cupy.dev/
.. _`Preferred Networks`: https://www.preferred.jp/
.. _`TensorFlow 1`: https://www.tensorflow.org/versions/r1.15/api_docs
.. _`MXNet`: https://mxnet.apache.org/
.. _`Amazon`: https://www.amazon.com/
.. _`Microsoft Cognitive Toolkit (CNTK)`: https://learn.microsoft.com/en-us/cognitive-toolkit/
.. _`TensorFlow`: https://tensorflow.org/
.. _`PyTorch Ecosystem`: https://pytorch.org/ecosystem/
.. _`Flux`: https://fluxml.ai/
.. _`Zygote.jl`: https://github.com/FluxML/Zygote.jl
.. _`DaggerFlux.jl`: https://github.com/FluxML/DaggerFlux.jl
.. _`JAX`: https://jax.readthedocs.io/
.. _`XLA`: https://www.tensorflow.org/xla
.. _`Haiku`: https://dm-haiku.readthedocs.io/
.. _`Flax`: https://flax.readthedocs.io/
.. _`TensorFlow 2`: https://www.tensorflow.org/api_docs
.. _`TensorFlow Lite`: https://www.tensorflow.org/lite
.. _`Adam Paszke`: https://github.com/apaszke
.. _`Dex language`: https://github.com/google-research/dex-lang
.. _`Haskell`: https://www.haskell.org/
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. |matlab| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/matlab.png
:height: 20pt
:class: dark-light
.. |scipy| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/scipy.png
:height: 20pt
:class: dark-light
.. |torch| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/torch.png
:height: 20pt
:class: dark-light
.. |numpy| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/numpy.png
:height: 20pt
:class: dark-light
.. |scikit-learn| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/scikit-learn.png
:height: 15pt
:class: dark-light
.. |theano| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/theano.png
:height: 10pt
:class: dark-light
.. |pandas| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/pandas.png
:height: 22pt
:class: dark-light
.. |julia| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/julia.png
:height: 20pt
:class: dark-light
.. |apache-spark-mllib| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/apache-spark-mllib.png
:height: 20pt
:class: dark-light
.. |caffe| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/caffe.png
:height: 10pt
:class: dark-light
.. |chainer| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/chainer.png
:height: 20pt
:class: dark-light
.. |tensorflow-1| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/tensorflow-1.png
:height: 20pt
:class: dark-light
.. |mxnet| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/mxnet.png
:height: 20pt
:class: dark-light
.. |cntk| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/cntk.png
:height: 20pt
:class: dark-light
.. |pytorch| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/pytorch.png
:height: 22pt
:class: dark-light
.. |flux| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/flux.png
:height: 22pt
:class: dark-light
.. |jax| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/jax.png
:height: 20pt
:class: dark-light
.. |tensorflow-2| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/tensorflow-2.png
:height: 20pt
:class: dark-light
.. |dex-language| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/frameworks/dex-language.png
:height: 20pt
:class: dark-light
Here we list some of the most prominent frameworks for array computation.
These are the individual frameworks which the wrapper frameworks mentioned above generally wrap around and abstract.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/related_work/frameworks/ml_framework_timeline.png?raw=true
:width: 100%
MATLAB |matlab|
---------------
`MATLAB`_ (an abbreviation of MATrix LABoratory) is a proprietary multi-paradigm programming language and numeric computing environment developed by `MathWorks`_, which was first commercially released in 1984.
It allows matrix manipulations, plotting of functions and data, implementation of algorithms, creation of user interfaces, and interfacing with programs written in other languages.
Although MATLAB is intended primarily for numeric computing, an optional toolbox uses the `MuPAD`_ symbolic engine allowing access to symbolic computing abilities.
An additional package, `Simulink`_, adds graphical multi-domain simulation and model-based design for dynamic and embedded systems.
As of 2020, MATLAB has more than 4 million users worldwide, who come from various backgrounds of engineering, science, and economics.
SciPy |scipy|
-------------
First released in 2001, `SciPy`_ is a Python framework used for scientific computing and technical computing, with modules for optimization, linear algebra, integration, interpolation, special functions, FFT, signal and image processing, ODE solvers and other tasks common in science and engineering.
While the user interface is in Python, the backend involves Fortran, Cython, C++, and C for high runtime efficiency.
It is built to work with `NumPy`_ arrays, and provides many user-friendly and efficient numerical routines, such as routines for numerical integration and optimization.
Torch |torch|
-------------
Initially released in 2002, `Torch`_ is an open-source machine learning library, a scientific computing framework, and a script language based on the Lua programming language.
It provides a wide range of algorithms for deep learning, and uses the scripting language LuaJIT, and an underlying C implementation.
It was created in `IDIAP`_ at `EPFL`_.
NumPy |numpy|
-------------
First released in 2005, `NumPy`_ is a Python framework which was created by incorporating features of the competing `Numarray`_ into `Numeric`_, with extensive modifications.
NumPy supports large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays.
NumPy targets the `CPython`_ reference implementation of Python.
NumPy addresses the absence of compiler optimization partly by providing multidimensional arrays and functions and operators that operate efficiently on arrays.
NumPy arrays are strided views on memory.
It has long been the go-to framework for numeric computing in Python.
SciKit Learn |scikit-learn|
---------------------------
First released in 2007, `Scikit-learn`_ is a Python framework which features various classification, regression, and clustering algorithms including support-vector machines, random forests, gradient boosting, k-means, and DBSCAN, and is designed to interoperate with the Python numerical and scientific libraries `NumPy`_ and `SciPy`_.
Theano |theano|
---------------
Initially released in 2007, `Theano`_ is a Python framework which focuses on manipulating and evaluating mathematical expressions, especially matrix-valued ones, with an inbuilt optimizing compiler.
Computations are expressed using a `NumPy`_-esque syntax and are compiled to run efficiently on either CPU or GPU architectures.
Notably, it includes an extensible graph framework suitable for the rapid development of custom operators and symbolic optimizations, and it implements an extensible graph transpilation framework.
It is now being continued under the name `Aesara`_.
Pandas |pandas|
---------------
Initially released in 2008, `Pandas`_ is a Python framework which focuses on data manipulation and analysis.
In particular, it offers data structures and operations for manipulating numerical tables and time series.
Key features include: a DataFrame object for data manipulation with integrated indexing, tools for reading and writing data between in-memory data structures and different file formats, and label-based slicing, fancy indexing, and subsetting of large data sets.
It is built upon `NumPy`_, and as such the library is highly optimized for performance, with critical code paths written in Cython or C.
Julia |julia|
-------------
Initially released in 2012, `Julia`_ is a high-level, dynamic programming language.
Its features are well suited for numerical analysis and computational science.
Distinctive aspects of Julia's design include a type system with parametric polymorphism in a dynamic programming language, with multiple dispatch as its core programming paradigm.
Julia supports concurrent, (composable) parallel and distributed computing (with or without MPI, or using the built-in "OpenMP-style" threads), and direct calling of C and Fortran libraries without glue code.
Julia uses a just-in-time (JIT) compiler that is referred to as "just-ahead-of-time" (JAOT) in the Julia community, as Julia compiles all code (by default) to machine code before running it.
Julia is used extensively by researchers at `NASA`_ and `CERN`_, and it was selected by the `Climate Modeling Alliance`_ as the sole implementation language for their next generation global climate model, to name a few influential users.
Apache Spark MLlib |apache-spark-mllib|
---------------------------------------
Initially released in 2014, `Apache Spark`_ is a unified analytics engine for large-scale data processing, implemented in Scala.
It provides an interface for programming clusters with implicit data parallelism and fault tolerance.
`MLlib`_ is a distributed machine-learning framework on top of Spark Core that, due in large part to the distributed memory-based Spark architecture, is very runtime efficient.
Many common machine learning and statistical algorithms have been implemented and are shipped with MLlib which simplifies large scale machine learning pipelines.
MLlib fits into Spark's APIs and it also interoperates with `NumPy`_ in Python.
Caffe |caffe|
-------------
Initially released in 2014, `Caffe`_ is highly efficient, but it is written entirely in C++, which requires frequent re-compiling during development and testing.
It was also not very easy to quickly throw prototypes together, with C++ being much less forgiving than Python as a front-facing language.
In the last few years, Caffe has been merged into `PyTorch`_.
Chainer |chainer|
-----------------
Initially released in 2015, `Chainer`_ is written purely in Python on top of `NumPy`_ and `CuPy`_.
It is notable for its early adoption of "define-by-run" scheme, as well as its performance on large scale systems.
In December 2019, `Preferred Networks`_ announced the transition of its development effort from Chainer to `PyTorch`_ and it will only provide maintenance patches after releasing v7.
TensorFlow 1 |tensorflow-1|
---------------------------
Initially released in 2015, `TensorFlow 1`_ enabled graphs to be defined as chains of Python functions, but the lack of python classes in the model construction process made the code very non-pythonic.
It was hard to create meaningful hierarchical and reusable abstractions.
The computation graph was also hard to debug; intermediate values were not accessible in the Python environment, and had to be explicitly extracted from the graph using a :code:`tf.Session`.
Overall it was easier to get started on projects and prototype more quickly than in `Caffe`_ (the most popular ML framework at the time, written in C++), but it was not much easier to debug than Caffe due to the compiled graph which could not be stepped through.
The graph also needed to be fully static, with limited ability for branching in the graph, removing the possibility for pure python control flow to form part of the graph.
MXNet |mxnet|
-------------
Initially released in 2016, `MXNet`_ allows users to mix symbolic and imperative programming.
At its core, MXNet contains a dynamic dependency scheduler that automatically parallelizes both symbolic and imperative operations on the fly.
A graph optimization layer on top of that makes symbolic execution fast and memory efficient.
Despite having big industry users such as `Amazon`_, MXNet has not gained significant traction among researchers.
CNTK |cntk|
-----------
Originally released in 2016, the `Microsoft Cognitive Toolkit (CNTK)`_ is an open-source toolkit for commercial-grade distributed deep learning, written entirely in C++.
It describes neural networks as a series of computational steps via a directed graph.
CNTK allows the user to easily realize and combine popular model types such as feed-forward DNNs, convolutional neural networks (CNNs), and recurrent neural networks (RNNs/LSTMs).
CNTK implements stochastic gradient descent (SGD, error backpropagation) learning with automatic differentiation and parallelization across multiple GPUs and servers.
It is no longer being actively developed, having succumbed to the increasing popularity of the frameworks using Python frontend interfaces.
PyTorch |pytorch|
-----------------
`PyTorch`_ came onto the scene another year later in 2016, which also operated very differently to `TensorFlow`_.
PyTorch operates based on asynchronous scheduling on the target device, without any pre-compilation of the full computation graph on the target device required.
This made it possible to combine asynchronous scheduled efficient kernels with pure Python control flow, and also made it easy to query and monitor the intermediate values in the model, with the boundaries of the “computation graph” having been broken down.
This quickly made it very popular for researchers.
Generally, PyTorch is the choice of the ML researcher, ML practitioner, and the hobbyist.
PyTorch is very Pythonic, very simple to use, very forgiving, and has a tremendous ecosystem built around it.
No other framework comes close to having anything like the `PyTorch Ecosystem`_, with a vast collection of third-party libraries in various important topics for ML research.
Flux |flux|
-----------
`Flux`_ is a library for machine learning geared towards high-performance production pipelines, written entirely in the `Julia`_ language.
It comes "batteries-included" with many useful tools built in, whilst still enabling the full power of the Julia language to be leveraged.
It follows a few key principles.
It “does the obvious thing”.
Flux has relatively few explicit APIs for features like regularization or embeddings.
Instead, writing down the mathematical form will work – and be fast.
It is “extensible by default”.
Flux is written to be highly extensible and flexible while being performant.
Extending Flux is as simple as using custom code as part of the desired model - it is all high-level Julia code.
“Performance is key”.
Flux integrates with high-performance AD tools such as `Zygote.jl`_ for generating fast code.
Flux optimizes both CPU and GPU performance.
Scaling workloads easily to multiple GPUs can be done with the help of Julia's GPU tooling and projects like `DaggerFlux.jl`_.
It “plays nicely with others”.
Flux works well with Julia libraries from data frames and images to differential equation solvers, so it is easy to build complex data processing pipelines that integrate Flux models.
Flux and Julia are not used nearly as much as the Python frameworks, but they are growing in popularity.
JAX |jax|
---------
All of the other Python frameworks work well when the aim is "vanilla" first-order optimization of neural networks against a scalar loss function, but are not as flexible when anything more customized is required, including meta learning (learning to learn), higher order loss functions, gradient monitoring, and other important research frontiers involving more customization for the gradients.
These frameworks abstract away most of the gradient computation from the developer, making it hard for them to explicitly track gradients, compute vector gradients such as Jacobians, and/or compute higher order gradients such as Hessians etc.
Initially released in 2018, `JAX`_ offers an elegant lightweight fully-functional design, which addresses all of these shortcomings, and uses direct bindings to `XLA`_ for running highly performant code on TPUs.
This gives JAX fundamental advantages over `PyTorch`_ and `TensorFlow`_ in terms of user flexibility, ease of debugging, and user control, but has a higher entry barrier for inexperienced ML users, and despite having fundamental advantages over PyTorch and TensorFlow, it still has a very underdeveloped ecosystem.
JAX is generally the choice of the deeply technical ML researcher, working on deeply customized gradient and optimization schemes, and also the runtime performance fanatic.
JAX is not a framework for beginners, but it offers substantially more control for the people who master it.
You can control how your Jacobians are computed, you can design your own vectorized functions using :code:`vmap`, and you can make design decisions which ensure you can squeeze out every ounce of performance from your model when running on the TPU.
The ecosystem is evolving but is still in its infancy compared to PyTorch.
As mentioned above, the emergence of libraries such as `Haiku`_ and `Flax`_ is lowering the entry barrier somewhat.
TensorFlow 2 |tensorflow-2|
---------------------------
With `PyTorch`_ showing clear advantages and gaining in popularity in the Python ML community, `TensorFlow 2`_ was released in 2019 which, like PyTorch, also supported eager execution of the computation graph.
However, because TensorFlow was not an eager framework by design, the eager-mode was very inefficient compared to compiled mode, and therefore was targeted mainly at ease of debugging, rather than a default mode in which TensorFlow should be run.
Without a clear niche, TensorFlow seems to have now focused more on edge and mobile devices, with the introduction of `TensorFlow Lite`_, making it the go-to for industrial enterprise users looking to deploy ML models.
Overall, TensorFlow 2 is a mature framework which has been around in some form since 2015, has already needed to reinvent itself a couple of times, and one of its main advantages currently is the very strong bindings for edge and mobile devices via TensorFlow Lite.
Another advantage is the inertia it has with industrial users who adopted TensorFlow in the past years and haven’t transitioned.
DEX Language |dex-language|
---------------------------
Since 2020, `Adam Paszke`_ (a co-creator of `PyTorch`_ and a major `JAX`_ contributor) has stopped working much on either PyTorch or JAX, and has instead been spending his time working on the `Dex language`_, which looks to combine the clarity and safety of high-level functional languages with the efficiency and parallelism of low-level numerical languages, avoiding the need to compose primitive bulk-array operations.
They propose an explicit nested indexing style that mirrors application of functions to arguments.
The goal of the project is to explore: type systems for array programming, mathematical program transformations like differentiation and integration, user-directed compilation to parallel hardware, and interactive and incremental numerical programming and visualization.
It is quite early and still in an experimental phase, but this framework would provide hugely significant fundamental improvements over all existing frameworks if it reaches a mature stage of development.
The language is built on top of `Haskell`_.
| ivy/docs/overview/related_work/frameworks.rst/0 | {
"file_path": "ivy/docs/overview/related_work/frameworks.rst",
"repo_id": "ivy",
"token_count": 5739
} | 5 |
import ast
import astunparse
import inspect
replace_map = {}
def replace_with(new_func):
"""Decorate a function/method/attribute to be replaced by another.
Parameters
----------
new_func
The function that will replace the original.
"""
def decorator(original_func):
if not callable(original_func) or not callable(new_func):
raise TypeError(
f"Both '{original_func.__name__}' and '{new_func.__name__}' should be"
" callable."
)
if inspect.getfullargspec(original_func) != inspect.getfullargspec(new_func):
raise ValueError(
f"Replacement function '{new_func.__name__}' arguments don't match"
f" '{original_func.__name__}' arguments."
)
new_func_name = f"{original_func.__name__}_replacement"
if new_func_name in globals():
raise NameError(
f"Name '{new_func_name}' already exists in global namespace."
)
globals()[new_func_name] = new_func
replace_map[original_func.__name__] = new_func_name
return original_func
return decorator
class ReplaceFunction(ast.NodeTransformer):
"""AST Node Transformer to replace function calls, methods, and
attributes."""
def visit_Attribute(self, node):
if (
isinstance(node.value, ast.Name)
and f"{node.value.id}.{node.attr}" in replace_map
):
return ast.copy_location(
ast.Name(replace_map[f"{node.value.id}.{node.attr}"], node.ctx), node
)
return node
def visit_Call(self, node):
if (
isinstance(node.func, ast.Attribute)
and f"{node.func.value.id}.{node.func.attr}" in replace_map
):
node.func = ast.Name(
replace_map[f"{node.func.value.id}.{node.func.attr}"], node.func.ctx
)
elif isinstance(node.func, ast.Name) and node.func.id in replace_map:
node.func.id = replace_map[node.func.id]
return node
def transform_function(func):
"""Transform the function by replacing its calls based on the
replace_map."""
source = inspect.getsource(func)
tree = ast.parse(source)
transformed_tree = ReplaceFunction().visit(tree)
transformed_code = astunparse.unparse(transformed_tree)
namespace = {}
exec(transformed_code, globals(), namespace)
return namespace[func.__name__]
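
# Example usage (an illustrative sketch only; `scaled_add` and `scaled_add_new`
# are hypothetical functions, not part of this module):
#
#     def scaled_add_new(x, y, scale=1.0):
#         return (x + y) * scale * 2
#
#     @replace_with(scaled_add_new)
#     def scaled_add(x, y, scale=1.0):
#         return (x + y) * scale
#
#     def model(x, y):
#         return scaled_add(x, y, scale=0.5)
#
#     # `transform_function` rewrites `model` so that calls to `scaled_add`
#     # resolve to the registered replacement instead
#     transformed_model = transform_function(model)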
| ivy/ivy/compiler/replace_with.py/0 | {
"file_path": "ivy/ivy/compiler/replace_with.py",
"repo_id": "ivy",
"token_count": 1100
} | 6 |
# global
import abc
from typing import Optional, Union, Tuple, List, Sequence
from numbers import Number
# local
import ivy
class _ArrayWithElementWiseExperimental(abc.ABC):
def amax(
self: ivy.Array,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.amax. This method simply
wraps the function, and so the docstring for ivy.amax also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued data type.
axis
axis or axes along which maximum values must be computed. By default, the
maximum value must be computed over the entire array. If a tuple of
integers, maximum values must be computed over multiple axes.
Default: ``None``.
keepdims
optional boolean, if ``True``, the reduced axes (dimensions) must be
included in the result as singleton dimensions, and, accordingly, the
result must be compatible with the input array
(see `broadcasting<https://data-apis.org/array-api/latest/
API_specification/broadcasting.html#broadcasting>`_).
Otherwise, if ``False``, the reduced axes (dimensions)
must not be included in the result.
Default: ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
if the maximum value was computed over the entire array, a zero-dimensional
array containing the maximum value; otherwise, a non-zero-dimensional array
containing the maximum values. The returned array must have the same
data type as ``x``.
Examples
--------
>>> x = ivy.array([3., 4., 5.])
>>> y = x.amax()
>>> print(y)
ivy.array(5.)
>>> x = ivy.array([[-1, 0, 1], [2, 3, 4]])
>>> y = x.amax(axis=1)
>>> print(y)
ivy.array([1, 4])
>>> x = ivy.array([0.1, 1.1, 2.1])
>>> y = ivy.array(0.)
>>> x.amax(out=y)
>>> print(y)
ivy.array(2.1)
"""
return ivy.amax(self._data, axis=axis, keepdims=keepdims, out=out)
def amin(
self: ivy.Array,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.amin. This method simply
wraps the function, and so the docstring for ivy.amin also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued data type.
axis
axis or axes along which minimum values must be computed. By default, the
minimum value must be computed over the entire array. If a tuple of
integers, minimum values must be computed over multiple axes.
Default: ``None``.
keepdims
optional boolean, if ``True``, the reduced axes (dimensions) must be
included in the result as singleton dimensions, and, accordingly, the
result must be compatible with the input array
(see `broadcasting<https://data-apis.org/array-api/latest/
API_specification/broadcasting.html#broadcasting>`_). Otherwise,
if ``False``, the reduced axes (dimensions)
must not be included in the result.
Default: ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
if the minimum value was computed over the entire array, a zero-dimensional
array containing the minimum value; otherwise, a non-zero-dimensional array
containing the minimum values. The returned array must have the same
data type as ``x``.
Examples
--------
>>> x = ivy.array([3., 4., 5.])
>>> y = x.amin()
>>> print(y)
ivy.array(3.)
>>> x = ivy.array([[-1, 0, 1], [2, 3, 4]])
>>> y = x.amin(axis=1)
>>> print(y)
ivy.array([-1, 2])
>>> x = ivy.array([0.1, 1.1, 2.1])
>>> y = ivy.array(0.)
>>> x.amin(out=y)
>>> print(y)
ivy.array(0.1)
"""
return ivy.amin(self._data, axis=axis, keepdims=keepdims, out=out)
def lgamma(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.lgamma. This method simply
wraps the function, and so the docstring for ivy.lgamma also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the evaluated result for each element in ``self``.
The returned array must have a real-valued floating-point data
type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([1., 2., 3.])
>>> y = x.lgamma()
>>> print(y)
ivy.array([0., 0., 0.69314718])
>>> x = ivy.array([4.5, -4, -5.6])
>>> x.lgamma(out = x)
>>> print(x)
ivy.array([2.45373654, inf, -4.6477685 ])
"""
return ivy.lgamma(self._data, out=out)
def sinc(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.sinc. This method simply
wraps the function, and so the docstring for ivy.sinc also applies to
this method with minimal changes.
Parameters
----------
self
input array whose elements are each expressed in radians. Should have a
floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the sinc of each element in ``self``. The returned
array must have a floating-point data type determined by
:ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([0.5, 1.5, 2.5, 3.5])
>>> y = x.sinc()
>>> print(y)
ivy.array([0.637,-0.212,0.127,-0.0909])
"""
return ivy.sinc(self._data, out=out)
def fmod(
self: ivy.Array,
x2: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.fmod. This method simply
wraps the function, and so the docstring for ivy.fmod also applies to
this method with minimal changes.
Parameters
----------
self
First input array.
x2
Second input array
out
optional output array, for writing the result to.
Returns
-------
ret
Array with element-wise remainder of divisions.
Examples
--------
>>> x1 = ivy.array([2, 3, 4])
>>> x2 = ivy.array([1, 5, 2])
>>> x1.fmod(x2)
ivy.array([ 0, 3, 0])
>>> x1 = ivy.array([ivy.nan, 0, ivy.nan])
>>> x2 = ivy.array([0, ivy.nan, ivy.nan])
>>> x1.fmod(x2)
ivy.array([ nan, nan, nan])
"""
return ivy.fmod(self._data, x2, out=out)
def fmax(
self: ivy.Array,
x2: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.fmax. This method simply
wraps the function, and so the docstring for ivy.fmax also applies to
this method with minimal changes.
Parameters
----------
self
First input array.
x2
Second input array
out
optional output array, for writing the result to.
Returns
-------
ret
Array with element-wise maximums.
Examples
--------
>>> x1 = ivy.array([2, 3, 4])
>>> x2 = ivy.array([1, 5, 2])
>>> ivy.fmax(x1, x2)
ivy.array([ 2., 5., 4.])
>>> x1 = ivy.array([ivy.nan, 0, ivy.nan])
>>> x2 = ivy.array([0, ivy.nan, ivy.nan])
>>> x1.fmax(x2)
ivy.array([ 0, 0, nan])
"""
return ivy.fmax(self._data, x2, out=out)
def float_power(
self: Union[ivy.Array, float, list, tuple],
x2: Union[ivy.Array, float, list, tuple],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.float_power. This method
simply wraps the function, and so the docstring for ivy.float_power
also applies to this method with minimal changes.
Parameters
----------
self
Array-like with elements to raise in power.
x2
Array-like of exponents. If x1.shape != x2.shape,
they must be broadcastable to a common shape
(which becomes the shape of the output).
out
optional output array, for writing the result to.
Returns
-------
ret
The bases in x1 raised to the exponents in x2.
This is a scalar if both x1 and x2 are scalars
Examples
--------
>>> x1 = ivy.array([1, 2, 3, 4, 5])
>>> x1.float_power(3)
ivy.array([1., 8., 27., 64., 125.])
>>> x1 = ivy.array([1, 2, 3, 4, 5])
>>> x2 = ivy.array([2, 3, 3, 2, 1])
>>> x1.float_power(x2)
ivy.array([1., 8., 27., 16., 5.])
"""
return ivy.float_power(self._data, x2, out=out)
def copysign(
self: Union[ivy.Array, ivy.NativeArray, Number],
x2: Union[ivy.Array, ivy.NativeArray, Number],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.copysign. This method
simply wraps the function, and so the docstring for ivy.copysign also
applies to this method with minimal changes.
Parameters
----------
x1
Array or scalar to change the sign of
x2
Array or scalar from which the new signs are applied
Unsigned zeroes are considered positive.
out
optional output array, for writing the result to.
Returns
-------
ret
x1 with the signs of x2.
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> x1 = ivy.array([0, 1, 2, 3])
>>> x2 = ivy.array([-1, 1, -2, 2])
>>> x1.copysign(x2)
ivy.array([-0., 1., -2., 3.])
>>> x2.copysign(-1)
ivy.array([-1., -1., -2., -2.])
"""
return ivy.copysign(self._data, x2, out=out)
def count_nonzero(
self: ivy.Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.count_nonzero. This method
simply wraps the function, and so the docstring for ivy.count_nonzero
also applies to this method with minimal changes.
Parameters
----------
self
input array for which to count non-zeros.
axis
optional axis or tuple of axes along which to count non-zeros. Default is
None, meaning that non-zeros will be counted along a flattened
version of the input array.
keepdims
optional, if this is set to True, the axes that are counted are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
dtype
optional output dtype. Default is of type integer.
out
optional output array, for writing the result to.
Returns
-------
ret
Number of non-zero values in the array along a given axis. Otherwise,
the total number of non-zero values in the array is returned.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> x.count_nonzero()
ivy.array(3)
>>> x = ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x.count_nonzero(axis=0)
ivy.array([[1, 2],
[2, 2]])
>>> x = ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x.count_nonzero(axis=(0,1), keepdims=True)
ivy.array([[[3, 4]]])
"""
return ivy.count_nonzero(
self._data, axis=axis, keepdims=keepdims, dtype=dtype, out=out
)
def nansum(
self: ivy.Array,
/,
*,
axis: Optional[Union[tuple, int]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
keepdims: bool = False,
out: Optional[ivy.Container] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.nansum. This method simply
wraps the function, and so the docstring for ivy.nansum also applies to
this method with minimal changes.
Parameters
----------
self
Input array.
axis
Axis or axes along which the sum is computed.
The default is to compute the sum of the flattened array.
dtype
The type of the returned array and of the accumulator in
which the elements are summed. By default, the dtype of input is used.
keepdims
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
A new array holding the result is returned unless out is specified,
            in which case it is returned.
Examples
--------
>>> a = ivy.array([[ 2.1, 3.4, ivy.nan], [ivy.nan, 2.4, 2.1]])
>>> ivy.nansum(a)
10.0
>>> ivy.nansum(a, axis=0)
ivy.array([2.1, 5.8, 2.1])
>>> ivy.nansum(a, axis=1)
ivy.array([5.5, 4.5])
"""
return ivy.nansum(
self._data, axis=axis, dtype=dtype, keepdims=keepdims, out=out
)
def isclose(
self: ivy.Array,
b: ivy.Array,
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.isclose. This method simply
wraps the function, and so the docstring for ivy.isclose also applies
to this method with minimal changes.
Parameters
----------
self
First input array.
b
Second input array.
rtol
The relative tolerance parameter.
atol
The absolute tolerance parameter.
equal_nan
Whether to compare NaN's as equal. If True, NaN's in a will be
considered equal to NaN's in b in the output array.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
A new array holding the result is returned unless out is specified,
            in which case it is returned.
Examples
--------
>>> a = ivy.array([[ 2.1, 3.4, ivy.nan], [ivy.nan, 2.4, 2.1]])
>>> b = ivy.array([[ 2.1, 3.4, ivy.nan], [ivy.nan, 2.4, 2.1]])
>>> a.isclose(b)
ivy.array([[True, True, False],
[False, True, True]])
>>> a.isclose(b, equal_nan=True)
ivy.array([[True, True, True],
[True, True, True]])
>>> a=ivy.array([1.0, 2.0])
>>> b=ivy.array([1.0, 2.001])
>>> a.isclose(b, atol=0.0)
ivy.array([True, False])
>>> a.isclose(b, rtol=0.01, atol=0.0)
ivy.array([True, True])
"""
return ivy.isclose(
self._data, b, rtol=rtol, atol=atol, equal_nan=equal_nan, out=out
)
def signbit(
self: Union[ivy.Array, float, int, list, tuple],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.signbit. This method simply
wraps the function, and so the docstring for ivy.signbit also applies
to this method with minimal changes.
Parameters
----------
self
Array-like input.
out
optional output array, for writing the result to.
Returns
-------
ret
Element-wise signbit of x.
Examples
--------
>>> x = ivy.array([1, -2, 3])
>>> x.signbit()
ivy.array([False, True, False])
"""
return ivy.signbit(self._data, out=out)
def hypot(
self: ivy.Array,
x2: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.hypot. This method simply
wraps the function, and so the docstring for ivy.hypot also applies to
this method with minimal changes.
Parameters
----------
self
First input array
x2
Second input array
out
Optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
An array containing the hypotenuse computed from each element of the
input arrays.
Examples
--------
>>> x = ivy.array([3.0, 4.0, 5.0])
>>> y = ivy.array([4.0, 5.0, 6.0])
>>> x.hypot(y)
ivy.array([5.0, 6.4031, 7.8102])
"""
return ivy.hypot(self._data, x2, out=out)
def allclose(
self: ivy.Array,
x2: ivy.Array,
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[ivy.Container] = None,
) -> bool:
"""ivy.Array instance method variant of ivy.allclose. This method
simply wraps the function, and so the docstring for ivy.allclose also
applies to this method with minimal changes.
Parameters
----------
self
First input array.
x2
Second input array.
rtol
The relative tolerance parameter.
atol
The absolute tolerance parameter.
equal_nan
Whether to compare NaN's as equal. If True, NaN's in a will be
considered equal to NaN's in b in the output array.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
Returns True if the two arrays are equal within the given tolerance;
False otherwise.
Examples
--------
>>> x1 = ivy.array([1e10, 1e-7])
>>> x2 = ivy.array([1.00001e10, 1e-8])
>>> y = x1.allclose(x2)
>>> print(y)
ivy.array(False)
>>> x1 = ivy.array([1.0, ivy.nan])
>>> x2 = ivy.array([1.0, ivy.nan])
>>> y = x1.allclose(x2, equal_nan=True)
>>> print(y)
ivy.array(True)
>>> x1 = ivy.array([1e-10, 1e-10])
>>> x2 = ivy.array([1.00001e-10, 1e-10])
>>> y = x1.allclose(x2, rtol=0.005, atol=0.0)
>>> print(y)
ivy.array(True)
"""
return ivy.allclose(
self._data, x2, rtol=rtol, atol=atol, equal_nan=equal_nan, out=out
)
def diff(
self: ivy.Array,
/,
*,
n: int = 1,
axis: int = -1,
prepend: Optional[Union[ivy.Array, ivy.NativeArray, int, list, tuple]] = None,
append: Optional[Union[ivy.Array, ivy.NativeArray, int, list, tuple]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.diff. This method simply
wraps the function, and so the docstring for ivy.diff also applies to
this method with minimal changes.
Parameters
----------
self
array-like input.
n
The number of times values are differenced. If zero, the input is returned
as-is.
axis
The axis along which the difference is taken, default is the last axis.
prepend,append
Values to prepend/append to x along given axis prior to performing the
difference. Scalar values are expanded to arrays with length 1 in the
            direction of axis and the shape of the input array along all other
axes. Otherwise the dimension and shape must match x except along axis.
out
optional output array, for writing the result to.
Returns
-------
ret
Returns the n-th discrete difference along the given axis.
Examples
--------
>>> x = ivy.array([1, 2, 4, 7, 0])
>>> x.diff()
ivy.array([ 1, 2, 3, -7])
"""
return ivy.diff(
self._data, n=n, axis=axis, prepend=prepend, append=append, out=out
)
def fix(
self: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.fix. This method simply
wraps the function, and so the docstring for ivy.fix also applies to
this method with minimal changes.
Parameters
----------
self
Array input.
out
optional output array, for writing the result to.
Returns
-------
ret
Array of floats with elements corresponding to input elements
rounded to nearest integer towards zero, element-wise.
Examples
--------
>>> x = ivy.array([2.1, 2.9, -2.1])
>>> x.fix()
ivy.array([ 2., 2., -2.])
"""
return ivy.fix(self._data, out=out)
def nextafter(
self: ivy.Array,
x2: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.nextafter. This method
simply wraps the function, and so the docstring for ivy.nextafter also
applies to this method with minimal changes.
Parameters
----------
self
First input array.
x2
Second input array.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
The next representable values of x1 in the direction of x2.
Examples
--------
>>> x1 = ivy.array([1.0e-50, 2.0e+50])
>>> x2 = ivy.array([2.0, 1.0])
>>> x1.nextafter(x2)
        ivy.array([1.4013e-45, 3.4028e+38])
"""
return ivy.nextafter(self._data, x2, out=out)
def zeta(
self: ivy.Array,
q: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.zeta. This method simply
wraps the function, and so the docstring for ivy.zeta also applies to
this method with minimal changes.
Parameters
----------
self
First input array.
q
Second input array.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
Array with values computed from zeta function from
input arrays' values.
Examples
--------
>>> x = ivy.array([5.0, 3.0])
>>> q = ivy.array([2.0])
>>> x.zeta(q)
ivy.array([0.0369, 0.2021])
"""
return ivy.zeta(self._data, q, out=out)
def gradient(
self: Union[ivy.Array, ivy.NativeArray],
/,
*,
spacing: Union[int, list, tuple] = 1,
edge_order: int = 1,
axis: Optional[Union[int, list, tuple]] = None,
) -> Union[ivy.Array, List[ivy.Array]]:
"""Calculate gradient of x with respect to (w.r.t.) spacing.
Parameters
----------
self
input array representing outcomes of the function
spacing
            if not given, the indices of x will be used
            if a scalar, the indices of x will be scaled by this value
            if an array, the gradient of x is computed w.r.t. the values in spacing
edge_order
1 or 2, for 'first order' and 'second order' estimation
of boundary values of gradient respectively.
Note: jax supports edge_order=1 case only
axis
dimension(s) to approximate the gradient over
by default partial gradient is computed in every dimension
Returns
-------
ret
Array with values computed from gradient function from
inputs
Examples
--------
>>> spacing = (ivy.array([-2., -1., 1., 4.]),)
>>> x = ivy.array([4., 1., 1., 16.], )
>>> ivy.gradient(x, spacing=spacing)
ivy.array([-3., -2., 2., 5.])
>>> x = ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> ivy.gradient(x)
[ivy.array([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]), ivy.array([[ 1. , 1.5, 3. , 4. ],
[10. , 15. , 30. , 40. ]])]
>>> x = ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> ivy.gradient(x, spacing=2.0)
[ivy.array([[ 4.5, 9. , 18. , 36. ],
[ 4.5, 9. , 18. , 36. ]]), ivy.array([[ 0.5 , 0.75, 1.5 , 2. ],
[ 5. , 7.5 , 15. , 20. ]])]
>>> x = ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> ivy.gradient(x, axis=1)
ivy.array([[ 1. , 1.5, 3. , 4. ],
[10. , 15. , 30. , 40. ]])
>>> x = ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> ivy.gradient(x, spacing=[3., 2.])
[ivy.array([[ 3., 6., 12., 24.],
[ 3., 6., 12., 24.]]), ivy.array([[ 0.5 , 0.75, 1.5 , 2. ],
[ 5. , 7.5 , 15. , 20. ]])]
>>> spacing = (ivy.array([0, 2]), ivy.array([0, 3, 6, 9]))
>>> ivy.gradient(x, spacing=spacing)
[ivy.array([[ 4.5, 9. , 18. , 36. ],
[ 4.5, 9. , 18. , 36. ]]), ivy.array([[ 0.33333333, 0.5, 1., 1.33333333],
[ 3.33333333, 5. , 10. , 13.33333333]])]
"""
return ivy.gradient(
self._data, spacing=spacing, axis=axis, edge_order=edge_order
)
def xlogy(
self: ivy.Array,
y: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.xlogy. This method simply
wraps the function, and so the docstring for ivy.xlogy also applies to
this method with minimal changes.
Parameters
----------
self
First input array.
y
Second input array.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
            x multiplied by the natural logarithm of y, element-wise,
            with zeros where x is zero.
Examples
--------
>>> x = ivy.zeros(3)
>>> y = ivy.array([-1.0, 0.0, 1.0])
>>> x.xlogy(y)
ivy.array([0.0, 0.0, 0.0])
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> y = ivy.array([3.0, 2.0, 1.0])
>>> x.xlogy(y)
ivy.array([1.0986, 1.3863, 0.0000])
"""
return ivy.xlogy(self._data, y, out=out)
def binarizer(
self: ivy.Array, /, *, threshold: float = 0, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""Map the values of the input tensor to either 0 or 1, element-wise,
based on the outcome of a comparison against a threshold value.
Parameters
----------
self
Data to be binarized
threshold
Values greater than this are
mapped to 1, others to 0.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
Binarized output data
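        Examples
        --------
        A small illustrative sketch (the exact output dtype may vary by backend):
        >>> x = ivy.array([0.3, 0.7, 1.2])
        >>> y = x.binarizer(threshold=0.5)
        >>> print(y)
        ivy.array([0., 1., 1.])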
"""
return ivy.binarizer(self._data, threshold=threshold, out=out)
def conj(self: ivy.Array, /, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.conj. This method simply
wraps the function, and so the docstring for ivy.conj also applies to
this method with minimal changes.
Parameters
----------
self
input array.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
an array containing the complex conjugates of values in the input array,
with the same dtype as the input array.
Examples
--------
>>> x = ivy.array([4+3j, 6+2j, 1-6j])
>>> x.conj()
ivy.array([4-3j, 6-2j, 1+6j])
"""
return ivy.conj(self._data, out=out)
def lerp(
self: ivy.Array,
end: ivy.Array,
weight: Union[ivy.Array, float],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.lerp. This method simply
wraps the function, and so the docstring for ivy.lerp also applies to
this method with minimal changes.
Parameters
----------
self
Array of starting points
end
Array of ending points
weight
            Weight for the interpolation formula, array or scalar.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
The linear interpolation between array self and array end based on
scalar or array weight
self + ((end - self) * weight)
Examples
--------
>>> x = ivy.array([1.0, 2.0, 3.0, 4.0])
>>> end = ivy.array([10.0, 10.0, 10.0, 10.0])
>>> weight = 0.5
>>> x.lerp(end, weight)
ivy.array([5.5, 6. , 6.5, 7. ])
"""
return ivy.lerp(self, end, weight, out=out)
def ldexp(
self: ivy.Array,
x2: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.ldexp. This method simply
wraps the function, and so the docstring for ivy.ldexp also applies to
this method with minimal changes.
Parameters
----------
self
Input array.
x2
The array of exponents.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
            The result of x1 * (2**x2), element-wise.
Examples
--------
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> y = ivy.array([3.0, 2.0, 1.0])
>>> x.ldexp(y)
ivy.array([8.0, 8.0, 6.0])
"""
return ivy.ldexp(self._data, x2, out=out)
def frexp(
self: ivy.Array, /, *, out: Optional[Tuple[ivy.Array, ivy.Array]] = None
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.frexp. This method simply
wraps the function, and so the docstring for ivy.frexp also applies to
this method with minimal changes.
Parameters
----------
self
Input array.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
            The mantissa and the exponent of the input, element-wise.
Examples
--------
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> x.frexp()
ivy.array([[0.5, 0.5, 0.75], [1, 2, 2]])
"""
return ivy.frexp(self._data, out=out)
def modf(
self: ivy.Array, /, *, out: Optional[Tuple[ivy.Array, ivy.Array]] = None
) -> Tuple[ivy.Array, ivy.Array]:
"""ivy.Array instance method variant of ivy.modf. This method simply
wraps the function, and so the docstring for ivy.modf also applies to
this method with minimal changes.
Parameters
----------
self
Input array.
out
Alternate output arrays in which to place the result.
The default is None.
Returns
-------
ret
The fractional and integral parts of the input array.
Examples
--------
>>> x = ivy.array([1.5, 2.7, 3.9])
>>> x.modf()
(ivy.array([0.5, 0.7, 0.9]), ivy.array([1, 2, 3]))
"""
return ivy.modf(self._data, out=out)
def digamma(
self: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.digamma. This method simply
wraps the function, and so the docstring for ivy.digamma also applies
to this method with minimal changes.
Note
----
The Ivy version only accepts real-valued inputs.
Parameters
----------
self
Input array.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
Array with values computed from digamma function from
input arrays' values, element-wise.
Examples
--------
>>> x = ivy.array([.9, 3, 3.2])
        >>> y = ivy.digamma(x)
        >>> print(y)
        ivy.array([-0.7549271 0.92278427 0.9988394])
"""
return ivy.digamma(self._data, out=out)
def sparsify_tensor(
self: ivy.Array,
card: int,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array class method variant of ivy.sparsify_tensor. This method
simply wraps the function, and so the docstring for ivy.sparsify_tensor
also applies to this method with minimal changes.
Parameters
----------
self : array
The tensor to sparsify.
card : int
The number of values to keep.
out : array, optional
Optional output array, for writing the result to.
Returns
-------
ret : array
The sparsified tensor.
Examples
--------
>>> x = ivy.arange(100)
>>> x = ivy.reshape(x, (10, 10))
>>> x.sparsify_tensor(10)
ivy.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[90, 91, 92, 93, 94, 95, 96, 97, 98, 99]])
"""
return ivy.sparsify_tensor(self._data, card, out=out)
def erfc(
self: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.erfc. This method simply
wraps the function, and so the docstring for ivy.erfc also applies to
this method with minimal changes.
Parameters
----------
self
Input array with real or complex valued argument.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
Values of the complementary error function.
Examples
--------
>>> x = ivy.array([0, -1., 10.])
>>> x.erfc()
ivy.array([1.00000000e+00, 1.84270084e+00, 2.80259693e-45])
"""
return ivy.erfc(self._data, out=out)
def erfinv(
self: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.erfinv. This method simply
wraps the function, and so the docstring for ivy.erfinv also applies to
this method with minimal changes.
Parameters
----------
self
Input array with real or complex valued argument.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
Values of the inverse error function.
Examples
--------
        >>> x = ivy.array([0.5])
        >>> x.erfinv()
        ivy.array([0.47693628])
"""
return ivy.erfinv(self._data, out=out)
| ivy/ivy/data_classes/array/experimental/elementwise.py/0 | {
"file_path": "ivy/ivy/data_classes/array/experimental/elementwise.py",
"repo_id": "ivy",
"token_count": 18642
} | 7 |
# global
import abc
from typing import Union, Optional
# local
import ivy
# ToDo: implement all methods here as public instance methods
class _ArrayWithGradients(abc.ABC):
def stop_gradient(
self: ivy.Array,
/,
*,
preserve_type: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.stop_gradient. This method
simply wraps the function, and so the docstring for ivy.stop_gradient
also applies to this method with minimal changes.
Parameters
----------
self
Array for which to stop the gradient.
preserve_type
Whether to preserve gradient computation on ivy.Array instances. Default is
True.
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
The same array x, but with no gradient information.
Examples
--------
>>> x = ivy.array([1., 2., 3.])
>>> y = x.stop_gradient(preserve_type=True)
>>> print(y)
ivy.array([1., 2., 3.])
"""
return ivy.stop_gradient(self, preserve_type=preserve_type, out=out)
def adam_step(
self: ivy.Array,
mw: Union[ivy.Array, ivy.NativeArray],
vw: Union[ivy.Array, ivy.NativeArray],
step: Union[int, float],
/,
*,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-7,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.adam_step. This method
simply wraps the function, and so the docstring for ivy.adam_step also
applies to this method with minimal changes.
Parameters
----------
self
            Derivatives of the cost c with respect to the weights ws, [dc/dw for w in ws].
mw
running average of the gradients.
vw
running average of second moments of the gradients.
step
training step.
beta1
gradient forgetting factor (Default value = 0.9).
beta2
second moment of gradient forgetting factor (Default value = 0.999).
epsilon
divisor during adam update, preventing division by zero
(Default value = 1e-7).
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The adam step delta.
Examples
--------
With :class:`ivy.Array` inputs:
>>> dcdw = ivy.array([1, 2, 3])
>>> mw = ivy.ones(3)
>>> vw = ivy.ones(1)
>>> step = ivy.array(3)
>>> adam_step_delta = dcdw.adam_step(mw, vw, step)
>>> print(adam_step_delta)
(ivy.array([0.2020105,0.22187898,0.24144873]),
ivy.array([1.,1.10000002,1.20000005]),
ivy.array([1.,1.00300002,1.00800002]))
"""
return ivy.adam_step(
self, mw, vw, step, beta1=beta1, beta2=beta2, epsilon=epsilon, out=out
)
def optimizer_update(
self: ivy.Array,
effective_grad: Union[ivy.Array, ivy.NativeArray],
lr: Union[float, ivy.Array, ivy.NativeArray],
/,
*,
stop_gradients: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.optimizer_update. This
method simply wraps the function, and so the docstring for
ivy.optimizer_update also applies to this method with minimal changes.
Parameters
----------
self
Weights of the function to be updated.
effective_grad
Effective gradients of the cost c with respect to the weights ws,
[dc/dw for w in ws].
lr
Learning rate(s), the rate(s) at which the weights should be updated
relative to the gradient.
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The new function weights ws_new, following the optimizer updates.
Examples
--------
>>> w = ivy.array([1., 2., 3.])
>>> effective_grad = ivy.zeros(3)
>>> lr = 3e-4
>>> ws_new = w.optimizer_update(effective_grad, lr)
>>> print(ws_new)
ivy.array([1., 2., 3.])
"""
return ivy.optimizer_update(
self, effective_grad, lr, stop_gradients=stop_gradients, out=out
)
def gradient_descent_update(
self: ivy.Array,
dcdw: Union[ivy.Array, ivy.NativeArray],
lr: Union[float, ivy.Array, ivy.NativeArray],
/,
*,
stop_gradients: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.gradient_descent_update.
This method simply wraps the function, and so the docstring for
ivy.gradient_descent_update also applies to this method with minimal
changes.
Parameters
----------
self
Weights of the function to be updated.
dcdw
            Derivatives of the cost c with respect to the weights ws, [dc/dw for w in ws].
lr
Learning rate(s), the rate(s) at which the weights should be
updated relative to the gradient.
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The new weights, following the gradient descent updates.
Examples
--------
With :class:`ivy.Array` inputs:
>>> w = ivy.array([[1., 2, 3],
... [4, 6, 1],
... [1, 0, 7]])
>>> dcdw = ivy.array([[0.5, 0.2, 0.1],
... [0.3, 0.6, 0.4],
... [0.4, 0.7, 0.2]])
>>> lr = ivy.array(0.1)
>>> new_weights = w.gradient_descent_update(dcdw, lr, stop_gradients = True)
>>> print(new_weights)
ivy.array([[ 0.95, 1.98, 2.99],
... [ 3.97, 5.94, 0.96],
... [ 0.96, -0.07, 6.98]])
"""
return ivy.gradient_descent_update(
self, dcdw, lr, stop_gradients=stop_gradients, out=out
)
def lars_update(
self: ivy.Array,
dcdw: Union[ivy.Array, ivy.NativeArray],
lr: Union[float, ivy.Array, ivy.NativeArray],
/,
*,
decay_lambda: float = 0,
stop_gradients: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.lars_update. This method
simply wraps the function, and so the docstring for ivy.lars_update
also applies to this method with minimal changes.
Parameters
----------
self
Weights of the function to be updated.
dcdw
            Derivatives of the cost c with respect to the weights ws, [dc/dw for w in ws].
lr
Learning rate, the rate at which the weights should be updated relative to
the gradient.
decay_lambda
The factor used for weight decay. Default is zero.
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The new function weights ws_new, following the LARS updates.
Examples
--------
With :class:`ivy.Array` inputs:
>>> w = ivy.array([[3., 1, 5],
... [7, 2, 9]])
>>> dcdw = ivy.array([[0.3, 0.1, 0.2],
... [0.1, 0.2, 0.4]])
>>> lr = ivy.array(0.1)
>>> new_weights = w.lars_update(dcdw, lr, stop_gradients = True)
>>> print(new_weights)
ivy.array([[2.34077978, 0.78025991, 4.56051969],
... [6.78026009, 1.56051981, 8.12103939]])
"""
return ivy.lars_update(
self,
dcdw,
lr,
decay_lambda=decay_lambda,
stop_gradients=stop_gradients,
out=out,
)
def adam_update(
self: ivy.Array,
dcdw: Union[ivy.Array, ivy.NativeArray],
lr: Union[float, ivy.Array, ivy.NativeArray],
mw_tm1: Union[ivy.Array, ivy.NativeArray],
vw_tm1: Union[ivy.Array, ivy.NativeArray],
step: int,
/,
*,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-7,
stop_gradients: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.adam_update. This method
simply wraps the function, and so the docstring for ivy.adam_update
also applies to this method with minimal changes.
Parameters
----------
self
Weights of the function to be updated.
dcdw
            Derivatives of the cost c with respect to the weights ws, [dc/dw for w in ws].
lr
Learning rate(s), the rate(s) at which the weights should be updated
relative to the gradient.
mw_tm1
running average of the gradients, from the previous time-step.
vw_tm1
running average of second moments of the gradients, from the previous
time-step.
step
training step.
beta1
gradient forgetting factor (Default value = 0.9).
beta2
second moment of gradient forgetting factor (Default value = 0.999).
epsilon
divisor during adam update, preventing division by zero
(Default value = 1e-7).
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The new function weights ws_new, and also new mw and vw, following the adam
updates.
Examples
--------
With :class:`ivy.Array` inputs:
>>> w = ivy.array([1., 2, 3.])
>>> dcdw = ivy.array([0.2,0.1,0.3])
>>> lr = ivy.array(0.1)
>>> vw_tm1 = ivy.zeros(1)
>>> mw_tm1 = ivy.zeros(3)
>>> step = 2
>>> updated_weights = w.adam_update(dcdw, lr, mw_tm1, vw_tm1, step)
>>> print(updated_weights)
(ivy.array([0.92558753, 1.92558873, 2.92558718]),
ivy.array([0.02, 0.01, 0.03]),
ivy.array([4.00000063e-05, 1.00000016e-05, 9.00000086e-05]))
"""
return ivy.adam_update(
self,
dcdw,
lr,
mw_tm1,
vw_tm1,
step,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
stop_gradients=stop_gradients,
out=out,
)
def lamb_update(
self: ivy.Array,
dcdw: Union[ivy.Array, ivy.NativeArray],
lr: Union[float, ivy.Array, ivy.NativeArray],
mw_tm1: Union[ivy.Array, ivy.NativeArray],
vw_tm1: Union[ivy.Array, ivy.NativeArray],
step: int,
/,
*,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-7,
max_trust_ratio: Union[int, float] = 10,
decay_lambda: float = 0,
stop_gradients: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.lamb_update. This method
simply wraps the function, and so the docstring for ivy.lamb_update
also applies to this method with minimal changes.
Parameters
----------
self
Weights of the function to be updated.
dcdw
            Derivatives of the cost c with respect to the weights ws, [dc/dw for w in ws].
lr
Learning rate(s), the rate(s) at which the weights should be updated
relative to the gradient.
mw_tm1
running average of the gradients, from the previous time-step.
vw_tm1
running average of second moments of the gradients, from the previous
time-step.
step
training step.
beta1
gradient forgetting factor (Default value = 0.9).
beta2
second moment of gradient forgetting factor (Default value = 0.999).
epsilon
divisor during adam update, preventing division by zero
(Default value = 1e-7).
max_trust_ratio
The maximum value for the trust ratio. Default is 10.
decay_lambda
The factor used for weight decay. Default is zero.
stop_gradients
Whether to stop the gradients of the variables after each gradient step.
Default is ``True``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The new function weights ws_new, following the LAMB updates.
Examples
--------
With :class:`ivy.Array` inputs:
>>> w = ivy.array([1., 2, 3])
>>> dcdw = ivy.array([0.5,0.2,0.1])
>>> lr = ivy.array(0.1)
>>> vw_tm1 = ivy.zeros(1)
>>> mw_tm1 = ivy.zeros(3)
>>> step = ivy.array(1)
>>> new_weights = w.lamb_update(dcdw, lr, mw_tm1, vw_tm1, step)
>>> print(new_weights)
(ivy.array([0.784, 1.78 , 2.78 ]),
ivy.array([0.05, 0.02, 0.01]),
ivy.array([2.5e-04, 4.0e-05, 1.0e-05]))
"""
return ivy.lamb_update(
self,
dcdw,
lr,
mw_tm1,
vw_tm1,
step,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
max_trust_ratio=max_trust_ratio,
decay_lambda=decay_lambda,
stop_gradients=stop_gradients,
out=out,
)
| ivy/ivy/data_classes/array/gradients.py/0 | {
"file_path": "ivy/ivy/data_classes/array/gradients.py",
"repo_id": "ivy",
"token_count": 7366
} | 8 |
"""Base Container Object."""
# global
import inspect
from itertools import chain
import re
import abc
import copy
import termcolor
import numpy as np
import json
from ivy.utils.exceptions import IvyBackendException, IvyException
try:
# noinspection PyPackageRequirements
import h5py
except ModuleNotFoundError:
h5py = None
import pickle
import random
from operator import mul
from functools import reduce as _reduce
from typing import Union, Tuple
from builtins import set
# local
import ivy
ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
def _is_jsonable(x):
try:
json.dumps(x)
return True
except (TypeError, OverflowError):
return False
def _repr(x):
try:
return x.__repr__()
except TypeError:
return str(x)
# noinspection PyMissingConstructor
class ContainerBase(dict, abc.ABC):
def __init__(
self,
dict_in=None,
queues=None,
queue_load_sizes=None,
container_combine_method="list_join",
queue_timeout=None,
print_limit=10,
key_length_limit=None,
print_indent=4,
print_line_spacing=0,
ivyh=None,
default_key_color="green",
keyword_color_dict=None,
rebuild_child_containers=False,
types_to_iteratively_nest=None,
alphabetical_keys=True,
dynamic_backend=None,
build_callable=False,
**kwargs,
):
"""Initialize container object from input dict representation.
Parameters
----------
dict_in
the dictionary the container should wrap around. Default is ``None``.
queues
Sequence of multiprocessing queues, each of which returns containers.
This enables the current container to be passed around asynchronously while
waiting for data. Default is ``None``.
queue_load_sizes
Size of leading dimension of the containers returned by each queue.
Default is ``None``.
container_combine_method
The method to use for combining containers arriving from different queues.
Default is ivy.Container.cont_list_join
queue_timeout
The timeout when waiting for containers to arrive from the queues.
Default is global.
print_limit
The total array size limit when printing the container. Default is 10.
key_length_limit
The maximum key length when printing the container. Default is ``None``.
print_indent
The number of whitespaces to use for indenting when printing the container.
Default is 4.
print_line_spacing
The number of extra newlines to use between keys when printing the
container. Default is ``0``.
ivyh
Handle to ivy module to use for the calculations. Default is ``None``, which
results in the global ivy.
default_key_color
The default key color for printing the container to the terminal.
Default is 'green'.
keyword_color_dict
A dict mapping keywords to their termcolor color codes for printing the
container. (Default value = None)
rebuild_child_containers
Whether to rebuild container found in dict_in with these constructor params.
            Default is ``False``, in which case the original containers are kept as they are.
build_callable
Whether to treat functions encountered at leaf nodes as further instructions
to build the container
types_to_iteratively_nest
The data types to nest iteratively in the dict structure, each type must be
iterable. Default is ``None``.
alphabetical_keys
Whether to sort the container keys alphabetically, or preserve the dict
order. Default is ``True``.
kwargs
keyword arguments for dict creation. Default is ``None``.
"""
self._queues = queues
self._container_combine_method = container_combine_method
if ivy.exists(self._queues):
if isinstance(self._container_combine_method, str):
self._container_combine_method = {
"list_join": self.cont_list_join,
"concat": lambda conts: self.concat(conts, 0),
}[self._container_combine_method]
self._loaded_containers_from_queues = {}
self._queue_load_sizes_cum = np.cumsum(queue_load_sizes)
self._queue_timeout = ivy.default(queue_timeout, ivy.queue_timeout)
if dynamic_backend is not None:
self._dynamic_backend = dynamic_backend
else:
self._dynamic_backend = ivy.dynamic_backend
if dict_in is None:
if kwargs:
dict_in = dict(**kwargs)
else:
dict_in = {}
elif kwargs:
raise ivy.utils.exceptions.IvyException(
"dict_in and **kwargs cannot both be specified for ivy.Container "
"constructor, please specify one or the other, not both."
)
self._config_in = {
"print_limit": print_limit,
"print_indent": print_indent,
"key_length_limit": key_length_limit,
"print_line_spacing": print_line_spacing,
"ivyh": ivyh,
"default_key_color": default_key_color,
"keyword_color_dict": keyword_color_dict,
"rebuild_child_containers": rebuild_child_containers,
"build_callable": build_callable,
"types_to_iteratively_nest": types_to_iteratively_nest,
"alphabetical_keys": alphabetical_keys,
}
self._config = {}
self.cont_inplace_update(dict_in, **self._config_in)
# Class Methods #
# --------------#
@staticmethod
def cont_multi_map_in_function(
fn,
*args,
key_chains=None,
to_apply=True,
prune_unapplied=False,
map_sequences=None,
out=None,
**kwargs,
) -> Union[Tuple[ivy.Container, ivy.Container], ivy.Container]:
inspect_fn = fn
if isinstance(fn, str):
inspect_fn = ivy.__dict__[fn]
# retrieve indices where leaves of args are also nested
arg_cont_idxs = ivy.nested_argwhere(
args, ivy.is_ivy_container, to_ignore=ivy.Container
)
# retrieve indices where leaves of kwargs are also nested
kwarg_cont_idxs = ivy.nested_argwhere(
kwargs, ivy.is_ivy_container, to_ignore=ivy.Container
)
# retrieve all the containers in args
arg_conts = ivy.multi_index_nest(args, arg_cont_idxs)
num_arg_conts = len(arg_conts)
# retrieve all the containers in kwargs
kwarg_conts = ivy.multi_index_nest(kwargs, kwarg_cont_idxs)
# Combine the retrieved containers from args and kwargs into a single list
with_out = (
inspect.signature(inspect_fn).parameters.get("out") is not None
and out is not None
)
if with_out:
out_conts = [out]
num_out_conts = 1
out_cont_idxs = []
if not ivy.is_array(out) and not ivy.is_ivy_container(out):
out_cont_idxs = ivy.nested_argwhere(
out, ivy.is_ivy_container, to_ignore=ivy.Container
)
out_conts = ivy.multi_index_nest(out, out_cont_idxs)
num_out_conts = len(out_conts)
conts = arg_conts + kwarg_conts + out_conts
else:
conts = arg_conts + kwarg_conts
ivy.utils.assertions.check_exists(
conts, message="no containers found in arguments"
)
cont0 = conts[0]
if isinstance(fn, str):
fn = cont0.cont_ivy.__dict__[fn]
# Get the function with the name fn_name, enabling containers to specify
# their backends irrespective of global ivy's backend
def map_fn(vals, _):
if with_out:
out = vals[-num_out_conts:]
del vals[-num_out_conts:]
arg_vals = vals[:num_arg_conts]
a = ivy.copy_nest(args)
a = ivy.set_nest_at_indices(a, arg_cont_idxs, arg_vals)
kwarg_vals = vals[num_arg_conts:]
kw = ivy.copy_nest(kwargs)
kw = ivy.set_nest_at_indices(kw, kwarg_cont_idxs, kwarg_vals)
if with_out:
out = out[0] if len(out) == 1 else out
return fn(*a, out=out, **kw)
else:
return fn(*a, **kw)
# Replace each container in arg and kwarg with the arrays at the leaf
# levels of that container using map_fn and call fn using those arrays
# as inputs
ret = ivy.Container.cont_multi_map(
map_fn,
conts,
key_chains,
to_apply,
prune_unapplied,
map_nests=map_sequences,
)
# Multiple containers for functions returning multiple arrays
if ivy.is_ivy_container(ret):
for values in ret.values():
if isinstance(values, (tuple, list)):
for v in values:
if ivy.is_ivy_array(v):
return ret.cont_unstack_conts(0)
if with_out:
for out_cont_idx, out_cont in zip(out_cont_idxs, out_conts):
out_cont.cont_inplace_update(ivy.index_nest(ret, out_cont_idx))
if len(out_conts) == 1:
out.cont_inplace_update(ret)
ret = out
return ret
@staticmethod
def cont_handle_inplace(ret, out):
"""Return an inplace update of out, provided it is not None, by
updating with the values in ret.
Parameters
----------
ret
The container with the return values
out
The optional out container, which is primed for being overwritten if it
exists
Returns
-------
The out container, but filled with the values from the ret container
"""
if ivy.exists(out):
out.cont_inplace_update(ret)
ret = out
return ret
@staticmethod
def cont_list_join(containers, config=None):
"""Join containers of lists together along the specified dimension.
Parameters
----------
containers
containers to list join
config
The configuration for the containers. Default is the same as container0.
Returns
-------
List joined containers, with each entry being a list of arrays
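        Examples
        --------
        A minimal sketch, assuming plain list leaves:
        >>> x = ivy.Container(a=[1, 2])
        >>> y = ivy.Container(a=[3])
        >>> z = ivy.Container.cont_list_join([x, y])
        >>> print(z.a)
        [1, 2, 3]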
"""
container0 = containers[0]
if not ivy.exists(config):
config = (
container0.cont_config if isinstance(container0, ivy.Container) else {}
)
if isinstance(container0, ivy.Container):
return_dict = {}
for key in container0.keys():
new_list = []
for container in containers:
new_list.append(container[key])
return_dict[key] = ivy.Container.cont_list_join(new_list, config)
return ivy.Container(return_dict, **config)
else:
return [item for sublist in containers for item in sublist]
@staticmethod
def cont_list_stack(containers, dim, config=None):
"""List stack containers together along the specified dimension.
Parameters
----------
containers
containers to list stack
dim
dimension along which to list stack
config
The configuration for the containers. Default is the same as container0.
Returns
-------
Stacked containers, with each entry being a list of arrays
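        Examples
        --------
        A minimal sketch; each leaf becomes a list of the corresponding arrays:
        >>> x = ivy.Container(a=ivy.array([1, 2]))
        >>> y = ivy.Container(a=ivy.array([3, 4]))
        >>> z = ivy.Container.cont_list_stack([x, y], 0)
        >>> print(len(z.a))
        2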
"""
container0 = containers[0]
if not ivy.exists(config):
config = (
container0.cont_config if isinstance(container0, ivy.Container) else {}
)
if isinstance(container0, ivy.Container):
return_dict = {}
for key in container0.keys():
return_dict[key] = ivy.Container.cont_list_stack(
[container[key] for container in containers], dim, config
)
return ivy.Container(return_dict, **config)
else:
return containers
@staticmethod
def _cont_concat_unify(containers, device, axis=0):
return ivy.concat(
[cont.to_device(device) for cont in containers.values()], axis=axis
)
@staticmethod
def _cont_sum_unify(containers, device, _=None, _1=None):
return sum(
(cont.to_device(device) for cont in containers.values()),
start=ivy.zeros([]),
)
@staticmethod
def _cont_mean_unify(containers, device, _=None, _1=None):
return ivy.Container._cont_sum_unify(containers, device) / len(containers)
@staticmethod
def cont_unify(containers, device, mode, axis=0):
"""Unify a list of containers, on arbitrary devices, to a single
container on the specified device.
Parameters
----------
containers
containers to unify
        device
The device to unify the containers to.
mode
The mode by which to unify, must be one of [ concat | mean | sum ]
axis
            The axis along which to concatenate the container, if concat mode is set.
Default is ``0``.
Returns
-------
Unified container
"""
return {
"concat": ivy.Container._cont_concat_unify,
"sum": ivy.Container._cont_sum_unify,
"mean": ivy.Container._cont_mean_unify,
}[mode](containers, device, axis)
@staticmethod
def cont_combine(*containers, config=None):
"""Combine keys and values in a sequence of containers, with priority
given to the right-most container in the case of duplicates.
Parameters
----------
containers
containers to compare
config
The configuration for the containers. Default is the same as
container_rightmost.
Returns
-------
Combined containers
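        Examples
        --------
        A minimal sketch; values from the right-most container take priority:
        >>> x = ivy.Container(a=1, b=2)
        >>> y = ivy.Container(b=3, c=4)
        >>> z = ivy.Container.cont_combine(x, y)
        >>> print(z.a, z.b, z.c)
        1 3 4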
"""
# if inputs are not dicts, then simply return the right-most value
container_rightmost = containers[-1]
if not isinstance(container_rightmost, dict):
return container_rightmost
if not ivy.exists(config):
# noinspection PyUnresolvedReferences
config = (
container_rightmost.cont_config
if isinstance(container_rightmost, ivy.Container)
else {}
)
# return if len==1
if len(containers) == 1:
return container_rightmost
# otherwise, check that the keys are aligned between each container, and apply
# this method recursively
return_dict = {}
all_keys = {
item
for sublist in [list(cont.keys()) for cont in containers]
for item in sublist
}
for key in all_keys:
keys_present = [key in cont for cont in containers]
return_dict[key] = ivy.Container.cont_combine(
*[cont[key] for cont, kp in zip(containers, keys_present) if kp],
config=config,
)
return ivy.Container(return_dict, **config)
@staticmethod
def cont_diff(
*containers,
mode="all",
diff_keys="diff",
detect_key_diffs=True,
detect_value_diffs=True,
detect_shape_diffs=True,
config=None,
):
"""Compare keys and values in a sequence of containers, returning the
single shared values where they are the same, and new nested sub-dicts
with all values where they are different.
Parameters
----------
containers
containers to compare
mode
The mode of the diff operation, returning either all keys and values,
            only those that are consistent across the containers, or only the differences.
Default is all.
diff_keys
The key/keys to add to the returned container when differences are found.
Default is ``"diff"``.
detect_key_diffs
Whether to treat different keys as detected differences. If not, the keys
among the input containers are simply combined without flagging differences.
Default is ``True``.
detect_value_diffs
Whether to treat different values as detected differences.
Default is ``True``.
detect_shape_diffs
Whether to treat different array shapes as detected differences.
Default is ``True``.
config
The configuration for the containers. Default is the same as container0.
*containers
Returns
-------
Compared containers
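        Examples
        --------
        A minimal sketch; differing leaves are nested under the diff keys:
        >>> x = ivy.Container(a=1, b=2)
        >>> y = ivy.Container(a=1, b=3)
        >>> d = ivy.Container.cont_diff(x, y)
        >>> print(d.a, d.b.diff_0, d.b.diff_1)
        1 2 3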
"""
ivy.utils.assertions.check_elem_in_list(mode, ["all", "same_only", "diff_only"])
# if inputs are not dicts, then compare their values to determine the diff dict
num_containers = len(containers)
container0 = containers[0]
if not ivy.exists(config):
config = (
container0.cont_config if isinstance(container0, ivy.Container) else {}
)
if not isinstance(container0, dict):
equal_mat = ivy.all_equal(*containers, equality_matrix=True)
if not detect_value_diffs:
equal_mat = ivy.ones_like(equal_mat)
if detect_shape_diffs:
shape_equal_mat = ivy.all_equal(
*[c.shape if ivy.is_array(c) else None for c in containers],
equality_matrix=True,
)
equal_mat = ivy.logical_and(equal_mat, shape_equal_mat)
# noinspection PyTypeChecker
if ivy.min(ivy.astype(equal_mat, "int32")) == 1:
if mode == "diff_only":
return ivy.Container(**config)
return container0
elif mode == "same_only":
return ivy.Container(**config)
else:
cont_range = range(num_containers)
diff_dict = {}
cont_dict = dict(zip(cont_range, containers))
idxs_added = []
for idx in cont_range:
if idx not in idxs_added:
idxs_to_add = ivy.argwhere(equal_mat[idx])
idxs_to_add_list = sorted(
ivy.to_numpy(idxs_to_add).reshape(-1).tolist()
)
if isinstance(diff_keys, str):
key = f"{diff_keys}_{str(idxs_to_add_list)[1:-1]}"
elif isinstance(diff_keys, (list, tuple)):
key = diff_keys[idx]
else:
raise ivy.utils.exceptions.IvyException(
"diff_keys must be either a string or list of strings,"
f" but found {diff_keys} of type {type(diff_keys)}"
)
diff_dict[key] = cont_dict[idx]
idxs_added += idxs_to_add_list
return ivy.Container(diff_dict, **config)
# otherwise, check that the keys are aligned between each container, and apply
# this method recursively
return_dict = {}
all_keys = {
item
for sublist in [list(cont.keys()) for cont in containers]
for item in sublist
}
for key in all_keys:
keys_present = [key in cont for cont in containers]
all_keys_present = sum(keys_present) == num_containers
if all_keys_present:
res = ivy.Container.cont_diff(
*[
(
cont[key]()
if cont.cont_config["build_callable"]
and callable(cont[key])
else cont[key]
)
for cont in containers
],
mode=mode,
diff_keys=diff_keys,
detect_key_diffs=detect_key_diffs,
detect_value_diffs=detect_value_diffs,
detect_shape_diffs=detect_shape_diffs,
config=config,
)
if not isinstance(res, dict) or res:
return_dict[key] = res
continue
elif sum(keys_present) == 1 and not detect_key_diffs:
if mode == "all":
return_dict[key] = containers[keys_present.index(True)][key]
continue
diff_dict = {}
for i, (key_present, cont) in enumerate(zip(keys_present, containers)):
if detect_key_diffs:
if key_present and mode != "same_only":
if isinstance(diff_keys, str):
diff_dict[diff_keys + "_" + str(i)] = cont[key]
elif isinstance(diff_keys, (list, tuple)):
diff_dict[diff_keys[i]] = cont[key]
else:
raise ivy.utils.exceptions.IvyException(
"diff_keys must be either a string or list of strings,"
f" but found {diff_keys} of type {type(diff_keys)}"
)
if diff_dict:
return_dict[key] = diff_dict
return ivy.Container(return_dict, **config)
@staticmethod
def cont_structural_diff(
*containers,
mode="all",
diff_keys="diff",
detect_key_diffs=True,
detect_shape_diffs=True,
config=None,
):
"""Compare keys and shapes in a sequence of containers, returning the
single shared values where they are the same, and new nested sub-dicts
with all values where they are different.
Parameters
----------
containers
containers to compare
mode
The mode of the diff operation, returning either all keys and values,
            only those that are consistent across the containers, or only the differences.
Default is all.
diff_keys
The key/keys to add to the returned container when differences are found.
Default is "diff".
detect_key_diffs
Whether to treat different keys as detected differences.
If not, the keys among the input containers are simply combined without
flagging differences. Default is ``True``.
detect_shape_diffs
Whether to treat different array shapes as detected differences.
Default is ``True``.
config
The configuration for the containers. Default is the same as container0.
*containers
Returns
-------
Compared containers
"""
return ivy.Container.cont_diff(
*containers,
mode=mode,
diff_keys=diff_keys,
detect_key_diffs=detect_key_diffs,
detect_value_diffs=False,
detect_shape_diffs=detect_shape_diffs,
config=config,
)
@staticmethod
def cont_multi_map(
func,
containers,
key_chains=None,
to_apply=True,
prune_unapplied=False,
key_chain="",
config=None,
map_nests=False,
assert_identical=False,
):
"""Apply function to all array values from a collection of containers.
Parameters
----------
func
Function to apply to each container entry.
containers
containers to map.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains will
be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied,
otherwise the leftmost container value is used. Default is ``False``.
key_chain
Chain of keys for this dict entry (Default value = '')
config
The configuration for the containers. Default is the same as container0.
        map_nests
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
assert_identical
Whether to assert that the input containers are identical or not.
Returns
-------
Container
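        Examples
        --------
        A minimal sketch, mapping an element-wise sum over two containers:
        >>> x = ivy.Container(a=ivy.array([1, 2]))
        >>> y = ivy.Container(a=ivy.array([3, 4]))
        >>> z = ivy.Container.cont_multi_map(lambda xs, _: xs[0] + xs[1], [x, y])
        >>> print(z.a)
        ivy.array([4, 6])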
"""
# retrieve all keys and the first container if it exists
keys = set([])
container0 = None
for container in containers:
if isinstance(container, ivy.Container):
if container0 is None:
container0 = container
keys = keys.union(container.keys())
ivy.utils.assertions.check_exists(
container0,
message="No containers found in the inputs to ivy.Container.cont_multi_map",
)
if not ivy.exists(config):
config = (
container0.cont_config if isinstance(container0, ivy.Container) else {}
)
return_dict = {}
for key in keys:
values = []
for cont in containers:
if isinstance(cont, (ivy.Container, list, tuple)) and key in cont:
values.append(cont[key])
elif not isinstance(cont, (ivy.Container, list, tuple)):
values.append(cont)
value0 = values[0]
if len(values) >= 1:
this_key_chain = key if key_chain == "" else (key_chain + "/" + key)
is_container = [ivy.is_ivy_container(x) for x in values]
def _found_in_key_chains(this_key_chain, key_chains):
if key_chains is None:
return False
for key_chain in key_chains:
if this_key_chain.startswith(key_chain):
return True
return False
if not assert_identical and not all(is_container) and any(is_container):
found = _found_in_key_chains(this_key_chain, key_chains)
if key_chains is not None:
if (found and not to_apply) or (not found and to_apply):
if prune_unapplied:
continue
return_dict[key] = value0
continue
return_dict[key] = func(values, this_key_chain)
else:
if isinstance(value0, ivy.Container):
ret = ivy.Container.cont_multi_map(
func,
values,
key_chains,
to_apply,
prune_unapplied,
this_key_chain,
config,
map_nests,
assert_identical,
)
if ret:
return_dict[key] = ret
elif (
any(isinstance(x, (list, tuple)) for x in values) and map_nests
):
ret = ivy.nested_multi_map(
lambda x, _: func(x, None), values, to_ivy=False
)
if prune_unapplied and not ret:
continue
return_dict[key] = ret
else:
found = _found_in_key_chains(this_key_chain, key_chains)
if key_chains is not None:
if (found and not to_apply) or (not found and to_apply):
if prune_unapplied:
continue
return_dict[key] = value0
continue
return_dict[key] = func(values, this_key_chain)
else:
return_dict[key] = value0
# noinspection PyProtectedMember
return ivy.Container(return_dict, **config)
@staticmethod
def cont_common_key_chains(containers):
"""Return the key-chains common across all containers.
Parameters
----------
containers
Containers to check.
Returns
-------
list of key-chains.
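        Examples
        --------
        A minimal sketch (sorted here, since the underlying set has no fixed order):
        >>> x = ivy.Container(a=1, b=dict(c=2, d=3))
        >>> y = ivy.Container(a=4, b=dict(c=5))
        >>> sorted(ivy.Container.cont_common_key_chains([x, y]))
        ['a', 'b/c']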
"""
if len(containers) == 1:
return containers[0].cont_all_key_chains()
sets = [set(cont.cont_all_key_chains()) for cont in containers]
return list(sets[0].intersection(*sets[1:]))
@staticmethod
def cont_identical(
containers,
check_types=True,
check_shapes=True,
same_arrays=True,
arrays_equal=True,
key_chains=None,
to_apply=True,
partial=False,
key_chain="",
assert_and_assign=False,
):
"""Return a single boolean as to whether the input containers have
        identical key-chains and data types.
Parameters
----------
containers
containers to check.
check_types
Whether to check if the datatypes of the leaf nodes are the same.
Default is ``True``.
check_shapes
Whether to check if the shapes of the leaf nodes are the same.
Default is ``True``.
same_arrays
Whether to check if the arrays are the exact same instances.
Default is ``True``.
arrays_equal
Whether to check if the arrays have equal values. Default is ``True``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains will
be skipped. Default is ``True``.
partial
Whether to also check for partially complete sub-containers.
Default is ``False``.
key_chain
Chain of keys for this dict entry (Default value = '')
assert_and_assign
if true, then the container being compared with is updated with the value
in the container being compared to given that the structures are congruent
Returns
-------
Boolean
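        Examples
        --------
        A minimal sketch; same_arrays is disabled so equal values are sufficient:
        >>> x = ivy.Container(a=ivy.array([1, 2]))
        >>> y = ivy.Container(a=ivy.array([1, 2]))
        >>> ivy.Container.cont_identical([x, y], same_arrays=False)
        True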
"""
if partial:
common_key_chains = ivy.Container.cont_common_key_chains(containers)
if not common_key_chains:
return False
containers = [
cont.cont_at_key_chains(common_key_chains) for cont in containers
]
keys = {i for sl in [list(cont.keys()) for cont in containers] for i in sl}
# noinspection PyProtectedMember
for key in keys:
if not min(key in cont for cont in containers):
return False
for cont in containers:
if cont.cont_config["build_callable"]:
cont[key] = cont[key]() if callable(cont[key]) else cont[key]
values = [cont[key] for cont in containers]
value_0 = values[0]
type_0 = type(value_0)
types = [type(val) for val in values]
if not min(type_n is type_0 for type_n in types):
if isinstance(value_0, ivy.Container) or check_types:
return False
if ivy.is_array(value_0):
if check_shapes:
shape_0 = value_0.shape
shapes = [val.shape for val in values]
if not min(shape_n == shape_0 for shape_n in shapes):
return False
if same_arrays:
id_0 = id(value_0)
ids = [id(val) for val in values]
if not min(id_n == id_0 for id_n in ids):
return False
elif arrays_equal:
if not ivy.all_equal(*values):
return False
if assert_and_assign:
containers[0].cont_set_at_key_chain(
key, containers[1][key], inplace=True
)
this_key_chain = key if key_chain == "" else f"{key_chain}/{key}"
if isinstance(value_0, ivy.Container):
ret = ivy.Container.cont_identical(
values,
check_types,
check_shapes,
same_arrays,
arrays_equal,
key_chains,
to_apply,
partial,
this_key_chain,
assert_and_assign=assert_and_assign,
)
if not ret:
return False
if assert_and_assign:
# TODO optimise this further, such that keys are assigned
# without waiting for more building
containers[0][key].cont_set_at_key_chains(
target_dict=containers[1][key], inplace=True
)
return True
@staticmethod
def cont_assert_identical(
containers,
check_types=True,
check_shapes=True,
same_arrays=True,
arrays_equal=True,
key_chains=None,
to_apply=True,
partial=False,
):
"""Assert whether the input containers are identical. Otherwise, the
diff is shown in an exception.
Parameters
----------
containers
containers to check.
check_types
Whether to check if the datatypes of the leaf nodes are the same.
Default is ``True``.
check_shapes
Whether to check if the shapes of the leaf nodes are the same.
Default is ``True``.
same_arrays
Whether to check if the arrays are the exact same instances.
Default is ``True``.
arrays_equal
Whether to check if the arrays have equal values. Default is ``True``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
partial
Whether to also check for partially complete sub-containers.
Default is ``False``.
"""
ivy.utils.assertions.check_true(
ivy.Container.cont_identical(
containers,
check_types,
check_shapes,
same_arrays,
arrays_equal,
key_chains,
to_apply,
partial,
),
f"Containers were not identical:\n\n{ivy.Container.cont_diff(*containers)}",
)
@staticmethod
def cont_identical_structure(
containers,
check_types=True,
check_shapes=True,
key_chains=None,
to_apply=True,
partial=False,
key_chain="",
assert_and_assign=False,
):
"""Return a single boolean as to whether the input containers have
identical structure.
Parameters
----------
containers
containers to check.
check_types
Whether to also check whether the datatypes of the leaf nodes are the same.
Default is ``True``.
check_shapes
Whether to also check whether the shapes of the leaf nodes are the same.
Default is ``True``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
partial
Whether to also check for partially complete sub-containers.
Default is ``False``.
key_chain
Chain of keys for this dict entry (Default value = '')
assert_and_assign
if true, then the container being compared with is updated with the value in
the container being compared to given that the structures are congruent
Returns
-------
Boolean
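        Examples
        --------
        A minimal sketch; values differ but keys, dtypes and shapes all match:
        >>> x = ivy.Container(a=ivy.array([1, 2]))
        >>> y = ivy.Container(a=ivy.array([3, 4]))
        >>> ivy.Container.cont_identical_structure([x, y])
        True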
"""
return ivy.Container.cont_identical(
containers,
check_types,
check_shapes,
False,
False,
key_chains,
to_apply,
partial,
key_chain,
assert_and_assign=assert_and_assign,
)
@staticmethod
def cont_assert_identical_structure(
containers,
check_types=True,
check_shapes=True,
key_chains=None,
to_apply=True,
partial=False,
assert_and_assign=False,
):
"""Assert whether the input containers have identical structure.
Otherwise, the diff is shown in an exception.
Parameters
----------
containers
containers to check.
check_types
Whether to also check whether the datatypes of the leaf nodes are the same.
Default is ``True``.
check_shapes
Whether to also check whether the shapes of the leaf nodes are the same.
Default is ``True``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
partial
Whether to also check for partially complete sub-containers.
Default is ``False``.
assert_and_assign
if true, then the container being compared with is updated with the value in
the container being compared to given that the structures are congruent
"""
ivy.utils.assertions.check_true(
ivy.Container.cont_identical_structure(
containers,
check_types,
check_shapes,
key_chains,
to_apply,
partial,
assert_and_assign=assert_and_assign,
),
"Containers did not have identical"
f" structure:\n\n{ivy.Container.cont_structural_diff(*containers)}",
)
@staticmethod
def cont_identical_configs(containers):
"""Return a single boolean as to whether the input containers all have
identical configs.
Parameters
----------
containers
containers to check.
"""
ivy.utils.assertions.check_greater(len(containers), 1, as_array=False)
configs = [cont.cont_config for cont in containers]
config0 = configs[0]
return all(
min(config[k] == v for config in configs) for k, v in config0.items()
)
@staticmethod
def cont_identical_array_shapes(containers, exclusive=False):
"""Determine whether all of the containers have identical number of
arrays and identical array shapes, regardless of their key-chain
structures.
Parameters
----------
containers
containers to check.
exclusive
Whether to check if the data type is exclusively an array, rather than a
variable or traced array. (Default value = False)
Returns
-------
Boolean
"""
array_conts = [cont.cont_size_ordered_arrays(exclusive) for cont in containers]
array_cont0 = array_conts[0]
array_cont0_len = len(array_cont0)
for array_cont in array_conts[1:]:
if len(array_cont) != array_cont0_len:
return False
elif not min(
a.shape == a0.shape
for a, a0 in zip(array_cont.values(), array_cont0.values())
):
return False
return True
@staticmethod
def cont_load(filepath, format="h5py"):
if format == "json":
return ivy.Container.cont_from_disk_as_json(filepath)
elif format == "pickle":
return ivy.Container.cont_from_disk_as_pickled(filepath)
elif format == "h5py":
return ivy.Container.cont_from_disk_as_hdf5(filepath)
else:
raise ivy.utils.exceptions.IvyException("Unsupported format")
@staticmethod
def cont_from_disk_as_hdf5(
h5_obj_or_filepath, slice_obj=slice(None), alphabetical_keys=True, ivyh=None
):
"""Load container object from disk, as an h5py file, at the specified
hdf5 filepath.
Parameters
----------
h5_obj_or_filepath
Filepath where the container object is saved to disk, or h5 object.
slice_obj
slice object to slice all h5 elements. (Default value = slice(None))
alphabetical_keys
Whether to sort the container keys alphabetically, or preserve the dict
order. Default is ``True``.
ivyh
Handle to ivy module to use for the calculations. Default is ``None``, which
results in the global ivy.
Returns
-------
Container loaded from disk
"""
ivy.utils.assertions.check_exists(
h5py,
message=(
"You must install python package h5py in order to load hdf5 "
"files from disk into a container."
),
)
container_dict = {}
if isinstance(h5_obj_or_filepath, str):
h5_obj = h5py.File(h5_obj_or_filepath, "r")
else:
h5_obj = h5_obj_or_filepath
items = sorted(h5_obj.items()) if alphabetical_keys else h5_obj.items()
for key, value in items:
if isinstance(value, h5py.Group):
container_dict[key] = ivy.Container.cont_from_disk_as_hdf5(
                    value, slice_obj, alphabetical_keys, ivyh
)
elif isinstance(value, h5py.Dataset):
container_dict[key] = ivy.default(ivyh, ivy).array(
list(value[slice_obj]), dtype=str(value[slice_obj].dtype)
)
else:
raise ivy.utils.exceptions.IvyException(
"Item found inside h5_obj which was neither a Group nor a Dataset."
)
return ivy.Container(container_dict, ivyh=ivyh)
@staticmethod
def cont_from_disk_as_pickled(pickle_filepath, ivyh=None):
"""Load container object from disk at the specified pickle filepath.
Parameters
----------
pickle_filepath
Filepath where the container object is saved to disk.
ivyh
Handle to ivy module to use for the calculations. Default is ``None``, which
results in the global ivy.
Returns
-------
Container loaded from disk
"""
return ivy.Container(
pickle.load(open(pickle_filepath, "rb")),
rebuild_child_containers=True,
ivyh=ivyh,
).to_ivy()
@staticmethod
def cont_from_disk_as_json(json_filepath, ivyh=None):
"""Load container object from disk at the specified json filepath. If
some objects were not json-able during saving, then they will be loaded
as strings.
Parameters
----------
json_filepath
Filepath where the container object is saved to disk.
ivyh
Handle to ivy module to use for the calculations. Default is ``None``, which
results in the global ivy.
Returns
-------
Container loaded from disk
"""
with open(json_filepath) as json_data_file:
return ivy.Container(json.load(json_data_file), ivyh=ivyh)
@staticmethod
def h5_file_size(h5_obj_or_filepath):
"""Get file size of h5 file contents.
Parameters
----------
h5_obj_or_filepath
Filepath where the container object is saved to disk, or h5 object.
Returns
-------
Size of h5 file contents, and batch size.
"""
ivy.utils.assertions.check_exists(
h5py,
message=(
"You must install python package h5py in order to determine "
"the size of hdf5 files."
),
)
if isinstance(h5_obj_or_filepath, str):
h5_obj = h5py.File(h5_obj_or_filepath, "r")
else:
h5_obj = h5_obj_or_filepath
size = 0
batch_size = 0
for value in h5_obj.values():
if isinstance(value, h5py.Group):
size_to_add, batch_size = ivy.Container.h5_file_size(value)
size += size_to_add
elif isinstance(value, h5py.Dataset):
value_shape = value.shape
size += _reduce(mul, value_shape, 1) * value.dtype.itemsize
batch_size = value_shape[0]
else:
raise ivy.utils.exceptions.IvyException(
"Item found inside h5_obj which was neither a Group nor a Dataset."
)
return size, batch_size
@staticmethod
def shuffle_h5_file(h5_obj_or_filepath, seed_value=0):
"""Shuffle entries in all datasets of h5 file, such that they are still
aligned along axis 0.
Parameters
----------
h5_obj_or_filepath
Filepath where the container object is saved to disk, or h5 object.
seed_value
random seed to use for array shuffling (Default value = 0)
"""
ivy.utils.assertions.check_exists(
h5py,
message=(
"You must install python package h5py in order to shuffle "
"hdf5 files on disk."
),
)
if seed_value is None:
seed_value = random.randint(0, 1000)
if isinstance(h5_obj_or_filepath, str):
h5_obj = h5py.File(h5_obj_or_filepath, "a")
else:
h5_obj = h5_obj_or_filepath
for value in h5_obj.values():
if isinstance(value, h5py.Group):
ivy.Container.shuffle_h5_file(value, seed_value)
elif isinstance(value, h5py.Dataset):
random.seed(seed_value)
# noinspection PyTypeChecker
random.shuffle(value)
else:
raise ivy.utils.exceptions.IvyException(
"Item found inside h5_obj which was neither a Group nor a Dataset."
)
if isinstance(h5_obj, h5py.File):
h5_obj.close()
@staticmethod
def cont_reduce(containers, reduction, config=None):
"""Reduce containers.
Parameters
----------
containers
containers to reduce
reduction
the reduction function
config
The configuration for the containers. Default is the same as container0.
Returns
-------
reduced containers
"""
container0 = containers[0]
if not ivy.exists(config):
config = (
container0.cont_config if isinstance(container0, ivy.Container) else {}
)
if isinstance(container0, ivy.Container):
return_dict = {}
for key in container0.keys():
return_dict[key] = ivy.Container.cont_reduce(
[container[key] for container in containers], reduction
)
return ivy.Container(return_dict, **config)
else:
# noinspection PyBroadException
try:
return reduction(containers)
except Exception as e:
raise ivy.utils.exceptions.IvyException(
str(e)
+ "\nContainer reduce operation only valid for containers of arrays"
)
@staticmethod
def cont_flatten_key_chain(
key_chain, replacement="__", above_height=None, below_depth=None
):
"""Summary.
Parameters
----------
key_chain
param replacement: (Default value = '__')
above_height
Default value = None)
below_depth
Default value = None)
replacement
(Default value = '__')
"""
# noinspection RegExpSingleCharAlternation
flat_keys = re.split("/|\.", key_chain) # noqa
num_keys = len(flat_keys)
pre_keys = []
post_keys = []
if above_height and num_keys > above_height:
post_keys = flat_keys[-above_height:]
del flat_keys[-above_height:]
if below_depth and num_keys > below_depth:
pre_keys = flat_keys[0:below_depth]
del flat_keys[0:below_depth]
return "/".join(
[
k
for k in [
"/".join(pre_keys),
replacement.join(flat_keys),
"/".join(post_keys),
]
if k
]
)
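    # A minimal usage sketch for ``cont_flatten_key_chain`` (illustrative only;
    # the key chains below are made-up examples, not taken from the docs):
    #
    #   ivy.Container.cont_flatten_key_chain("layer0/conv/w")                  # 'layer0__conv__w'
    #   ivy.Container.cont_flatten_key_chain("layer0/conv/w", below_depth=1)   # 'layer0/conv__w'
    #   ivy.Container.cont_flatten_key_chain("layer0/conv/w", above_height=1)  # 'layer0__conv/w'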
@staticmethod
def cont_trim_key(key, max_length):
"""Summary. Returns a trimmed key with a maximum length of max_length.
Parameters
----------
key
key to trim
max_length
maximum length of key
"""
key_len = len(key)
if not ivy.exists(max_length) or key_len <= max_length:
return key
idxs = (
np.round(
(key_len - 1)
/ (max_length - 1)
* np.linspace(0, max_length - 1, max_length)
)
.astype(np.int32)
.tolist()
)
return "".join([key[idx] for idx in idxs])
# Private Methods #
# ----------------#
def _cont_call_static_method_with_flexible_args(
self,
static_method,
*args,
kw,
required,
defaults,
self_idx=0,
key_chains=None,
to_apply=True,
prune_unapplied=False,
map_sequences=None,
out=None,
) -> ivy.Container:
if args:
num_args = len(args)
kw = {
k: defaults[k] if k in defaults else v
for i, (k, v) in enumerate(kw.items())
if i > num_args
}
args = list(args)
if self_idx > num_args:
k = list(kw.keys())[self_idx - num_args - 1]
kw[k] = self
else:
args.insert(self_idx, self)
return static_method(
*args,
**kw,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
self_set = False
# set to leftmost non-specified required arg, if present
for k in required:
if kw[k] is None:
kw[k] = self
self_set = True
break
# go through each key and value of the keyword arguments
for k, v in kw.items():
if v is None:
if self_set:
if k in defaults:
# if self is set and a default value exists, set it
kw[k] = defaults[k]
else:
# otherwise set self to this argument
kw[k] = self
self_set = True
# call the static method
return static_method(
**kw,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def _cont_get_shape(self):
if not len(self.keys()):
if ivy.exists(self._queues):
return [self._queue_load_sizes_cum[-1]]
return [0]
sub_shapes = [
v
for k, v in self.cont_map(
lambda x, kc: (
list(x.shape)
if self._cont_ivy.is_native_array(x) or isinstance(x, ivy.Array)
else ([len(x)] if isinstance(x, (list, tuple)) else None)
)
).cont_to_iterator()
if v
]
if not sub_shapes:
return sub_shapes
min_num_dims = min(len(sub_shape) for sub_shape in sub_shapes)
sub_shapes_array = np.asarray(
[sub_shape[0:min_num_dims] for sub_shape in sub_shapes]
)
sub_shapes_array = np.where(sub_shapes_array == 0, -1, sub_shapes_array)
mask = np.prod(sub_shapes_array / sub_shapes_array[0:1], 0) == 1
# noinspection PyTypeChecker
return [
None if np.isnan(i) else int(i)
for i in np.where(
mask, sub_shapes_array[0], np.ones(min_num_dims) * float("nan")
).tolist()
]
def _cont_get_shapes(self):
return self.cont_map(lambda x, kc: x.shape if hasattr(x, "shape") else None)
def _cont_get_dtype(self):
sub_dtypes = [
v for k, v in self.cont_map(lambda x, kc: x.dtype).cont_to_iterator() if v
]
unique_dtypes = list(set(sub_dtypes))
return sub_dtypes[0] if len(unique_dtypes) == 1 else None
def _cont_get_dev(self, as_native=False):
sub_devs = [
v
for k, v in self.cont_map(
lambda x, kc: (
self._cont_ivy.dev(x, as_native=as_native)
if self._cont_ivy.is_native_array(x) or isinstance(x, ivy.Array)
else None
)
).cont_to_iterator()
if v
]
if len(set(sub_devs)) <= 1:
return sub_devs[0]
return None
def _cont_at_key_chains_input_as_seq(self, key_chains, ignore_key_errors=False):
return_cont = ivy.Container({}, **self._config)
for kc in key_chains:
val = self.cont_at_key_chain(kc, ignore_key_errors=ignore_key_errors)
if ignore_key_errors and not ivy.exists(val):
continue
return_cont.cont_set_at_key_chain(kc, val, inplace=True)
return return_cont
def _cont_at_key_chains_input_as_dict(
self, key_chains, current_chain="", ignore_key_errors=False
):
return_dict = {}
for k, v in key_chains.items():
if current_chain == "":
new_current_chain = k
else:
new_current_chain = current_chain + "/" + k
if isinstance(v, dict):
return_dict[k] = self._cont_at_key_chains_input_as_dict(
v, new_current_chain, ignore_key_errors=ignore_key_errors
)
else:
val = self.cont_at_key_chain(
new_current_chain, ignore_key_errors=ignore_key_errors
)
if ignore_key_errors and not ivy.exists(val):
continue
return_dict[k] = val
return ivy.Container(return_dict, **self._config)
def _cont_prune_key_chains_input_as_seq(self, key_chains):
return_cont = self.cont_copy()
for kc in key_chains:
return_cont = return_cont.cont_prune_key_chain(kc)
return return_cont
def _cont_prune_key_chains_input_as_dict(self, key_chains, return_cont=None):
if return_cont is None:
return_cont = self.cont_copy()
for k, v in key_chains.items():
if isinstance(v, dict):
ret_cont = self._cont_prune_key_chains_input_as_dict(v, return_cont[k])
if ret_cont.cont_shape[0] == 0:
del return_cont[k]
else:
del return_cont[k]
return return_cont
# Public Methods #
# ---------------#
def cont_duplicate_array_keychains(self):
duplicates = ()
key_chains = self.cont_all_key_chains()
skips = set()
for i in range(len(key_chains)):
temp_duplicates = ()
if key_chains[i] in skips:
continue
for j in range(i + 1, len(key_chains)):
if key_chains[j] in skips:
continue
if self[key_chains[i]] is self[key_chains[j]]:
if key_chains[i] not in temp_duplicates:
temp_duplicates += (key_chains[i],)
if key_chains[j] not in temp_duplicates:
temp_duplicates += (key_chains[j],)
if len(temp_duplicates) > 0:
duplicates += (temp_duplicates,)
skips = chain.from_iterable(duplicates)
return duplicates
def cont_update_config(self, **config):
new_config = {}
for k, v in config.items():
att_name = f"_{k}"
if k in self._config_in:
if k == "types_to_iteratively_nest":
v = ivy.default(lambda: tuple(v), (), catch_exceptions=True)
elif k == "keyword_color_dict":
v = ivy.default(v, {})
elif k == "ivyh":
att_name = "_local_ivy"
new_config[k] = v
self.__setattr__(att_name, v)
self._config = new_config
def cont_inplace_update(
self, dict_in: Union[ivy.Container, dict], **config
) -> ivy.Container:
"""Update the contents of this container inplace, using either a new
dict or container.
Parameters
----------
dict_in
New dict or container to update the current container inplace with.
**config
"""
# # update config
self.cont_update_config(**config)
# update container values inplace
if dict_in is None:
return
dict_types = tuple([dict] + ivy.container_types())
if isinstance(dict_in, dict_types):
dict_in = dict_in
elif isinstance(dict_in, tuple(self._types_to_iteratively_nest)):
dict_in = dict(
zip(
[
f"it_{str(i).zfill(len(str(len(dict_in))))}"
for i in range(len(dict_in))
],
dict_in,
)
)
else:
raise ivy.utils.exceptions.IvyException(f"invalid input {dict_in}")
items = sorted(dict_in.items()) if self._alphabetical_keys else dict_in.items()
for key, value in items:
if (
isinstance(value, dict_types)
and (
not isinstance(value, ivy.Container)
or self._rebuild_child_containers
)
) or isinstance(value, tuple(self._types_to_iteratively_nest)):
self[key] = ivy.Container(value, **self._config)
elif key in self and isinstance(self[key], ivy.Container):
self[key].cont_inplace_update(value)
else:
self[key] = value
def cont_all_true(
self,
assert_is_bool=False,
key_chains=None,
to_apply=True,
prune_unapplied=False,
map_sequences=False,
):
"""Determine whether all the entries in the container boolean evaluate
to True.
Parameters
----------
assert_is_bool
Whether or not to assert each entry is of type Boolean.
(Default value = False)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
Boolean, whether all entries are boolean True.
"""
return bool(
np.prod(
[
v
for k, v in self.cont_as_bools(
assert_is_bool,
key_chains,
to_apply,
prune_unapplied,
map_sequences,
).cont_to_iterator()
]
)
)
def cont_all_false(
self,
assert_is_bool=False,
key_chains=None,
to_apply=True,
prune_unapplied=False,
map_sequences=False,
):
"""Determine whether all the entries in the container boolean evaluate
to False.
Parameters
----------
assert_is_bool
Whether or not to assert each entry is of type Boolean.
(Default value = False)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
Boolean, whether all entries are boolean False.
"""
return not bool(
np.sum(
[
v
for k, v in self.cont_as_bools(
assert_is_bool,
key_chains,
to_apply,
prune_unapplied,
map_sequences,
).cont_to_iterator()
]
)
)
def cont_slice_via_key(self, slice_key):
"""Get slice of container, based on key.
Parameters
----------
slice_key
key to slice container at.
Returns
-------
Container object sliced at desired key.
"""
return_dict = {}
for key, value in self.items():
if key == slice_key:
return value
elif isinstance(value, ivy.Container):
return_dict[key] = value.cont_slice_via_key(slice_key)
else:
return_dict[key] = value
return ivy.Container(return_dict, **self._config)
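    # A minimal sketch of ``cont_slice_via_key`` (the container contents are
    # assumptions for illustration): each sub-container is replaced by the
    # value it holds at the sliced key.
    #
    #   cont = ivy.Container({"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}})
    #   cont.cont_slice_via_key("x")   # -> Container with {"a": 1, "b": 3}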
def cont_as_bools(
self,
assert_is_bool=False,
key_chains=None,
to_apply=True,
prune_unapplied=False,
map_sequences=False,
):
"""Return boolean evaluation for all nested items in the container.
Parameters
----------
assert_is_bool
Whether or not to assert the entry is of type Boolean.
(Default value = False)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
Container object with all entries boolean evaluated.
"""
def _ret_bool(x):
if assert_is_bool:
ivy.utils.assertions.check_isinstance(x, bool)
return x
return bool(x)
return self.cont_map(
lambda x, kc: _ret_bool(x),
key_chains,
to_apply,
prune_unapplied,
map_sequences,
)
def cont_unstack_conts(self, axis, keepdims=False, dim_size=None):
"""Unstack containers along specified dimension.
Parameters
----------
axis
Dimensions along which to unstack.
keepdims
Whether to keep dimension 1 in the unstack dimensions. Default is ``False``.
dim_size
Size of the dimension to unstack. Determined from inputs by default.
Returns
-------
List of containers, unstacked along the specified dimension.
"""
if dim_size is None:
dim_size = self.cont_shape[axis]
if keepdims:
# noinspection PyTypeChecker
return [
self[
(
slice(i, i + 1, 1)
if axis == 0
else tuple(
[slice(None, None, None)] * axis + [slice(i, i + 1, 1)]
)
)
]
for i in range(dim_size)
]
# noinspection PyTypeChecker
return [
self[i if axis == 0 else tuple([slice(None, None, None)] * axis + [i])]
for i in range(dim_size)
]
def split_conts(
self,
num_or_size_splits=None,
axis=0,
with_remainder=False,
key_chains=None,
to_apply=True,
prune_unapplied=False,
map_sequences=False,
):
"""Split a container into multiple sub-containers.
The function does that by splitting their constituent arrays.
Parameters
----------
num_or_size_splits
Number of equal arrays to divide the array into along the given axis if an
integer. The size of each split element if a sequence of integers. Default
is to divide into as many 1-dimensional arrays as the axis dimension.
axis
The axis along which to split, default is ``0``.
with_remainder
If the tensor does not split evenly, then store the last remainder entry.
Default is ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains will
be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied. Default
is False.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
A list of sub-arrays.
"""
dim_size = (
num_or_size_splits
if isinstance(num_or_size_splits, int)
else len(num_or_size_splits)
)
# noinspection PyTypeChecker
return self.cont_map(
lambda x, kc: (
self._cont_ivy.split(
x,
num_or_size_splits=num_or_size_splits,
axis=axis,
with_remainder=with_remainder,
)
if self._cont_ivy.is_native_array(x) or isinstance(x, ivy.Array)
else x
),
key_chains,
to_apply,
prune_unapplied,
map_sequences,
).cont_unstack_conts(0, dim_size=dim_size)
def cont_num_arrays(self, exclusive=False):
"""Compute the number of arrays present at the leaf nodes, including
variables by default.
Parameters
----------
exclusive
Whether to check if the data type is exclusively an array,
rather than a variable or traced array. (Default value = False)
"""
return sum(
self.cont_map(
lambda x, kc: ivy.is_array(x, exclusive=exclusive)
).cont_to_iterator_values()
)
def cont_size_ordered_arrays(self, exclusive=False):
"""Return a container with keychains mapped to flat keys, and arrays
given in order of smallest to largest.
Parameters
----------
exclusive
Whether to check if the data type is exclusively an array,
rather than a variable or traced array. (Default value = False)
"""
array_dict = {
ivy.Container.cont_flatten_key_chain(kc): v
for kc, v in self.cont_to_iterator()
if ivy.is_array(v, exclusive=exclusive)
}
return ivy.Container(
dict(
sorted(
array_dict.items(), key=lambda item: _reduce(mul, item[1].shape, 1)
)
),
alphabetical_keys=False,
)
def cont_save(self, filepath, format="h5py"):
if format == "json":
self.cont_to_disk_as_json(filepath)
elif format == "pickle":
self.cont_to_disk_as_pickled(filepath)
elif format == "h5py":
self.cont_to_disk_as_hdf5(filepath)
else:
raise ValueError("Unsupported format")
def cont_to_disk_as_hdf5(
self, h5_obj_or_filepath, starting_index=0, mode="a", max_batch_size=None
):
"""Save container object to disk, as an h5py file, at the specified
filepath.
Parameters
----------
h5_obj_or_filepath
Filepath for where to save the container to disk, or h5 object.
starting_index
Batch index for which to start writing to file, if it already exists
(Default value = 0)
mode
H5 read/write mode for writing to disk, ['r', 'r+', 'w', 'w-', 'a'],
default is 'a'.
max_batch_size
Maximum batch size for the container on disk, this is useful if later
appending to file. (Default value = None)
"""
ivy.utils.assertions.check_exists(
h5py,
message=(
"You must install python package h5py in order to save "
"containers to disk as hdf5 files."
),
)
if isinstance(h5_obj_or_filepath, str):
h5_obj = h5py.File(h5_obj_or_filepath, mode)
else:
h5_obj = h5_obj_or_filepath
for key, value in self.items():
if isinstance(value, ivy.Container):
if key not in h5_obj.keys():
h5_group = h5_obj.create_group(key)
else:
h5_group = h5_obj[key]
value.cont_to_disk_as_hdf5(
h5_group, starting_index, mode, max_batch_size
)
else:
value_as_np = self._cont_ivy.to_numpy(value)
value_shape = value_as_np.shape
this_batch_size = value_shape[0]
max_bs = (
max_batch_size
if max_batch_size
else starting_index + this_batch_size
)
if key not in h5_obj.keys():
dataset_shape = [max_bs] + list(value_shape[1:])
maxshape = [None for _ in dataset_shape]
h5_obj.create_dataset(
key, dataset_shape, dtype=value_as_np.dtype, maxshape=maxshape
)
space_left = max_bs - starting_index
amount_to_write = min(this_batch_size, space_left)
for i in range(amount_to_write):
h5_obj[key][starting_index + i : starting_index + i + 1] = (
value_as_np[i : i + 1]
)
def cont_to_disk_as_pickled(self, pickle_filepath):
"""Save container object to disk, as an pickled file, at the specified
filepath.
Parameters
----------
pickle_filepath
Filepath for where to save the container to disk.
"""
pickle.dump(self.to_native().cont_to_dict(), open(pickle_filepath, "wb"))
def cont_to_jsonable(self, return_dict=None):
"""
Parameters
----------
return_dict
Default value = None)
"""
if return_dict is None:
return_dict = self.cont_copy()
for k, v in return_dict.items():
if not _is_jsonable(v):
if isinstance(v, dict):
return_dict[k] = self.cont_to_jsonable(v)
else:
return_dict[k] = str(v)
return return_dict
def cont_to_disk_as_json(self, json_filepath):
"""Save container object to disk, as an json file, at the specified
filepath.
Parameters
----------
json_filepath
Filepath for where to save the container to disk.
"""
with open(json_filepath, "w+") as json_data_file:
json.dump(self.cont_to_jsonable().cont_to_dict(), json_data_file, indent=4)
def cont_to_nested_list(self):
return_list = []
for key, value in self.items():
if isinstance(value, ivy.Container):
return_list.append(value.cont_to_nested_list())
elif value is not None and key != "_f":
return_list.append(value)
return return_list
def cont_to_raw(self):
"""Convert container to its original form.
Returns
-------
ret
Container data in its raw form.
"""
return_item = {}
for key, value in self.items():
if isinstance(value, ivy.Container):
return_item[key] = value.cont_to_raw()
elif key[0:3] == "it_" and tuple(self._types_to_iteratively_nest):
return_item = [
v.cont_to_raw() if isinstance(v, ivy.Container) else v
for v in self.values()
]
break
else:
return_item[key] = value
return return_item
def cont_to_dict(self):
"""Summary.
Returns
-------
ret Container as nested dict.
"""
return_dict = {}
for key, value in self.items():
if isinstance(value, ivy.Container):
return_dict[key] = value.cont_to_dict()
else:
return_dict[key] = value
return return_dict
def cont_to_iterator(self, key_chain="", leaf_keys_only=False, include_empty=False):
"""
Parameters
----------
key_chain
Default value = '')
leaf_keys_only
Default value = False)
include_empty
Default value = False)
Returns
-------
Iterator for the container elements.
"""
for key, value in self.items():
if leaf_keys_only:
kc = key
else:
kc = f"{key_chain}/{key}" if key_chain != "" else key
if isinstance(value, ivy.Container) and (not include_empty or value):
yield from value.cont_to_iterator(kc, leaf_keys_only, include_empty)
else:
yield kc, value
def cont_to_iterator_values(self, include_empty=False):
"""
Parameters
----------
include_empty
Default value = False)
Returns
-------
Iterator for the container values.
"""
for value in self.values():
if isinstance(value, ivy.Container) and (not include_empty or value):
# noinspection PyCompatibility
yield from value.cont_to_iterator_values(include_empty)
else:
yield value
def cont_to_iterator_keys(
self, key_chain="", leaf_keys_only=False, include_empty=False
):
"""
Parameters
----------
key_chain
Default value = '')
leaf_keys_only
Default value = False)
include_empty
Default value = False)
Returns
-------
Iterator for the container elements.
"""
for key, value in self.items():
if leaf_keys_only:
kc = key
else:
kc = f"{key_chain}/{key}" if key_chain != "" else key
if isinstance(value, ivy.Container) and (not include_empty or value):
# noinspection PyCompatibility
yield from value.cont_to_iterator_keys(
kc, leaf_keys_only, include_empty
)
else:
yield kc
def cont_to_flat_list(self):
"""Summary.
Returns
-------
ret
Container as flat list.
"""
return [item for key, item in self.cont_to_iterator()]
def cont_from_flat_list(self, flat_list):
"""Return new container object with the same hierarchy, but with values
replaced from flat list.
Parameters
----------
flat_list
flat list of values to populate container with.
Returns
-------
Container.
"""
new_dict = {}
for key, value in self.items():
if isinstance(value, ivy.Container):
new_value = value.cont_from_flat_list(flat_list)
else:
new_value = flat_list.pop(0)
new_dict[key] = new_value
return ivy.Container(new_dict, **self._config)
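    # A round-trip sketch for ``cont_to_flat_list`` / ``cont_from_flat_list``
    # (example values are assumptions): the flat list follows the container's
    # key ordering, and ``cont_from_flat_list`` consumes the list front-first.
    #
    #   cont = ivy.Container({"a": 1, "b": {"c": 2}})
    #   cont.cont_to_flat_list()            # [1, 2]
    #   cont.cont_from_flat_list([10, 20])  # same structure, with a=10 and b/c=20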
def cont_has_key(self, query_key):
"""Determine whether container object has specified key somewhere in
the nested structure.
Parameters
----------
        query_key
            The key to search for in the nested structure.
Returns
-------
ret
Boolean
"""
has_key = False
def map_fn(x, kc):
"""
Parameters
----------
x
param kc:
kc
"""
nonlocal has_key
if query_key in kc:
has_key = True
return x
self.cont_map(map_fn)
return has_key
def cont_has_key_chain(self, key_chain):
"""Determine whether container object has specified key-chain.
Parameters
----------
        key_chain
            The key chain to check for, specified as 'key1/key2/...'.
Returns
-------
ret
Boolean
"""
keys = re.split("[/.]", key_chain)
ret = self
for key in keys:
try:
ret = ret[key]
except KeyError:
return False
return True
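    # A minimal sketch of ``cont_has_key_chain`` (example container assumed):
    # both '/' and '.' are accepted as key-chain separators.
    #
    #   cont = ivy.Container({"a": {"b": 0}})
    #   cont.cont_has_key_chain("a/b")   # True
    #   cont.cont_has_key_chain("a.b")   # True
    #   cont.cont_has_key_chain("a/c")   # False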
def cont_find_sub_container(self, sub_cont_to_find, partial=False):
"""Find the sub-container in the current container if it exists.
Parameters
----------
sub_cont_to_find
The sub-container to find.
partial
Whether to also check for partially complete sub-containers.
Default is ``False``.
"""
key_chain_found = False
def _check_sub_cont(sub_cont, kc):
sub_cont_key_chains = sub_cont_to_find.cont_all_key_chains()
kcs_in_sub_cont = [kc in sub_cont for kc in sub_cont_key_chains]
if (
kcs_in_sub_cont
and min(kcs_in_sub_cont)
and ivy.Container.cont_identical(
[sub_cont, sub_cont_to_find], partial=partial
)
):
nonlocal key_chain_found
key_chain_found = kc
return sub_cont
self.cont_map_sub_conts(_check_sub_cont)
return key_chain_found
def cont_contains_sub_container(self, sub_cont, partial=False):
"""Determine whether the current container contains the sub-container,
with matching structure and array values.
Parameters
----------
sub_cont
The sub-container to check.
partial
Whether to also check for partially complete sub-containers.
Default is ``False``.
Returns
-------
Bool
"""
return isinstance(self.cont_find_sub_container(sub_cont, partial), str)
def cont_assert_contains_sub_container(self, sub_cont, partial=False):
"""Assert that the current container contains the sub-container,
otherwise exception raised with the diff printed to screen.
Parameters
----------
sub_cont
The sub-container to check.
partial
Whether to also check for partially complete sub-containers.
Default is ``False``.
"""
try:
ivy.utils.assertions.check_true(
self.cont_contains_sub_container(sub_cont, partial)
)
except ivy.utils.exceptions.IvyException:
key_chain = self.cont_find_sub_structure(
sub_cont, check_shapes=False, partial=True
)
if not key_chain:
key_chain = ""
# noinspection PyTypeChecker
raise ivy.utils.exceptions.IvyException(
"Containers did not have identical structure and"
f" values:\n\n{ivy.Container.cont_diff(self[key_chain], sub_cont)}"
)
def cont_find_sub_structure(
self, sub_struc_to_find, check_shapes=True, partial=False
):
"""Find the sub-container structure in the current container if it
exists.
Parameters
----------
sub_struc_to_find
The sub-container to find.
check_shapes
Whether to check array shapes in the sub-structure. Default is ``True``.
partial
Whether to also check for partially complete sub-containers.
Default is ``False``.
"""
key_chain_found = False
def _check_sub_cont(sub_cont, kc):
"""
Parameters
----------
sub_cont
param kc:
kc
"""
sub_struc_key_chains = sub_struc_to_find.cont_all_key_chains()
kcs_in_sub_cont = [kc in sub_cont for kc in sub_struc_key_chains]
if (
kcs_in_sub_cont
and min(kcs_in_sub_cont)
and ivy.Container.cont_identical_structure(
[sub_cont, sub_struc_to_find],
check_shapes=check_shapes,
partial=partial,
)
):
nonlocal key_chain_found
key_chain_found = kc
return sub_cont
self.cont_map_sub_conts(_check_sub_cont)
return key_chain_found
def cont_contains_sub_structure(self, sub_cont, check_shapes=True, partial=False):
"""Determine whether the current container contains the sub-container
structure.
Parameters
----------
sub_cont
The sub-container to check.
check_shapes
Whether to check array shapes in the sub-structure. Default is ``True``.
partial
Whether to also check for partially complete sub-containers.
Default is ``False``.
"""
return isinstance(
self.cont_find_sub_structure(sub_cont, check_shapes, partial), str
)
def cont_assert_contains_sub_structure(
self, sub_cont, check_shapes=True, partial=False
):
"""Assert that the current container contains the sub-container
structure, otherwise exception raised with the diff printed to screen.
Parameters
----------
sub_cont
The sub-container to check.
check_shapes
Whether to check array shapes in the sub-structure. Default is ``True``.
partial
Whether to also check for partially complete sub-containers.
Default is ``False``.
"""
try:
ivy.utils.assertions.check_true(
self.cont_contains_sub_structure(sub_cont, check_shapes, partial)
)
except ivy.utils.exceptions.IvyException:
key_chain = self.cont_find_sub_structure(
sub_cont, check_shapes=False, partial=True
)
if not key_chain:
key_chain = ""
# noinspection PyTypeChecker
raise ivy.utils.exceptions.IvyException(
"Containers did not have identical structure:\n\n{}".format(
ivy.Container.cont_structural_diff(
self[key_chain],
sub_cont,
detect_key_diffs=not partial,
detect_shape_diffs=check_shapes,
mode="diff_only" if partial else "all",
)
)
)
def cont_at_keys(
self, queries, ignore_none=True, containing=False, ignore_key_errors=False
):
"""Query container object at specified keys, either as list or nested
dict.
Parameters
----------
queries
The keys to query.
ignore_none
Whether to ignore None input. Default is ``True``.
containing
Whether to include keys which only contain the query substrings.
Default is ``False``.
ignore_key_errors
Whether to ignore Key-errors when trying to access the dict.
Default is ``False``.
Returns
-------
sub-container containing only key-chains containing the specified keys.
"""
if queries is None and ignore_none:
return self
key_chains_to_keep = []
if isinstance(queries, str):
queries = [queries]
def map_fn(x, kc):
nonlocal key_chains_to_keep
kc_split = re.split("[/.]", kc)
for query_key in queries:
if (
query_key in kc_split
or containing
and min(query_key in k for k in kc_split)
):
key_chains_to_keep.append(kc)
return x
self.cont_map(map_fn)
return self.cont_at_key_chains(
key_chains_to_keep, ignore_key_errors=ignore_key_errors
)
def cont_at_key_chain(self, key_chain, ignore_key_errors=False):
"""Query container object at a specified key-chain.
Parameters
----------
        key_chain
            The key chain to query, specified as 'key1/key2/...'.
        ignore_key_errors
            Whether to ignore key errors and return None rather than raising.
            (Default value = False)
Returns
-------
ret
sub-container or value at specified key chain
"""
keys = re.split("[/.]", key_chain)
ret = self
for key in keys:
try:
ret = ret[key]
except KeyError as e:
if ignore_key_errors:
return
raise ivy.utils.exceptions.IvyException(repr(e))
return ret
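    # A minimal sketch of ``cont_at_key_chain`` (example container assumed):
    #
    #   cont = ivy.Container({"a": {"b": 7}})
    #   cont.cont_at_key_chain("a/b")                           # 7
    #   cont.cont_at_key_chain("a/c", ignore_key_errors=True)   # None
    #   cont.cont_at_key_chain("a/c")                           # raises IvyException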
def cont_at_key_chains(self, key_chains, ignore_none=True, ignore_key_errors=False):
"""Query container object at specified key-chains, either as list or
nested dict.
Parameters
----------
        key_chains
            The key chains to query, as a list/tuple, nested dict, or single string.
        ignore_none
            Whether to simply return the whole container if key_chains is None.
            (Default value = True)
        ignore_key_errors
            Whether to ignore key errors when querying. (Default value = False)
Returns
-------
type
sub-container containing only the specified key chains
"""
if key_chains is None and ignore_none:
return self
if isinstance(key_chains, (list, tuple)):
return self._cont_at_key_chains_input_as_seq(
key_chains, ignore_key_errors=ignore_key_errors
)
elif isinstance(key_chains, dict):
return self._cont_at_key_chains_input_as_dict(
key_chains, ignore_key_errors=ignore_key_errors
)
elif isinstance(key_chains, str):
return self._cont_at_key_chains_input_as_seq(
[key_chains], ignore_key_errors=ignore_key_errors
)
else:
raise ivy.utils.exceptions.IvyException(
"Invalid type for input key_chains, must either be a list, tuple, dict"
f" or ivy.Container, but found type {type(key_chains)}"
)
def cont_all_key_chains(self, include_empty=False):
"""
Parameters
----------
include_empty
Default value = False)
"""
return [kc for kc, v in self.cont_to_iterator(include_empty=include_empty)]
def cont_key_chains_containing(self, sub_str, include_empty=False):
"""
Parameters
----------
sub_str
param include_empty: (Default value = False)
include_empty
(Default value = False)
"""
return [
kc
for kc, v in self.cont_to_iterator(include_empty=include_empty)
if sub_str in kc
]
def cont_set_at_keys(self, target_dict):
"""Set values of container object at specified keys.
Parameters
----------
        target_dict
            A dict mapping keys to the new values to set at those keys.
Returns
-------
type
new container with updated value at each key
"""
return_dict = {}
for key, val in self.items():
if key in target_dict:
return_dict[key] = target_dict[key]
elif isinstance(val, ivy.Container):
return_dict[key] = val.cont_set_at_keys(target_dict)
else:
return_dict[key] = val
return ivy.Container(return_dict, **self._config)
def cont_set_at_key_chain(self, key_chain, val, inplace=False):
"""Set value of container object at a specified key-chain.
Parameters
----------
        key_chain
            The key chain at which to set the value, specified as 'key1/key2/...'.
        val
            The value to set.
        inplace
            Whether to modify this container, or return a copy.
            (Default value = False)
Returns
-------
ret
new container with updated value at key chain
"""
keys = re.split("[/.]", key_chain)
if inplace:
cont = self
else:
cont = self.cont_copy()
sub_cont = cont
for key in keys[:-1]:
if key not in sub_cont:
sub_cont[key] = ivy.Container(**self._config)
sub_cont = sub_cont[key]
sub_cont[keys[-1]] = val
return cont
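    # A minimal sketch of ``cont_set_at_key_chain`` (example container assumed):
    # missing intermediate keys are created on the way down.
    #
    #   cont = ivy.Container({"a": {"b": 0}})
    #   new = cont.cont_set_at_key_chain("a/c/d", 1)           # returns a copy
    #   cont.cont_set_at_key_chain("a/c/d", 1, inplace=True)   # modifies cont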
def cont_overwrite_at_key_chain(self, key_chain, val, inplace=False):
"""Overwrite value of container object at a specified key-chain.
Parameters
----------
        key_chain
            The existing key chain at which to overwrite the value.
        val
            The value to set.
        inplace
            Whether to modify this container, or return a copy.
            (Default value = False)
Returns
-------
ret
new container with updated value at key chain, provided it existed before.
"""
keys = re.split("[/.]", key_chain)
if inplace:
cont = self
else:
cont = self.cont_copy()
sub_cont = cont
for key in keys[:-1]:
ivy.utils.assertions.check_elem_in_list(
key,
sub_cont,
message=(
"key chain must already exist in container in order to "
"call cont_overwrite_at_key_chain"
),
)
sub_cont = sub_cont[key]
ivy.utils.assertions.check_elem_in_list(
keys[-1],
sub_cont,
message=(
"key chain must already exist in container in order to call "
"cont_overwrite_at_key_chain"
),
)
sub_cont[keys[-1]] = val
return cont
def cont_set_at_key_chains(self, target_dict, return_dict=None, inplace=False):
"""Set values of container object at specified key-chains.
Parameters
----------
        target_dict
            A nested dict of key chains and the values to set.
        return_dict
            The dict to populate and return; used internally for recursion.
            (Default value = None)
        inplace
            Whether to modify this container, or return a copy.
            (Default value = False)
Returns
-------
ret
new container with updated values at the key chains
"""
if return_dict is None:
if inplace:
return_dict = self
else:
return_dict = self.cont_copy()
for k, v in target_dict.items():
if isinstance(v, dict):
return_dict[k] = self.cont_set_at_key_chains(v, return_dict[k], inplace)
else:
return_dict[k] = v
return return_dict
def cont_overwrite_at_key_chains(
self, target_dict, return_dict=None, inplace=False
):
"""Overwrite values of container object at specified key-chains.
Parameters
----------
        target_dict
            A nested dict of existing key chains and the values to overwrite.
        return_dict
            The dict to populate and return; used internally for recursion.
            (Default value = None)
        inplace
            Whether to modify this container, or return a copy.
            (Default value = False)
Returns
-------
ret
new container with updated values at the key chains, provided they
existed before.
"""
if return_dict is None:
if inplace:
return_dict = self
else:
return_dict = self.cont_copy()
for k, v in target_dict.items():
ivy.utils.assertions.check_elem_in_list(
k,
return_dict,
message=(
"key chain must already exist in container in order to "
"call cont_overwrite_at_key_chain"
),
)
if isinstance(v, dict):
return_dict[k] = self.cont_overwrite_at_key_chains(
v, return_dict[k], inplace
)
else:
return_dict[k] = v
return return_dict
def cont_prune_keys(self, query_keys, ignore_none=True):
"""Recursively prune set of keys.
Parameters
----------
        query_keys
            The keys to prune, as a single string or list of strings.
        ignore_none
            Whether to simply return the whole container if query_keys is None.
            (Default value = True)
Returns
-------
ret
Container with key-chains containing the specified keys pruned.
"""
if query_keys is None and ignore_none:
return self
key_chains_to_prune = []
if isinstance(query_keys, str):
query_keys = [query_keys]
def map_fn(x, kc):
"""
Parameters
----------
x
param kc:
kc
"""
nonlocal key_chains_to_prune
for query_key in query_keys:
if query_key in kc:
key_chains_to_prune.append(kc)
return x
self.cont_map(map_fn)
return self.cont_prune_key_chains(key_chains_to_prune)
def cont_prune_key_chain(self, key_chain):
"""Recursively prune chain of keys, specified as 'key1/key2/key3/...'.
Parameters
----------
        key_chain
            The chain of keys to prune, specified as 'key1/key2/key3/...'.
Returns
-------
ret
Container with keys in key chain pruned.
"""
keys_in_chain = re.split("[/.]", key_chain)
out_dict = {}
for key, value in self.items():
if isinstance(value, ivy.Container):
if key == keys_in_chain[0]:
if len(keys_in_chain) == 1:
new_val = []
else:
new_val = value.cont_prune_key_chain(
"/".join(keys_in_chain[1:])
)
if len(new_val) > 0:
out_dict[key] = new_val
else:
new_val = value.cont_to_dict()
if len(new_val) > 0:
out_dict[key] = value.cont_to_dict()
else:
if len(keys_in_chain) != 1 or key != keys_in_chain[0]:
out_dict[key] = value
return ivy.Container(out_dict, **self._config)
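    # A minimal sketch of ``cont_prune_key_chain`` (example container assumed):
    #
    #   cont = ivy.Container({"a": {"b": 0, "c": 1}, "d": 2})
    #   pruned = cont.cont_prune_key_chain("a/b")
    #   # pruned keeps a/c and d, with a/b removed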
def cont_prune_key_chains(self, key_chains, ignore_none=True):
"""Recursively prune set of key chains.
Parameters
----------
        key_chains
            The key chains to prune, as a list/tuple, nested dict, or single string.
        ignore_none
            Whether to simply return the whole container if key_chains is None.
            (Default value = True)
Returns
-------
ret
Container with keys in the set of key chains pruned.
"""
if key_chains is None and ignore_none:
return self
if isinstance(key_chains, (list, tuple)):
return self._cont_prune_key_chains_input_as_seq(key_chains)
elif isinstance(key_chains, dict):
return self._cont_prune_key_chains_input_as_dict(key_chains)
elif isinstance(key_chains, str):
return self._cont_prune_key_chains_input_as_seq([key_chains])
else:
raise ivy.utils.exceptions.IvyException(
"Invalid type for input key_chains, must either be a list, tuple, dict"
f" or ivy.Container, but found type {type(key_chains)}"
)
def cont_format_key_chains(self, format_fn):
"""Format all key-chains, using the formatting function.
Parameters
----------
        format_fn
            The function to apply to each key chain string.
Returns
-------
ret
Container with the same key-chain structure, but the key strings formatted.
"""
return ivy.Container({format_fn(k): v for k, v in self.cont_to_iterator()})
def cont_sort_by_key(self):
new_dict = {}
for k, v in self.items():
if isinstance(v, ivy.Container):
v_back = v.cont_sort_by_key()
else:
v_back = v
new_dict[k] = v_back
return ivy.Container(new_dict, **self._config)
def cont_prune_empty(self, keep_nones=False, base=True):
"""Recursively prunes empty keys from the container dict structure.
Returns None if the entire container is empty.
Parameters
----------
keep_nones
Default value = False)
base
Default value = True)
Returns
-------
ret
Container with empty keys pruned.
"""
out_dict = {}
for key, value in self.items():
if isinstance(value, ivy.Container):
new_value = value.cont_prune_empty(keep_nones, False)
if new_value:
out_dict[key] = new_value
elif self._cont_ivy.exists(value) or keep_nones:
out_dict[key] = value
if len(out_dict):
return ivy.Container(out_dict, **self._config)
if base:
return ivy.Container(**self._config)
return
def cont_prune_key_from_key_chains(self, absolute=None, containing=None):
"""Recursively prune absolute key or key containing a certain substring
from all key chains.
Parameters
----------
absolute
The absolute key to detect in the key chains. (Default value = None)
containing
A substring to check each key for, when deciding which keys to prune.
(Default value = None)
Returns
-------
Container with specified key or substring-containing-key from all key chains
removed from the chain.
"""
ivy.utils.assertions.check_all_or_any_fn(
absolute,
containing,
fn=ivy.exists,
type="any",
limit=[1, 2],
message="at least one of absolute or containing must be specified",
as_array=False,
)
out_cont = ivy.Container(**self._config)
for key, value in self.items():
if (absolute and key == absolute) or (containing and containing in key):
if isinstance(value, ivy.Container):
out_cont = ivy.Container.cont_combine(out_cont, value)
else:
out_cont = value
elif isinstance(value, ivy.Container):
out_cont[key] = value.cont_prune_key_from_key_chains(
absolute, containing
)
else:
out_cont[key] = value
return out_cont
def cont_prune_keys_from_key_chains(self, absolute=None, containing=None):
"""Recursively prune absolute keys or keys containing certain
substrings from all key chains.
Parameters
----------
absolute
The absolute key to detect in the key chains. (Default value = None)
containing
A substring to check each key for, when deciding which keys to prune.
(Default value = None)
Returns
-------
Container with specified keys or substring-containing-keys from all
key chains removed from the chain.
"""
ivy.utils.assertions.check_all_or_any_fn(
absolute,
containing,
fn=ivy.exists,
type="any",
limit=[1, 2],
message="at least one of absolute or containing must be specified",
as_array=False,
)
out_cont = ivy.Container(**self._config)
for key, value in self.items():
if (
(absolute and key in absolute)
or containing
and max(con in key for con in containing)
):
if isinstance(value, ivy.Container):
out_cont = ivy.Container.cont_combine(out_cont, value)
else:
out_cont = value
elif isinstance(value, ivy.Container):
                out_cont[key] = value.cont_prune_keys_from_key_chains(
absolute, containing
)
else:
out_cont[key] = value
return out_cont
def cont_restructure_key_chains(
self, keychain_mapping, keep_orig=True, replace=True
):
"""Create a new container with the same contents, but a new key-chain
structure. Given by the mapping with keys as old key-chains and values
as new key-chains.
Parameters
----------
keychain_mapping
A dict with keys as old key-chains and values as new key-chains.
keep_orig
Whether to keep the original keys, or start from a new empty container.
Default is ``True``.
replace
Whether to replace the old key-chains by the new ones. Default is ``True``.
"""
new_cont = self.cont_copy() if keep_orig else ivy.Container()
for old_kc, new_kc in keychain_mapping.items():
if replace and old_kc in new_cont:
new_cont = new_cont.cont_prune_key_chain(old_kc)
new_cont = ivy.Container.cont_combine(
new_cont, ivy.Container({new_kc: self[old_kc]})
)
return new_cont
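    # A minimal sketch of ``cont_restructure_key_chains`` (example container and
    # mapping are assumptions):
    #
    #   cont = ivy.Container({"a": {"b": 0}})
    #   remapped = cont.cont_restructure_key_chains({"a/b": "c"})
    #   # the old chain a/b is pruned and its value re-attached under the key "c"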
def cont_restructure(self, mapping, keep_orig=True, replace=True):
"""Create a new container with the same contents, but a new key-chain
structure, and transposes and/or reshaped arrays. Given by the mapping
with keys as old key-chains and values as new key-chains.
Parameters
----------
mapping
A dict with keys as old key-chains and values as new key-chains.
keep_orig
            Whether to keep the original keys, or start from a new empty container.
Default is ``True``.
replace
Whether to replace the old key-chains by the new ones. Default is ``True``.
"""
new_cont = self.cont_copy() if keep_orig else ivy.Container()
for old_kc, new in mapping.items():
if replace and old_kc in new_cont:
new_cont = new_cont.cont_prune_key_chain(old_kc)
val = self[old_kc]
if isinstance(new, dict):
new_kc = new["key_chain"]
if "pattern" in new:
pattern = new["pattern"]
axes_lengths = new["axes_lengths"] if "axes_lengths" in new else {}
if isinstance(val, ivy.Container):
val = val.einops_rearrange(pattern, **axes_lengths)
else:
val = ivy.einops_rearrange(val, pattern, **axes_lengths)
else:
new_kc = new
new_cont = ivy.Container.cont_combine(
new_cont, ivy.Container({new_kc: val})
)
return new_cont
def cont_flatten_key_chains(
self, include_empty=False, above_height=None, below_depth=None
):
"""Summary.
Parameters
----------
include_empty
Default value = False)
above_height
Default value = None)
below_depth
Default value = None)
"""
return ivy.Container(
{
ivy.Container.cont_flatten_key_chain(
kc, above_height=above_height, below_depth=below_depth
): v
for kc, v in self.cont_to_iterator(include_empty=include_empty)
},
**self._config,
)
def cont_copy(self):
"""Create a copy of this container.
Returns
-------
A copy of the container
"""
return ivy.Container(self.cont_to_dict(), **self._config)
def cont_deep_copy(self):
"""Create a deep copy (copying all internal tensors) of this container.
return: A deep copy of the container
"""
return self.cont_map(
lambda x, kc: (
ivy.copy_array(x) if ivy.is_array(x) and not isinstance(x, str) else x
)
)
def __deepcopy__(self, memo):
return self.cont_deep_copy()
def cont_map(
self,
func,
key_chains=None,
to_apply=True,
prune_unapplied=False,
map_sequences=False,
inplace=False,
key_chain="",
):
"""Apply function to all array values of container.
Parameters
----------
func
Function to apply to each container entry
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
inplace
Whether to apply the mapping inplace, or return a new container.
Default is ``False``.
key_chain
Chain of keys for this dict entry (Default value = '')
Returns
-------
New container following the function mapped to each sub-array.
"""
return_dict = self if inplace else {}
for key, value in self.items():
this_key_chain = key if key_chain == "" else f"{str(key_chain)}/{str(key)}"
if isinstance(value, ivy.Container):
ret = value.cont_map(
func,
key_chains,
to_apply,
prune_unapplied,
map_sequences,
inplace,
this_key_chain,
)
if prune_unapplied and not ret:
continue
if not inplace:
return_dict[key] = ret
elif isinstance(value, (list, tuple)) and map_sequences:
ret = ivy.nested_map(
lambda x: func(x, None), value, True, shallow=False
)
if prune_unapplied and not ret:
continue
return_dict[key] = ret
else:
if key_chains is not None:
if (this_key_chain in key_chains and not to_apply) or (
this_key_chain not in key_chains and to_apply
):
if prune_unapplied:
continue
return_dict[key] = value
continue
return_dict[key] = func(value, this_key_chain)
if inplace:
return self
return ivy.Container(return_dict, **self._config)
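    # A minimal sketch of ``cont_map`` (the example arrays are assumptions): the
    # function receives each leaf value together with its key chain.
    #
    #   cont = ivy.Container({"a": ivy.array([1.0]), "b": {"c": ivy.array([2.0])}})
    #   doubled = cont.cont_map(lambda x, kc: x * 2)
    #   # doubled["a"] == ivy.array([2.0]), doubled["b"]["c"] == ivy.array([4.0])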
def cont_map_sub_conts(
self,
func,
key_chains=None,
to_apply=True,
prune_unapplied=False,
inplace=False,
key_chain="",
include_self=True,
):
"""Apply function to all sub-contains in the container.
Parameters
----------
func
Function to apply to each sub-container
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
inplace
Whether to apply the mapping inplace, or return a new container.
Default is ``False``.
key_chain
Chain of keys for this dict entry (Default value = '')
include_self
Whether to also apply the (possibly in-place) function to this container.
Default is ``True``.
Returns
-------
New container following the function mapped to each sub-container.
"""
return_dict = self if inplace else {}
for key, value in self.items():
this_key_chain = key if key_chain == "" else f"{key_chain}/{key}"
if isinstance(value, ivy.Container):
ret = value.cont_map_sub_conts(
func, key_chains, to_apply, prune_unapplied, inplace, this_key_chain
)
if prune_unapplied and not ret:
continue
if not inplace:
return_dict[key] = ret
elif (
key_chains is None
or (this_key_chain not in key_chains or to_apply)
and (this_key_chain in key_chains or not to_apply)
or not prune_unapplied
):
return_dict[key] = value
ret = return_dict if inplace else ivy.Container(return_dict, **self._config)
if key_chain != "" or include_self:
ret = func(ret, key_chain)
if inplace:
return
return ret
def cont_with_entries_as_lists(self):
def to_list(x, _=""):
try:
return self._cont_ivy.to_list(x)
except (IvyException, IvyBackendException):
return x
return self.cont_map(to_list)
def cont_reshape_like(self, target_dict, leading_shape=None, return_cont=None):
"""Set shapes of container entries to shapes specified by new container
with the same key structure.
Parameters
----------
        target_dict
            A dict or container of target shapes, with the same key structure
            as this container.
        leading_shape
            An optional leading shape to prepend to each target shape.
            (Default value = None)
        return_cont
            The container to populate and return; used internally for recursion.
            (Default value = None)
Returns
-------
ret
new container with values of updated shapes
"""
leading_shape = self._cont_ivy.default(leading_shape, [])
if return_cont is None:
return_cont = self.cont_copy()
for (_, v_shape), (k, v) in zip(target_dict.items(), return_cont.items()):
if isinstance(v_shape, dict):
return_cont[k] = self.cont_reshape_like(
v_shape, leading_shape, return_cont[k]
)
else:
return_cont[k] = self._cont_ivy.reshape(
v, leading_shape + list(v_shape)
)
return ivy.Container(return_cont, **self._config)
def cont_create_if_absent(self, key, value, inplace=True):
"""Add a key to the container with corresponding value, if it is not
        already present. Otherwise, do nothing.
Parameters
----------
        key
            The key (or key chain) to add if it is not already present.
        value
            The value to set at the key.
        inplace
            Whether to modify this container, or return a copy.
            (Default value = True)
"""
if key in self:
return
self.cont_set_at_key_chain(key, value, inplace)
def cont_if_exists(self, key):
"""Return the sub-container at the following key if it exists,
otherwise None.
Parameters
----------
        key
            The key to query.
"""
try:
return self[key]
except KeyError:
return
def cont_try_kc(self, key):
"""Try the following key or key chain, returning self if not present.
Parameters
----------
        key
            The key or key chain to try.
"""
try:
return self[key]
except IvyException:
return self
def cont_cutoff_at_depth(self, depth_cutoff, inplace=False):
"""Summary.
Parameters
----------
depth_cutoff
param inplace: (Default value = False)
inplace
(Default value = False)
"""
total_depth = self.cont_max_depth
copy = self.cont_copy()
def _maybe_cutoff(cont, kc):
if total_depth - copy[kc].cont_max_depth < depth_cutoff:
return cont
if inplace:
cont.clear()
return ivy.Container()
ret = self.cont_map_sub_conts(_maybe_cutoff, inplace=inplace)
if inplace:
return
return ret
def cont_cutoff_at_height(self, height_cutoff, inplace=False):
"""Summary.
Parameters
----------
height_cutoff
param inplace: (Default value = False)
inplace
(Default value = False)
"""
copy = self.cont_copy()
def _maybe_cutoff(cont, kc):
if copy[kc].cont_max_depth > height_cutoff:
return cont
if inplace:
cont.clear()
return ivy.Container()
ret = self.cont_map_sub_conts(_maybe_cutoff, inplace=inplace)
if inplace:
return
return ret
def _cont_slice_keys(self, key_slice):
keys = list(self.keys())
if isinstance(key_slice, str):
ivy.utils.assertions.check_true(len(key_slice) == 3 and key_slice[1] == ":")
ivy.utils.assertions.check_true(self._alphabetical_keys)
start_char = key_slice[0]
end_char = key_slice[2]
start_idx = min(i for i, k in enumerate(keys) if k[0] == start_char)
end_idx = max(i for i, k in enumerate(keys) if k[0] == end_char) + 1
key_slice = slice(start_idx, end_idx, 1)
ret = self.cont_copy()
desired_keys = keys[key_slice]
# noinspection PyUnresolvedReferences
return ret.cont_at_key_chains(desired_keys)
def cont_slice_keys(self, key_slice, all_depths=False):
"""Summary.
Parameters
----------
key_slice
param all_depths: (Default value = False)
all_depths
(Default value = False)
"""
top_depth = self.cont_max_depth
if all_depths:
if isinstance(key_slice, dict):
first_slice = list(key_slice.values())[0]
for d in range(0, top_depth + 1):
if d not in key_slice:
key_slice[d] = first_slice
else:
key_slice = {d: key_slice for d in range(0, top_depth + 1)}
if isinstance(key_slice, dict):
def _fn(cont, kc):
depth = 0 if kc == "" else len(kc.split("/"))
if depth in key_slice:
# noinspection PyProtectedMember
return cont._cont_slice_keys(key_slice[depth])
return cont
return self.cont_map_sub_conts(_fn)
return self._cont_slice_keys(key_slice)
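    # A minimal sketch of ``cont_slice_keys`` (example container assumed): keys
    # can be sliced positionally or by an 'a:z'-style string over their first
    # letters (the latter requires alphabetical keys, the default).
    #
    #   cont = ivy.Container({"alpha": 0, "beta": 1, "gamma": 2})
    #   cont.cont_slice_keys(slice(0, 2))   # keeps "alpha" and "beta"
    #   cont.cont_slice_keys("a:b")         # keeps keys starting with 'a'..'b'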
def cont_with_print_limit(self, print_limit, inplace=False):
"""Summary.
Parameters
----------
print_limit
param inplace: (Default value = False)
inplace
(Default value = False)
"""
def _update_print_limit(cont, _):
cont._print_limit = print_limit
return cont
ret = self.cont_map_sub_conts(_update_print_limit, inplace=inplace)
if inplace:
return
return ret
# noinspection PyTypeChecker
def cont_remove_print_limit(self, inplace=False):
"""Summary.
Parameters
----------
inplace
Default value = False)
"""
return self.cont_with_print_limit(None, inplace)
def cont_with_key_length_limit(self, key_length_limit, inplace=False):
"""Summary.
Parameters
----------
key_length_limit
param inplace: (Default value = False)
inplace
(Default value = False)
"""
def _update_key_length_limit(cont, _):
cont._key_length_limit = key_length_limit
return cont
ret = self.cont_map_sub_conts(_update_key_length_limit, inplace=inplace)
if inplace:
return
return ret
def cont_remove_key_length_limit(self, inplace=False):
"""Summary.
Parameters
----------
inplace
Default value = False)
"""
return self.cont_with_key_length_limit(None, inplace)
def cont_with_print_indent(self, print_indent, inplace=False):
"""Summary.
Parameters
----------
print_indent
param inplace: (Default value = False)
inplace
(Default value = False)
"""
def _update_print_indent(cont, _):
cont._print_indent = print_indent
return cont
ret = self.cont_map_sub_conts(_update_print_indent, inplace=inplace)
if inplace:
return
return ret
def cont_with_print_line_spacing(self, print_line_spacing, inplace=False):
"""Summary.
Parameters
----------
print_line_spacing
param inplace: (Default value = False)
inplace
(Default value = False)
"""
def _update_print_line_spacing(cont, _):
cont._print_line_spacing = print_line_spacing
return cont
ret = self.cont_map_sub_conts(_update_print_line_spacing, inplace=inplace)
if inplace:
return
return ret
def cont_with_default_key_color(self, default_key_color, inplace=False):
"""Summary.
Parameters
----------
default_key_color
param inplace: (Default value = False)
inplace
(Default value = False)
"""
def _update_default_key_color(cont, _):
cont._default_key_color = default_key_color
return cont
ret = self.cont_map_sub_conts(_update_default_key_color, inplace=inplace)
if inplace:
return
return ret
def cont_with_ivy_backend(self, ivy_backend: str, inplace=False):
"""Summary.
Parameters
----------
self
input Container
ivy_backend
backend to use
inplace
whether to modify the container or return a copy
"""
if inplace:
self._cont_ivy = ivy_backend
self._config["ivyh"] = ivy_backend
return self
else:
return ivy.Container(self, ivyh=ivy_backend)
def cont_show(self):
print(self)
# noinspection PyUnresolvedReferences
def cont_show_sub_container(self, sub_cont_or_keychain):
"""Summary.
Parameters
----------
sub_cont_or_keychain
"""
# copy this container
this_cont = self.cont_copy()
# get the sub-container
if isinstance(sub_cont_or_keychain, str):
sub_cont = self.cont_at_key_chain(sub_cont_or_keychain)
else:
sub_cont = sub_cont_or_keychain
# find the key chain of the sub-container
sub_cont_kc = self.cont_find_sub_container(sub_cont)
# show this container if key-chain not found, and return
if not sub_cont_kc:
print(self)
return
# otherwise, replace sub-container in this container with known key
this_cont[sub_cont_kc] = ivy.Container({"SUB_CONT": None})
# get the formatted reprs
this_repr = this_cont.cont_with_default_key_color("green").__repr__()
this_repr_red = this_cont.cont_with_default_key_color("red").__repr__()
this_repr_stripped = ansi_escape.sub("", this_repr)
sub_repr = sub_cont.cont_with_default_key_color("red").__repr__()
# remove the outer brackets from the sub repr
sub_repr = "\n" + "\n".join(sub_repr.split("\n")[1:-1]) + "\n"
# find the sub-container placeholder
idx = this_repr_stripped.find("SUB_CONT: null")
# count the lines above and below the sub-container
num_lines_above = this_repr_stripped[0:idx].count("\n")
num_lines_below = this_repr_stripped[idx:].count("\n")
# get the str reprs above and below
this_repr_split = this_repr.split("\n")
this_repr_red_split = this_repr_red.split("\n")
this_repr_above = "\n".join(
this_repr_split[0 : num_lines_above - 1]
+ [this_repr_red_split[num_lines_above - 1]]
)
this_repr_below = "\n".join(this_repr_split[-num_lines_below:])
# count the number of lines needed to be prepended to the sub-container repr
cur_num_spaces = 0
for i, s in enumerate(sub_repr[1:]):
if s != " ":
break
cur_num_spaces += 1
exp_num_spaces = 0
for i, s in enumerate(this_repr.split("\n")[num_lines_above]):
if s != " ":
break
exp_num_spaces += 1
num_spaces_to_add = exp_num_spaces - cur_num_spaces
# prepend these lines to the sub-container
sub_repr = (
"\n"
+ "\n".join(
[" " * num_spaces_to_add + s for s in sub_repr[1:-1].split("\n")]
)
+ "\n"
)
# show
print(this_repr_above + sub_repr + this_repr_below)
# Built-ins #
# ----------#
def __repr__(self, as_repr=True):
indent_str = " " * self._print_indent
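# _align_array re-indents the repr of a single array leaf so that wrapped
# rows line up underneath the opening bracket, accounting for the current
# print indent and the length of the leaf's key.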
def _align_array(array_str_in):
split_phrase_dict = {
"": "([",
"jax": "([",
"numpy": "([",
"tensorflow": "([",
"torch": "([",
"paddle": "])",
"mxnet": "])",
}
split_phrase = split_phrase_dict[self._cont_ivy.current_backend_str()]
array_str_in_split = array_str_in.split(split_phrase)
leading_str_to_keep = array_str_in_split[0].replace("\\n", "")
indented_key_size = len(leading_str_to_keep.replace('"', "").split(": ")[0])
indented_key_str = " " * (indented_key_size + 2)
padded = False
def _pre_pad_alpha_line(str_in):
nonlocal padded
padded = True
return "\\n" + indent_str + indented_key_str + str_in
leading_str_to_keep = ", ".join(
[
_pre_pad_alpha_line(s) if s[0].isalpha() and i != 0 else s
for i, s in enumerate(leading_str_to_keep.split(", "))
]
)
local_indent_str = "" if padded else indent_str
leading_str = leading_str_to_keep.split("\\n")[-1].replace('"', "")
remaining_str = array_str_in_split[1]
num_extra_dims = 0
for i, char in enumerate(remaining_str):
if char != "[":
num_extra_dims = i
break
extra_indent = (len(leading_str) + 1 + num_extra_dims) * " "
array_str_in = split_phrase.join([leading_str_to_keep, remaining_str])
uniform_indent_wo_overflow = array_str_in.replace(
"\\n[", "\n" + local_indent_str + extra_indent + "["
)
uniform_indent_wo_overflow_list = list(
filter(None, uniform_indent_wo_overflow.split("\\n"))
)
uniform_indent = "\n".join(
[
(
local_indent_str + extra_indent + " " + s
if (
s[0].isnumeric()
or s[0] == "-"
or s[0:3] == "..."
or max(ss in s[0:6] for ss in ["nan, ", "inf, "])
)
else (
indent_str + indented_key_str + s
if (not s[0].isspace() and s[0] != '"')
else s
)
)
for s in uniform_indent_wo_overflow_list
]
)
indented = uniform_indent
# 10 dimensions is a sensible upper bound for the number in a single array
for i in range(2, 10):
indented = indented.replace(" " * (i - 1) + "[" * i, "[" * i)
indented = "\n".join(
[s for s in indented.split("\n") if bool(s) and not s.isspace()]
)
return indented
def _align_arrays(str_in):
chunks = str_in.split("\n" + indent_str)
aligned_array_chunks = {
i: _align_array(c) for i, c in enumerate(chunks) if "\\n" in c
}
chunks = [
aligned_array_chunks[i] if i in aligned_array_chunks else c_orig
for i, c_orig in enumerate(chunks)
]
return ("\n" + indent_str).join(chunks)
new_dict = {}
for k, v in self.items():
if isinstance(v, ivy.Container):
# noinspection PyArgumentList
rep = v.__repr__(as_repr=False)
else:
if (
(self._cont_ivy.is_native_array(v) or isinstance(v, ivy.Array))
and len(list(v.shape)) > 0
and ivy.exists(self._print_limit)
and _reduce(mul, v.shape) > self._print_limit
):
rep = (type(v), "shape=", list(v.shape))
elif (
isinstance(v, (list, tuple))
and v
and (
self._cont_ivy.is_native_array(v[0])
or isinstance(v[0], ivy.Array)
)
):
if (
isinstance(v, tuple)
and hasattr(v, "_asdict")
and hasattr(v, "_fields")
):
if len(v) <= self._print_limit:
rep = tuple(
[
(
f"{name} = {v[i]}"
if v[i].size < self._print_limit
else (
f"{name} = {type(v[i])},"
f" shape={list(v[i].shape)}"
)
)
for i, name in enumerate(v._fields)
],
)
else:
rep = (
f"NamedTuple({len(v)})",
type(v[0]),
f"shape={list(v[0].shape)}",
)
elif isinstance(v, tuple):
rep = (
f"tuple({len(v)})",
type(v[0]),
f"shape={list(v[0].shape)}",
)
else:
rep = (
f"list[{len(v)}]",
type(v[0]),
f"shape={list(v[0].shape)}",
)
else:
rep = v
new_dict[k] = rep
if as_repr:
json_dumped_str = _align_arrays(
json.dumps(
ivy.Container(new_dict, **self._config)
.cont_map(
lambda x, kc: (
x
if _is_jsonable(x)
else _repr(x).replace(" ", "").replace(",", ", ")
)
)
.cont_to_dict(),
indent=self._print_indent,
)
)
json_dumped_str = json_dumped_str.replace("true", "True").replace(
"false", "False"
)
def _add_newline(str_in):
str_in_split = str_in.split("\n")
str_split_size = len(str_in_split)
return "\n".join(
[
(
("\n" * self._print_line_spacing + ss)
if i == (str_split_size - 1)
else ss
)
for i, ss in enumerate(str_in_split)
]
)
json_dumped_str = '":'.join(
[_add_newline(s) for s in json_dumped_str.split('":')]
)
# improve tf formatting
if ivy.backend_stack and ivy.current_backend_str() == "tensorflow":
json_dumped_str_split = json_dumped_str.split("'Variable:")
json_dumped_str = (
json_dumped_str_split[0]
+ ", "
+ ", ".join(
[
"'".join(ss.split("'")[1:])
for ss in json_dumped_str_split[1:]
]
)
)
json_dumped_str = (
json_dumped_str.replace(":shape", ", shape")
.replace(")dtype=", "), dtype=")
.replace(", ),", ",),")
)
json_dumped_str = re.sub("}, $", "}", json_dumped_str)
# color keys
json_dumped_str_split = json_dumped_str.split('":')
split_size = len(json_dumped_str_split)
json_dumped_str = '":'.join(
[
(
' "'.join(
sub_str.split(' "')[:-1]
+ [
termcolor.colored(
ivy.Container.cont_trim_key(
sub_str.split(' "')[-1], self._key_length_limit
),
self._default_key_color,
)
]
)
if i < split_size - 1
else sub_str
)
for i, sub_str in enumerate(json_dumped_str_split)
]
)
# remove quotation marks, shape tuple, and color other elements of the dict
ret = (
json_dumped_str.replace('"', "")
.replace(", 'shape=', [", " shape=[")
.replace(":", termcolor.colored(":", "magenta"))
.replace("{", termcolor.colored("{", "blue"))
.replace("}", termcolor.colored("}", "blue"))
.replace("shape=", termcolor.colored("shape=", "magenta"))
.replace("device=", termcolor.colored("device=", "magenta"))
.replace("<class'", "<class '")
.replace("'", "")
.replace("<class", "<" + termcolor.colored("class", "blue"))
)
# ToDo: make the solution below more elegant
for i in range(10):
ret = ret.replace(f"diff_{i}", termcolor.colored(f"diff_{i}", "red"))
for keyword, color in self._keyword_color_dict.items():
ret = ret.replace(keyword, termcolor.colored(keyword, color))
return ret
return new_dict
def __dir__(self):
return list(super().__dir__()) + list(self.keys())
# noinspection PyProtectedMember
def __getattr__(self, item, *args, **kwargs):
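"""Look up ``item`` as a key of this container; if it is not a key, fetch
the attribute from every leaf instead (calling it with the given
arguments when it is callable) and return a new container of the
results. Raises AttributeError if any leaf lacks the attribute."""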
try:
ret = dict.__getitem__(self, item)
except KeyError:
# noinspection PyUnresolvedReferences
ret = ivy.Container()
for k, v in self.items():
if isinstance(v, ivy.Container):
result = v.__getattr__(item, *args, **kwargs)
else:
# raise error
if not hasattr(v, item):
raise AttributeError(
f"'{type(v).__module__}' object has no attribute '{item}'"
)
attr = getattr(v, item)
result = attr(*args, **kwargs) if callable(attr) else attr
ret.__setitem__(k, result)
return ret
def __setattr__(self, name, value):
if name[0] != "_":
self[name] = value
else:
super().__setattr__(name, value)
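# _get_queue_item lazily loads containers from the multiprocessing queues
# backing this container, caches them, combines the ones covering the
# requested indices, and returns the query shifted into the combined
# container's index space.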
def _get_queue_item(self, query):
if isinstance(query, int):
queue_queries = [query]
elif isinstance(query, slice):
queue_queries = list(
range(query.start, query.stop, ivy.default(query.step, 1))
)
elif isinstance(query, (list, tuple)):
queue_queries = list(
range(query[0].start, query[0].stop, ivy.default(query[0].step, 1))
)
else:
raise ivy.utils.exceptions.IvyException(
"Invalid slice type, must be one of integer, slice "
"or sequences of slices."
)
queue_idxs = {
np.sum(q >= self._queue_load_sizes_cum).item() for q in queue_queries
}
conts = []
for i in queue_idxs:
if i not in self._loaded_containers_from_queues:
cont = ivy.Container(
self._queues[i].get(timeout=self._queue_timeout), **self._config
).to_ivy()
self._loaded_containers_from_queues[i] = cont
else:
cont = self._loaded_containers_from_queues[i]
conts.append(cont)
combined_cont = self._container_combine_method(conts)
idx = list(queue_idxs)[0]
offset = 0 if idx == 0 else self._queue_load_sizes_cum[idx - 1]
if isinstance(query, int):
shifted_query = query - offset
elif isinstance(query, slice):
shifted_query = slice(query.start - offset, query.stop - offset, query.step)
elif isinstance(query, (list, tuple)):
shifted_query = tuple(
slice(slc.start - offset, slc.stop - offset, slc.step) for slc in query
)
# noinspection PyUnboundLocalVariable
return combined_cont[shifted_query]
def __getitem__(self, query):
"""Get slice, key or key chain of container object.
Parameters
----------
query slice or str
slice object, key or key chain to query all container elements.
Returns
-------
Container object at desired query.
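Examples
--------
A minimal illustrative sketch (key and slice queries):
>>> cont = ivy.Container(a=ivy.array([0., 1., 2.]),
...                      b=ivy.array([3., 4., 5.]))
>>> cont["a"]
ivy.array([0., 1., 2.])
>>> y = cont[1:3]
>>> print(y)
{
a: ivy.array([1., 2.]),
b: ivy.array([4., 5.])
}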
"""
if isinstance(query, str):
if query == "":
return self
if "/" in query or "." in query:
ret = self.cont_at_key_chain(query)
return ret
ret = dict.__getitem__(self, query)
return ret
elif ivy.exists(self._queues):
ret = self._get_queue_item(query)
return ret
return_dict = {}
for key, value in self.items():
if isinstance(value, ivy.Container):
return_dict[key] = value[query]
else:
# noinspection PyBroadException
if isinstance(value, (list, tuple)):
if len(value) == 0:
return_dict[key] = value
else:
return_dict[key] = value[query]
elif value is None or hasattr(value, "shape") and value.shape == ():
return_dict[key] = value
else:
return_dict[key] = value[query]
ret = ivy.Container(return_dict, **self._config)
return ret
def __setitem__(self, query, val):
"""Set key or key chain of container object.
Parameters
----------
query slice or str
slice object, key or key chain at which to set all container elements.
val ivy.Container, array, or other
The value to set at the desired query.
Returns
-------
New container after updating.
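Examples
--------
A minimal illustrative sketch (setting a plain key and a nested key chain):
>>> cont = ivy.Container(a=ivy.array([1., 2.]))
>>> cont["b"] = ivy.array([3., 4.])
>>> cont["c/d"] = ivy.array([5.])
>>> "c/d" in cont
True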
"""
def _map_fn(fn, x):
x = x.data if isinstance(x, ivy.Array) else x
return fn(x)
if query == "_backend":
self._backend = val
return
if query == "dynamic_backend":
def func(x, _):
if isinstance(x, ivy.Array):
x.dynamic_backend = True
self.cont_map(func)
self._dynamic_backend = val
return
if isinstance(query, str) and ("/" in query or "." in query):
return self.cont_set_at_key_chain(query, val, inplace=True)
else:
return dict.__setitem__(self, query, val)
def __contains__(self, key):
if isinstance(key, str) and ("/" in key or "." in key):
return self.cont_has_key_chain(key)
elif isinstance(key, ivy.Container):
return self.cont_contains_sub_container(key)
else:
return dict.__contains__(self, key)
def __getstate__(self):
state_dict = copy.copy(self.__dict__)
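# backend module handles cannot be pickled, so they are replaced by their
# backend strings here and re-resolved in __setstate__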
state_dict["_local_ivy"] = (
state_dict["_local_ivy"].current_backend_str()
if state_dict["_local_ivy"] is not None
else None
)
config_in = copy.copy(state_dict["_config_in"])
config_in["ivyh"] = (
config_in["ivyh"].current_backend_str()
if config_in["ivyh"] is not None
else None
)
state_dict["_config_in"] = config_in
config = copy.copy(state_dict["_config"])
config["ivyh"] = (
config["ivyh"].current_backend_str()
if getattr(config, "ivyh", None) is not None
else None
)
state_dict["_config"] = config
return state_dict
def __setstate__(self, state_dict):
if "_local_ivy" in state_dict and ivy.exists(state_dict["_local_ivy"]):
if len(state_dict["_local_ivy"]) > 0:
state_dict["_local_ivy"] = ivy.with_backend(state_dict["_local_ivy"])
else:
state_dict["_local_ivy"] = ivy
if "_config_in" in state_dict:
config_in = copy.copy(state_dict["_config_in"])
if "ivyh" in config_in and ivy.exists(config_in["ivyh"]):
if len(config_in["ivyh"]) > 0:
config_in["ivyh"] = ivy.with_backend(config_in["ivyh"])
else:
config_in["ivyh"] = ivy
state_dict["_config_in"] = config_in
if "_config" in state_dict:
config = copy.copy(state_dict["_config"])
if "ivyh" in config and ivy.exists(config["ivyh"]):
if len(config["ivyh"]) > 0:
config["ivyh"] = ivy.with_backend(config["ivyh"])
else:
config["ivyh"] = ivy
state_dict["_config"] = config
self.__dict__.update(state_dict)
# Getters and Setters #
# --------------------#
# private
@property
def _cont_ivy(self):
return ivy.default(self._local_ivy, ivy)
@_cont_ivy.setter
def _cont_ivy(self, local_ivy):
self._local_ivy = local_ivy
# public
@property
def cont_shape(self):
"""The shape of the arrays in the container.
None is placed in indices which are not consistent across
arrays.
"""
return self._cont_get_shape()
@property
def cont_dtype(self):
"""The dtype of the arrays in the container.
None is returned if the dtypes are not consistent.
"""
return self._cont_get_dtype()
@property
def cont_shapes(self):
"""The shapes of each array in the container.
None is placed in leaf entries without a shape attribute.
"""
return self._cont_get_shapes()
@property
def cont_dev(self):
"""The device to which the arrays in the container belong.
None returned if the devices are not consistent.
"""
return self._cont_get_dev()
@property
def cont_dev_str(self):
"""The device to which the arrays in the container belong.
None returned if the devices are not consistent.
"""
return self._cont_get_dev()
@property
def cont_ivy(self):
return self._cont_ivy
@property
def cont_config(self):
return self._config
@property
def cont_max_depth(self):
kcs = [kc for kc in self.cont_to_iterator_keys(include_empty=True)]
if not kcs:
return 0
return max(len(kc.split("/")) for kc in kcs)
@property
def dynamic_backend(self):
return self._dynamic_backend
| ivy/ivy/data_classes/container/base.py/0 | {
"file_path": "ivy/ivy/data_classes/container/base.py",
"repo_id": "ivy",
"token_count": 74486
} | 9 |
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithImageExperimental(ContainerBase):
pass
| ivy/ivy/data_classes/container/experimental/image.py/0 | {
"file_path": "ivy/ivy/data_classes/container/experimental/image.py",
"repo_id": "ivy",
"token_count": 33
} | 10 |
# global
from typing import Union, Optional, Tuple, Literal, List, Dict, Sequence
# local
from ivy.data_classes.container.base import ContainerBase
import ivy
inf = float("inf")
# ToDo: implement all methods here as public instance methods
# noinspection PyMissingConstructor,PyMethodParameters
class _ContainerWithLinearAlgebra(ContainerBase):
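# Every method below maps the corresponding ivy function over the
# container's leaves via ContainerBase.cont_multi_map_in_function, so
# per-leaf arguments (e.g. ``upper``, ``axis``) may themselves be passed
# as containers.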
@staticmethod
def _static_matmul(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
transpose_a: Union[bool, ivy.Container] = False,
transpose_b: Union[bool, ivy.Container] = False,
adjoint_a: Union[bool, ivy.Container] = False,
adjoint_b: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.matmul. This method
simply wraps the function, and so the docstring for ivy.matul also
applies to this method with minimal changes.
Parameters
----------
x1
first input array
x2
second input array
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
the matrix multiplication result of x1 and x2
Examples
--------
>>> x = ivy.Container(a = ivy.array([[3., -1.], [-1., 3.]]) ,
... b = ivy.array([[2., 1.], [1., 1.]]))
>>> y = ivy.Container.static_matmul(x, x)
>>> print(y)
{
a: ivy.array([[10., -6.],
[-6., 10.]]),
b: ivy.array([[5., 3.],
[3., 2.]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"matmul",
x1,
x2,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def matmul(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
transpose_a: Union[bool, ivy.Container] = False,
transpose_b: Union[bool, ivy.Container] = False,
adjoint_a: Union[bool, ivy.Container] = False,
adjoint_b: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.matmul. This method
simply wraps the function, and so the docstring for ivy.matmul also
applies to this method with minimal changes.
Parameters
----------
self
first input array
x2
second input array
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
the matrix multiplication result of self and x2
Examples
--------
>>> x = ivy.Container(a = ivy.array([[3., -1.], [-1., 3.]]) ,
... b = ivy.array([[2., 1.], [1., 1.]]))
>>> y = x.matmul(x)
>>> print(y)
{
a: ivy.array([[10., -6.],
[-6., 10.]]),
b: ivy.array([[5., 3.],
[3., 2.]])
}
"""
return self._static_matmul(
self,
x2,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_cholesky(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
upper: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.cholesky. This method
simply wraps the function, and so the docstring for ivy.cholesky also
applies to this method with minimal changes.
Parameters
----------
x
input array or container having shape (..., M, M) and whose innermost two
dimensions form square symmetric positive-definite matrices. Should have a
floating-point data type.
upper
If True, the result must be the upper-triangular Cholesky factor U. If
False, the result must be the lower-triangular Cholesky factor L.
Default: ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the Cholesky factors for each square matrix. If upper
is False, the returned container must contain lower-triangular matrices;
otherwise, the returned container must contain upper-triangular matrices.
The returned container must have a floating-point data type determined by
Type Promotion Rules and must have the same shape as self.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[3., -1.], [-1., 3.]]),
... b=ivy.array([[2., 1.], [1., 1.]]))
>>> y = ivy.Container.static_cholesky(x, upper=True)
>>> print(y)
{
a: ivy.array([[1.73, -0.577],
[0., 1.63]]),
b: ivy.array([[1.41, 0.707],
[0., 0.707]])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([[3., -1], [-1., 3.]]),
... b=ivy.array([[2., 1.], [1., 1.]]))
>>> upper = ivy.Container(a=True, b=False)
>>> y = ivy.Container.static_cholesky(x, upper=upper)
>>> print(y)
{
a: ivy.array([[1.73, -0.577],
[0., 1.63]]),
b: ivy.array([[1.41, 0.],
[0.707, 0.707]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"cholesky",
x,
upper=upper,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def cholesky(
self: ivy.Container,
/,
*,
upper: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.cholesky. This method
simply wraps the function, and so the docstring for ivy.cholesky also
applies to this method with minimal changes.
Parameters
----------
self
input container having shape (..., M, M) and whose innermost two dimensions
form square symmetric positive-definite matrices. Should have a
floating-point data type.
upper
If True, the result must be the upper-triangular Cholesky factor U. If
False, the result must be the lower-triangular Cholesky factor L.
Default: ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the Cholesky factors for each square matrix. If upper
is False, the returned container must contain lower-triangular matrices;
otherwise, the returned container must contain upper-triangular matrices.
The returned container must have a floating-point data type determined by
Type Promotion Rules and must have the same shape as self.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[3., -1],[-1., 3.]]),
... b=ivy.array([[2., 1.],[1., 1.]]))
>>> y = x.cholesky(upper=True)
>>> print(y)
{
a: ivy.array([[1.73, -0.577],
[0., 1.63]]),
b: ivy.array([[1.41, 0.707],
[0., 0.707]])
}
"""
return self._static_cholesky(
self,
upper=upper,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_cross(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axis: Union[int, ivy.Container] = -1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.cross. This method simply
wraps the function, and so the docstring for ivy.cross also applies to
this method with minimal changes.
Parameters
----------
x1
first input array. Should have a numeric data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have a numeric data type.
axis
the axis (dimension) of x1 and x2 containing the vectors for which to
compute the cross product. If set to -1, the function computes the
cross product for vectors defined by the last axis (dimension).
Default: ``-1``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise products. The returned array must have
a data type determined by :ref:`type-promotion`.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.array([9., 0., 3.])
>>> y = ivy.Container(a=ivy.array([1., 1., 0.]), b=ivy.array([1., 0., 1.]))
>>> z = ivy.Container.static_cross(x, y)
>>> print(z)
{
a: ivy.array([-3., 3., 9.]),
b: ivy.array([0., -6., 0.])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([5., 0., 0.]), b=ivy.array([0., 0., 2.]))
>>> y = ivy.Container(a=ivy.array([0., 7., 0.]), b=ivy.array([3., 0., 0.]))
>>> z = ivy.Container.static_cross(x, y)
>>> print(z)
{
a: ivy.array([0., 0., 35.]),
b: ivy.array([0., 6., 0.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"cross",
x1,
x2,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def cross(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axis: Union[int, ivy.Container] = -1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.cross. This method
simply wraps the function, and so the docstring for ivy.cross also
applies to this method with minimal changes.
Parameters
----------
self
first input array. Should have a numeric data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have a numeric data type.
axis
the axis (dimension) of x1 and x2 containing the vectors for which to
compute the cross product. If set to -1, the function
computes the cross product for vectors defined by the last axis (dimension).
Default: ``-1``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise products. The returned array must have
a data type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([5., 0., 0.]), b=ivy.array([0., 0., 2.]))
>>> y = ivy.Container(a=ivy.array([0., 7., 0.]), b=ivy.array([3., 0., 0.]))
>>> z = x.cross(y)
>>> print(z)
{
a: ivy.array([0., 0., 35.]),
b: ivy.array([0., 6., 0.])
}
"""
return self._static_cross(
self,
x2,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_det(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
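"""ivy.Container static method variant of ivy.det. This method simply
wraps the function, and so the docstring for ivy.det also applies to
this method with minimal changes."""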
return ContainerBase.cont_multi_map_in_function(
"det",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def det(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""
Examples
--------
>>> x = ivy.Container(a = ivy.array([[3., -1.], [-1., 3.]]) ,
... b = ivy.array([[2., 1.], [1., 1.]]))
>>> y = x.det()
>>> print(y)
{a:ivy.array(8.),b:ivy.array(1.)}
"""
return self._static_det(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_diagonal(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
offset: Union[int, ivy.Container] = 0,
axis1: Union[int, ivy.Container] = -2,
axis2: Union[int, ivy.Container] = -1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.diagonal. This method
simply wraps the function, and so the docstring for ivy.diagonal also
applies to this method with minimal changes.
Parameters
----------
x
input Container with leaf arrays having shape
``(..., M, N)`` and whose innermost two dimensions form
``MxN`` matrices.
offset
offset specifying the off-diagonal relative to the main diagonal.
- ``offset = 0``: the main diagonal.
- ``offset > 0``: off-diagonal above the main diagonal.
- ``offset < 0``: off-diagonal below the main diagonal.
Default: `0`.
axis1
axis to be used as the first axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to first axis (-2).
axis2
axis to be used as the second axis of the 2-D sub-arrays from which the
diagonals should be taken. Defaults to second axis (-1).
out
optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
A container with the diagonals. More details can be found in
the docstring for ivy.diagonal.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([[1., 2.], [3., 4.]]),
...                   b=ivy.array([[5., 6.], [7., 8.]]))
>>> d = ivy.Container.static_diagonal(x)
>>> print(d)
{
a:ivy.array([1., 4.]),
b:ivy.array([5., 8.])
}
>>> a = ivy.array([[0, 1, 2],
... [3, 4, 5],
... [6, 7, 8]])
>>> b = ivy.array([[-1., -2., -3.],
... [-3., 4., 5.],
...                [5., 6., 7.]])
>>> x = ivy.Container(a=a, b=b)
>>> d = ivy.Container.static_diagonal(x, offset=-1, axis1=0)
>>> print(d)
{
a:ivy.array([3., 7.]),
b:ivy.array([-3., 6.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"diagonal",
x,
offset=offset,
axis1=axis1,
axis2=axis2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def diagonal(
self: ivy.Container,
/,
*,
offset: Union[int, ivy.Container] = 0,
axis1: Union[int, ivy.Container] = -2,
axis2: Union[int, ivy.Container] = -1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.diagonal. This method
simply wraps the function, and so the docstring for ivy.diagonal also
applies to this method with minimal changes.
Parameters
----------
self
input Container with leaf arrays having shape
``(..., M, N)`` and whose innermost two dimensions form
``MxN`` matrices.
offset
offset specifying the off-diagonal relative to the main diagonal.
- ``offset = 0``: the main diagonal.
- ``offset > 0``: off-diagonal above the main diagonal.
- ``offset < 0``: off-diagonal below the main diagonal.
Default: `0`.
axis1
axis to be used as the first axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to first axis (-2).
axis2
axis to be used as the second axis of the 2-D sub-arrays from which the
diagonals should be taken. Defaults to second axis (-1).
out
optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
A container with the diagonals. More details can be found in
the docstring for ivy.diagonal.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([[1., 2.], [3., 4.]]),
... b=ivy.array([[5., 6.], [7., 8.]]))
>>> d = x.diagonal()
>>> print(d)
{
a:ivy.array([1., 4.]),
b:ivy.array([5., 8.])
}
>>> a = ivy.array([[0, 1, 2],
... [3, 4, 5],
... [6, 7, 8]])
>>> b = ivy.array([[-1., -2., -3.],
... [-3., 4., 5.],
...                [5., 6., 7.]])
>>> x = ivy.Container(a=a, b=b)
>>> d = x.diagonal(offset=-1)
>>> print(d)
{
a: ivy.array([3, 7]),
b: ivy.array([-3., 6.])
}
"""
return self._static_diagonal(
self,
offset=offset,
axis1=axis1,
axis2=axis2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_diag(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
k: Union[int, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"diag",
x,
k=k,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def diag(
self: ivy.Container,
/,
*,
k: Union[int, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.diag. This method
simply wraps the function, and so the docstring for ivy.diag also
applies to this method with minimal changes.
Examples
--------
>>> x = ivy.Container(a=[[0, 1, 2],
...                   [3, 4, 5],
...                   [6, 7, 8]])
>>> ivy.diag(x, k=1)
{
a: ivy.array([1, 5])
}
"""
return self._static_diag(
self,
k=k,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_eigh(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
UPLO: Union[str, ivy.Container] = "L",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"eigh",
x,
UPLO=UPLO,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def eigh(
self: ivy.Container,
/,
*,
UPLO: Union[str, ivy.Container] = "L",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.eigh. This method
simply wraps the function, and so the docstring for ivy.eigh also
applies to this method with minimal changes.
Parameters
----------
self : ivy.Container
Ivy container having shape `(..., M, M)` and whose
innermost two dimensions form square matrices.
Should have a floating-point data type.
UPLO : str, optional
Specifies whether the upper or lower triangular part of the
Hermitian matrix should be
used for the eigenvalue decomposition. Default is 'L'.
key_chains : Union[List[str], Dict[str, str]], optional
The key-chains to apply or not apply the method to. Default is `None`.
to_apply : bool, optional
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is `True`.
prune_unapplied : bool, optional
Whether to prune key_chains for which the function was not applied.
Default is `False`.
map_sequences : bool, optional
Whether to also map method to sequences (lists, tuples).
Default is `False`.
out : ivy.Container, optional
Optional output container, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ivy.Container
A container containing the computed eigenvalues.
The returned array must have shape `(..., M)` and have the same
data type as `self`.
Examples
--------
With `ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([[[1.,2.],[2.,1.]]]),
... b=ivy.array([[[2.,4.],[4.,2.]]]))
>>> y = x.eigh()
>>> print(y)
{
a: ivy.array([[-1., 3.]]),
b: ivy.array([[-2., 6.]])
}
"""
return self._static_eigh(
self,
UPLO=UPLO,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_eigvalsh(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
UPLO: Union[str, ivy.Container] = "L",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.eigvalsh. This method
simply wraps the function, and so the docstring for ivy.eigvalsh also
applies to this method with minimal changes.
Parameters
----------
x
Ivy container having shape ``(..., M, M)`` and whose
innermost two dimensions form square matrices.
Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
a container containing the computed eigenvalues.
The returned array must have shape
(..., M) and have the same data type as x.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([[[1.,2.,3.],[2.,4.,5.],[3.,5.,6.]]]),
... b=ivy.array([[[1.,1.,2.],[1.,2.,1.],[2.,1.,1.]]]),
... c=ivy.array([[[2.,2.,2.],[2.,3.,3.],[2.,3.,3.]]]))
>>> e = ivy.Container.static_eigvalsh(x)
>>> print(e)
{
a: ivy.array([[-0.51572949, 0.17091519, 11.3448143]]),
b: ivy.array([[-1., 1., 4.]]),
c: ivy.array([[-8.88178420e-16, 5.35898387e-01, 7.46410179e+00]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"eigvalsh",
x,
UPLO=UPLO,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def eigvalsh(
self: ivy.Container,
/,
*,
UPLO: Union[str, ivy.Container] = "L",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.eigvalsh. This method
simply wraps the function, and so the docstring for ivy.eigvalsh also
applies to this method with minimal changes.
Parameters
----------
self
Ivy container having shape ``(..., M, M)`` and whose
innermost two dimensions form square matrices.
Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
a container containing the computed eigenvalues.
The returned array must have shape
(..., M) and have the same data type as x.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([[[1.,2.],[2.,1.]]]),
... b=ivy.array([[[2.,4.],[4.,2.]]]))
>>> y = x.eigvalsh()
>>> print(y)
{
a: ivy.array([[-1., 3.]]),
b: ivy.array([[-2., 6.]])
}
"""
return self._static_eigvalsh(
self,
UPLO=UPLO,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_inner(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.inner. This method simply
wraps the function, and so the docstring for ivy.inner also applies to
this method with minimal changes.
Return the inner product of two vectors ``x1`` and ``x2``.
Parameters
----------
x1
first one-dimensional input array of size N.
Should have a numeric data type.
Input is flattened if not already 1-dimensional.
x2
second one-dimensional input array of size M.
Should have a numeric data type.
Input is flattened if not already 1-dimensional.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
a two-dimensional array containing the inner product and whose
shape is (N, M).
The returned array must have a data type determined by Type Promotion Rules.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([[1, 2], [3, 4]]))
>>> x2 = ivy.Container(a=ivy.array([5, 6]))
>>> y = ivy.Container.static_inner(x1, x2)
>>> print(y)
{
a: ivy.array([17, 39])
}
"""
return ContainerBase.cont_multi_map_in_function(
"inner",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def inner(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.inner. This method
simply wraps the function, and so the docstring for ivy.inner also
applies to this method with minimal changes.
Return the inner product of two vectors ``self`` and ``x2``.
Parameters
----------
self
input container of size N. Should have a numeric data type.
Input is flattened if not already 1-dimensional.
x2
one-dimensional input array of size M. Should have a numeric data type.
Input is flattened if not already 1-dimensional.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
a new container representing the inner product and whose
shape is (N, M).
The returned array must have a data type determined by Type Promotion Rules.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([[1, 2], [3, 4]]))
>>> x2 = ivy.Container(a=ivy.array([5, 6]))
>>> y = x1.inner(x2)
>>> print(y)
{
a: ivy.array([17, 39])
}
"""
return self._static_inner(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_inv(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
adjoint: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.inv. This method simply
wraps the function, and so the docstring for ivy.inv also applies to
this method with minimal changes.
Parameters
----------
x
Ivy container having shape ``(..., M, M)`` and whose
innermost two dimensions form square matrices.
Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
A container containing the multiplicative inverses.
The returned array must have a floating-point data type
determined by :ref:`type-promotion` and must have the
same shape as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[0., 1.], [4., 4.]]),
... b=ivy.array([[4., 4.], [2., 1.]]))
>>> y = ivy.Container.static_inv(x)
>>> print(y)
{
a: ivy.array([[-1, 0.25], [1., 0.]]),
b: ivy.array([[-0.25, 1.], [0.5, -1.]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"inv",
x,
adjoint=adjoint,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def inv(
self: ivy.Container,
/,
*,
adjoint: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.inv. This method simply
wraps the function, and so the docstring for ivy.inv also applies to
this method with minimal changes.
Parameters
----------
self
Ivy container having shape ``(..., M, M)`` and whose
innermost two dimensions form square matrices.
Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
A container containing the multiplicative inverses.
The returned array must have a floating-point data type
determined by :ref:`type-promotion` and must have the
same shape as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[0., 1.], [4., 4.]]),
... b=ivy.array([[4., 4.], [2., 1.]]))
>>> y = x.inv()
>>> print(y)
{
a: ivy.array([[-1, 0.25], [1., 0.]]),
b: ivy.array([[-0.25, 1.], [0.5, -1.]])
}
"""
return self._static_inv(
self,
adjoint=adjoint,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_pinv(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
rtol: Optional[Union[float, Tuple[float], ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container special method variant of ivy.pinv. This method simply
wraps the function, and so the docstring for ivy.pinv also applies to
this method with minimal changes.
Parameters
----------
x
input array having shape ``(..., M, N)`` and whose innermost two
dimensions form ``MxN`` matrices. Should have a floating-point
data type.
rtol
relative tolerance for small singular values. Singular values
approximately less than or equal to ``rtol * largest_singular_value``
are set to zero.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
an array containing the pseudo-inverses. The returned array must have a
floating-point data type determined by :ref:`type-promotion` and
must have shape ``(..., N, M)`` (i.e., must have the same shape as
``x``, except the innermost two dimensions must be transposed).
Examples
--------
>>> x = ivy.Container(a= ivy.array([[1., 2.], [3., 4.]]))
>>> y = ivy.Container.static_pinv(x)
>>> print(y)
{
a: ivy.array([[-2., 1.],
[1.5, -0.5]])
}
>>> x = ivy.Container(a=ivy.array([[1., 2.], [3., 4.]]))
>>> out = ivy.Container(a=ivy.zeros((2, 2)))
>>> ivy.Container.static_pinv(x, rtol=1e-1, out=out)
>>> print(out)
{
a: ivy.array([[0.0426, 0.0964],
[0.0605, 0.1368]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"pinv",
x,
rtol=rtol,
out=out,
)
def pinv(
self: ivy.Container,
/,
*,
rtol: Optional[Union[float, Tuple[float], ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.pinv. This method
simply wraps the function, and so the docstring for ivy.pinv also
applies to this method with minimal changes.
Parameters
----------
self
input container having shape ``(..., M, N)`` and whose innermost
two dimensions form ``MxN`` matrices. Should have a floating-point
data type.
rtol
relative tolerance for small singular values. Singular values
approximately less than or equal to ``rtol * largest_singular_value``
are set to zero.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
an array containing the pseudo-inverses. The returned array must have a
floating-point data type determined by :ref:`type-promotion` and
must have shape ``(..., N, M)`` (i.e., must have the same shape as
``x``, except the innermost two dimensions must be transposed).
Examples
--------
>>> x = ivy.Container(a= ivy.array([[1., 2.], [3., 4.]]))
>>> y = x.pinv()
>>> print(y)
{
a: ivy.array([[-1.99999988, 1.],
[1.5, -0.5]])
}
>>> x = ivy.Container(a = ivy.array([[1., 2.], [3., 4.]]))
>>> out = ivy.Container(a = ivy.zeros(x["a"].shape))
>>> x.pinv(out=out)
>>> print(out)
{
a: ivy.array([[-1.99999988, 1.],
[1.5, -0.5]])
}
"""
return self._static_pinv(
self,
rtol=rtol,
out=out,
)
@staticmethod
def _static_matrix_norm(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
ord: Union[int, float, Literal[inf, -inf, "fro", "nuc"], ivy.Container] = "fro",
axis: Tuple[int, int, ivy.Container] = (-2, -1),
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.matrix_norm. This method
simply wraps the function, and so the docstring for ivy.matrix_norm
also applies to this method with minimal changes.
Parameters
----------
x
Input array having shape (..., M, N) and whose innermost two dimensions
form MxN matrices. Should have a floating-point data type.
ord
Order of the norm. Default is "fro".
axis
specifies the axes that hold 2-D matrices. Default: (-2, -1).
keepdims
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original x. Default is ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
Matrix norm of the array at specified axes.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[1.1, 2.2], [1., 2.]]), \
b=ivy.array([[1., 2.], [3., 4.]]))
>>> y = ivy.Container.static_matrix_norm(x, ord=1)
>>> print(y)
{
a: ivy.array(4.2),
b: ivy.array(6.)
}
>>> x = ivy.Container(a=ivy.arange(12, dtype=float).reshape((3, 2, 2)), \
b=ivy.arange(8, dtype=float).reshape((2, 2, 2)))
>>> ord = ivy.Container(a=1, b=float('inf'))
>>> axis = ivy.Container(a=(1, 2), b=(2, 1))
>>> k = ivy.Container(a=False, b=True)
>>> y = ivy.Container.static_matrix_norm(x, ord=ord, axis=axis, keepdims=k)
>>> print(y)
{
a: ivy.array([4.24, 11.4, 19.2]),
b: ivy.array([[[3.7]],
[[11.2]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"matrix_norm",
x,
ord=ord,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def matrix_norm(
self: ivy.Container,
/,
*,
ord: Union[int, float, Literal[inf, -inf, "fro", "nuc"], ivy.Container] = "fro",
axis: Tuple[int, int, ivy.Container] = (-2, -1),
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.matrix_norm. This
method simply wraps the function, and so the docstring for
ivy.matrix_norm also applies to this method with minimal changes.
Parameters
----------
self
Container having shape (..., M, N) and whose innermost two dimensions
form MxN matrices. Should have a floating-point data type.
ord
Order of the norm. Default is "fro".
axis
specifies the axes that hold 2-D matrices. Default: (-2, -1).
keepdims
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original x. Default is ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
Matrix norm of the array at specified axes.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[1.1, 2.2], [1., 2.]]), \
b=ivy.array([[1., 2.], [3., 4.]]))
>>> y = x.matrix_norm(ord=1)
>>> print(y)
{
a: ivy.array(4.2),
b: ivy.array(6.)
}
>>> x = ivy.Container(a=ivy.arange(12, dtype=float).reshape((3, 2, 2)), \
b=ivy.arange(8, dtype=float).reshape((2, 2, 2)))
>>> ord = ivy.Container(a="nuc", b=ivy.inf)
>>> axis = ivy.Container(a=(1, 2), b=(2, 1))
>>> k = ivy.Container(a=True, b=False)
>>> y = x.matrix_norm(ord=ord, axis=axis, keepdims=k)
>>> print(y)
{
a: ivy.array([[[4.24]],
[[11.4]],
[[19.2]]]),
b: ivy.array([4., 12.])
}
"""
return self._static_matrix_norm(
self,
ord=ord,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_matrix_power(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
n: Union[int, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"matrix_power",
x,
n,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def matrix_power(
self: ivy.Container,
n: Union[int, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return self._static_matrix_power(
self,
n,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
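# Illustrative sketch (not one of the file's tested doctests): matrix_power raises
# each square-matrix leaf of the container to an integer power ``n`` by repeated
# matrix multiplication. Assuming a single 2x2 leaf, the expected result would be:
# >>> x = ivy.Container(a=ivy.array([[1., 2.], [3., 4.]]))
# >>> x.matrix_power(2)  # each leaf is multiplied by itself
# {
#     a: ivy.array([[7., 10.],
#                   [15., 22.]])
# }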
@staticmethod
def _static_matrix_rank(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
atol: Optional[Union[float, Tuple[float], ivy.Container]] = None,
rtol: Optional[Union[float, Tuple[float], ivy.Container]] = None,
hermitian: Optional[Union[bool, ivy.Container]] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.matrix_rank. This method
returns the rank (i.e., number of non-zero singular values) of a matrix
(or a stack of matrices).
Parameters
----------
x
input array or container having shape ``(..., M, N)`` and whose innermost
two dimensions form ``MxN`` matrices. Should have a floating-point data
type.
atol
absolute tolerance. When None it’s considered to be zero.
rtol
relative tolerance for small singular values. Singular values
approximately less than or equal to ``rtol * largest_singular_value`` are
set to zero. If a ``float``, the value is equivalent to a zero-dimensional
array having a floating-point data type determined by :ref:`type-promotion`
(as applied to ``x``) and must be broadcast against each matrix. If an
``array``, must have a floating-point data type and must be compatible with
``shape(x)[:-2]`` (see:ref:`broadcasting`). If ``None``, the default value
is ``max(M, N) * eps``, where ``eps`` must be the machine epsilon associated
with the floating-point data type determined by :ref:`type-promotion`
(as applied to ``x``).
Default: ``None``.
hermitian
indicates whether ``x`` is Hermitian. When ``hermitian=True``, ``x`` is
assumed to be Hermitian, enabling a more efficient method for finding
        eigenvalues, but ``x`` is not checked inside the function. Instead, only
        the lower-triangular part of the matrix is used in the computation.
Default: ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
a container containing the ranks. The returned array must have a
floating-point data type determined by :ref:`type-promotion` and must have
shape ``(...)`` (i.e., must have a shape equal to ``shape(x)[:-2]``).
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[1., 0.], [0., 1.]]),
... b=ivy.array([[1., 0.], [0., 0.]]))
>>> y = ivy.Container.static_matrix_rank(x)
>>> print(y)
{
a: ivy.array(2.),
b: ivy.array(1.)
}
"""
return ContainerBase.cont_multi_map_in_function(
"matrix_rank",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
atol=atol,
rtol=rtol,
hermitian=hermitian,
out=out,
)
def matrix_rank(
self: ivy.Container,
/,
*,
atol: Optional[Union[float, Tuple[float], ivy.Container]] = None,
rtol: Optional[Union[float, Tuple[float], ivy.Container]] = None,
hermitian: Optional[Union[bool, ivy.Container]] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.matrix_rank. This
method returns the rank (i.e., number of non-zero singular values) of a
matrix (or a stack of matrices).
Parameters
----------
self
input container having shape ``(..., M, N)`` and whose innermost two
dimensions form ``MxN`` matrices. Should have a floating-point data type.
atol
absolute tolerance. When None it’s considered to be zero.
rtol
relative tolerance for small singular values. Singular values approximately
less than or equal to ``rtol * largest_singular_value`` are set to zero. If
a ``float``, the value is equivalent to a zero-dimensional array having a
floating-point data type determined by :ref:`type-promotion` (as applied to
``x``) and must be broadcast against each matrix. If an ``array``, must have
a floating-point data type and must be compatible with ``shape(x)[:-2]``
(see :ref:`broadcasting`). If ``None``, the default value is
``max(M, N) * eps``, where ``eps`` must be the machine epsilon associated
with the floating-point data type determined by :ref:`type-promotion`
(as applied to ``x``). Default: ``None``.
hermitian
indicates whether ``x`` is Hermitian. When ``hermitian=True``, ``x`` is
assumed to be Hermitian, enabling a more efficient method for finding
        eigenvalues, but ``x`` is not checked inside the function. Instead, only
        the lower-triangular part of the matrix is used in the computation.
Default: ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
a container containing the ranks. The returned array must have a
floating-point data type determined by :ref:`type-promotion` and must have
shape ``(...)`` (i.e., must have a shape equal to ``shape(x)[:-2]``).
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[1., 0.], [0., 1.]]),
... b=ivy.array([[1., 0.], [0., 0.]]))
>>> y = x.matrix_rank()
>>> print(y)
{
a: ivy.array(2),
b: ivy.array(1)
}
"""
return self._static_matrix_rank(
self,
atol=atol,
rtol=rtol,
hermitian=hermitian,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_matrix_transpose(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
conjugate: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""Transpose a matrix (or a stack of matrices) ``x``.
Parameters
----------
x
input Container which will have arrays with shape ``(..., M, N)``
and whose innermost two dimensions form ``MxN`` matrices.
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
A container with the transposes for each matrix and having shape
``(..., N, M)``. The returned array must have the same data
type as ``x``.
Examples
--------
    With :code:`ivy.Container` static method:
>>> x = ivy.Container(a=ivy.array([[1., 1.], [0., 3.]]), \
b=ivy.array([[0., 4.], [3., 1.]]))
>>> y = ivy.Container.static_matrix_transpose(x)
>>> print(y)
{
a: ivy.array([[1., 0.],
[1., 3.]]),
b: ivy.array([[0., 3.],
[4., 1.]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"matrix_transpose",
x,
conjugate=conjugate,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def matrix_transpose(
self: ivy.Container,
/,
*,
conjugate: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""Transpose a matrix (or a stack of matrices) ``x``.
Parameters
----------
self
input Container which will have arrays with shape ``(..., M, N)``
and whose innermost two dimensions form ``MxN`` matrices.
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
A container with the transposes for each matrix and having shape
``(..., N, M)``. The returned array must have the same data
type as ``x``.
Examples
--------
With :code:`ivy.Container` instance method:
>>> x = ivy.Container(a=ivy.array([[1., 1.], [0., 3.]]), \
b=ivy.array([[0., 4.], [3., 1.]]))
>>> y = x.matrix_transpose()
>>> print(y)
{
a: ivy.array([[1., 0.],
[1., 3.]]),
b: ivy.array([[0., 3.],
[4., 1.]])
}
"""
return self._static_matrix_transpose(
self,
conjugate=conjugate,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_outer(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.outer. This method simply
wraps the function, and so the docstring for ivy.outer also applies to
this method with minimal changes.
Computes the outer product of two arrays, x1 and x2,
by computing the tensor product along the last dimension of both arrays.
Parameters
----------
x1
first input array having shape (..., N1)
x2
second input array having shape (..., N2)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
The container must have shape (..., N1, N2). The first x1.ndim-1
dimensions must have the same size as those of the input array x1
and the first x2.ndim-1 dimensions must have the same
size as those of the input array x2.
Returns
-------
ret
an ivy container whose shape is (..., N1, N2).
The first x1.ndim-1 dimensions have the same size as those
of the input array x1 and the first x2.ndim-1
dimensions have the same size as those of the input array x2.
    Examples
    --------
    >>> x1 = ivy.Container(a=ivy.array([[1, 2, 3], [4, 5, 6]]))
>>> x2 = ivy.Container(a=ivy.array([1, 2, 3]))
>>> y = ivy.Container.static_outer(x1, x2)
>>> print(y)
ivy.array([[[ 1., 2., 3.],
[ 2., 4., 6.],
[ 3., 6., 9.]],
[[ 4., 8., 12.],
[ 5., 10., 15.],
[ 6., 12., 18.]]])
"""
return ContainerBase.cont_multi_map_in_function(
"outer",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def outer(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""Return the outer product of two arrays or containers.
The instance method implementation of the static method static_outer of the
ivy.Container class. It calculates the outer product of two input arrays or
containers along the last dimension and returns the resulting container. The
input arrays should be either ivy.Container, ivy.Array, or ivy.NativeArray. The
output container shape is the concatenation of the shapes of the input
containers along the last dimension.
Parameters
----------
self : ivy.Container
        Input container of shape (..., M) where the last dimension
        represents M elements.
x2 : Union[ivy.Container, ivy.Array, ivy.NativeArray]
Second input array or container of shape (..., N)
where the last dimension represents N elements.
key_chains : Optional[Union[List[str], Dict[str, str]]]
The key-chains to apply or not apply the method to. Default is None.
to_apply : bool
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped.Default is True.
prune_unapplied : bool
Whether to prune key_chains for which the function was not applied.
Default is False.
map_sequences : bool
Whether to also map the method to sequences (lists, tuples).
Default is False.
out : Optional[ivy.Container]
Optional output container to write the result to.
If not provided, a new container will be created.
Returns
-------
ivy.Container
A new container of shape (..., M, N) representing
the outer product of the input arrays or containers
along the last dimension.
Examples
--------
>>> x = ivy.array([[1., 2.],[3., 4.]])
>>> y = ivy.array([[5., 6.],[7., 8.]])
>>> d = ivy.outer(x,y)
>>> print(d)
ivy.array([[ 5., 6., 7., 8.],
[10., 12., 14., 16.],
[15., 18., 21., 24.],
[20., 24., 28., 32.]])
"""
return self._static_outer(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_qr(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
mode: Union[str, ivy.Container] = "reduced",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Tuple[ivy.Container, ivy.Container]] = None,
) -> Tuple[ivy.Container, ivy.Container]:
"""ivy.Container static method variant of ivy.qr. This method simply
wraps the function, and so the docstring for ivy.qr also applies to
this method with minimal changes.
Returns the qr decomposition x = QR of a full column rank matrix (or a stack of
matrices), where Q is an orthonormal matrix (or a stack of matrices) and R is an
upper-triangular matrix (or a stack of matrices).
Parameters
----------
x
input container having shape (..., M, N) and whose innermost two dimensions
form MxN matrices of rank N. Should have a floating-point data type.
mode
decomposition mode. Should be one of the following modes:
- 'reduced': compute only the leading K columns of q, such that q and r have
dimensions (..., M, K) and (..., K, N), respectively, and where
K = min(M, N).
- 'complete': compute q and r with dimensions (..., M, M) and (..., M, N),
respectively.
Default: 'reduced'.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output tuple of containers, for writing the result to. The arrays
must have shapes that the inputs broadcast to.
Returns
-------
ret
a namedtuple (Q, R) whose
        - first element must have the field name Q and must be a container whose
shape depends on the value of mode and contain matrices with orthonormal
columns. If mode is 'complete', the container must have shape (..., M, M).
If mode is 'reduced', the container must have shape (..., M, K), where
K = min(M, N). The first x.ndim-2 dimensions must have the same size as
those of the input container x.
        - second element must have the field name R and must be a container whose
shape depends on the value of mode and contain upper-triangular matrices. If
mode is 'complete', the container must have shape (..., M, N). If mode is
'reduced', the container must have shape (..., K, N), where K = min(M, N).
The first x.ndim-2 dimensions must have the same size as those of the input
x.
Examples
--------
>>> x = ivy.Container(a = ivy.native_array([[1., 2.], [3., 4.]]),
... b = ivy.array([[2., 3.], [4. ,5.]]))
>>> q,r = ivy.Container.static_qr(x, mode='complete')
>>> print(q)
{
a: ivy.array([[-0.31622777, -0.9486833],
[-0.9486833, 0.31622777]]),
b: ivy.array([[-0.4472136, -0.89442719],
[-0.89442719, 0.4472136]])
}
>>> print(r)
{
a: ivy.array([[-3.16227766, -4.42718872],
[0., -0.63245553]]),
b: ivy.array([[-4.47213595, -5.81377674],
[0., -0.4472136]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"qr",
x,
mode=mode,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def qr(
self: ivy.Container,
/,
*,
mode: Union[str, ivy.Container] = "reduced",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Tuple[ivy.Container, ivy.Container]] = None,
) -> Tuple[ivy.Container, ivy.Container]:
"""ivy.Container instance method variant of ivy.qr. This method simply
wraps the function, and so the docstring for ivy.qr also applies to
this method with minimal changes.
Returns the qr decomposition x = QR of a full column rank matrix (or a stack of
matrices), where Q is an orthonormal matrix (or a stack of matrices) and R is an
upper-triangular matrix (or a stack of matrices).
Parameters
----------
self
input container having shape (..., M, N) and whose innermost two dimensions
form MxN matrices of rank N. Should have a floating-point data type.
mode
decomposition mode. Should be one of the following modes:
- 'reduced': compute only the leading K columns of q, such that q and r have
dimensions (..., M, K) and (..., K, N), respectively, and where
K = min(M, N).
- 'complete': compute q and r with dimensions (..., M, M) and (..., M, N),
respectively.
Default: 'reduced'.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output tuple of containers, for writing the result to. The arrays
must have shapes that the inputs broadcast to.
Returns
-------
ret
a namedtuple (Q, R) whose
        - first element must have the field name Q and must be a container whose
shape depends on the value of mode and contain matrices with orthonormal
columns. If mode is 'complete', the container must have shape (..., M, M).
If mode is 'reduced', the container must have shape (..., M, K), where
K = min(M, N). The first x.ndim-2 dimensions must have the same size as
those of the input container x.
        - second element must have the field name R and must be a container whose
shape depends on the value of mode and contain upper-triangular matrices. If
mode is 'complete', the container must have shape (..., M, N). If mode is
'reduced', the container must have shape (..., K, N), where K = min(M, N).
The first x.ndim-2 dimensions must have the same size as those of the input
x.
Examples
--------
>>> x = ivy.Container(a = ivy.native_array([[1., 2.], [3., 4.]]),
... b = ivy.array([[2., 3.], [4. ,5.]]))
>>> q,r = x.qr(mode='complete')
>>> print(q)
{
a: ivy.array([[-0.31622777, -0.9486833],
[-0.9486833, 0.31622777]]),
b: ivy.array([[-0.4472136, -0.89442719],
[-0.89442719, 0.4472136]])
}
>>> print(r)
{
a: ivy.array([[-3.16227766, -4.42718872],
[0., -0.63245553]]),
b: ivy.array([[-4.47213595, -5.81377674],
[0., -0.4472136]])
}
"""
return self._static_qr(
self,
mode=mode,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_slogdet(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.slogdet. This method
simply wraps the function, and so the docstring for ivy.slogdet also
applies to this method with minimal changes.
Parameters
----------
x
input array or container having shape (..., M, M) and whose innermost two
dimensions form square matrices. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
This function returns a container containing NamedTuples.
Each NamedTuple of output will have -
sign:
An array containing a number representing the sign of the determinant
for each square matrix.
logabsdet:
An array containing natural log of the absolute determinant of each
square matrix.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[1.0, 2.0],
... [3.0, 4.0]]),
... b=ivy.array([[1.0, 2.0],
... [2.0, 1.0]]))
>>> y = ivy.Container.static_slogdet(x)
>>> print(y)
{
a: [
sign = ivy.array(-1.),
logabsdet = ivy.array(0.6931472)
],
b: [
sign = ivy.array(-1.),
logabsdet = ivy.array(1.0986123)
]
}
"""
return ContainerBase.cont_multi_map_in_function(
"slogdet",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def slogdet(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.slogdet. This method
simply wraps the function, and so the docstring for ivy.slogdet also
applies to this method with minimal changes.
Parameters
----------
self
input container having shape (..., M, M) and whose innermost two dimensions
form square matrices. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
This function returns container containing NamedTuples.
Each NamedTuple of output will have -
sign:
An array of a number representing the sign of the determinant of each
square.
logabsdet:
An array of the natural log of the absolute value of the determinant of
each square.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[1.0, 2.0],
... [3.0, 4.0]]),
... b=ivy.array([[1.0, 2.0],
... [2.0, 1.0]]))
>>> y = x.slogdet()
>>> print(y)
[{
a: ivy.array(-1.),
b: ivy.array(-1.)
}, {
a: ivy.array(0.69314718),
b: ivy.array(1.09861231)
}]
"""
return self._static_slogdet(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def _static_solve(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
adjoint: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"solve",
x1,
x2,
adjoint=adjoint,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def solve(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
adjoint: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return self._static_solve(
self,
x2,
adjoint=adjoint,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
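# Illustrative sketch (values assumed for demonstration): solve finds ``x`` such
# that ``A @ x = b`` for every matrix leaf; with ``adjoint=True`` the system
# ``A^H @ x = b`` is solved instead. For a simple 2x2 system the expected result
# would be:
# >>> A = ivy.Container(a=ivy.array([[1., 2.], [3., 4.]]))
# >>> b = ivy.array([[5.], [6.]])
# >>> A.solve(b)
# {
#     a: ivy.array([[-4.],
#                   [4.5]])
# }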
@staticmethod
def _static_svd(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
compute_uv: Union[bool, ivy.Container] = True,
full_matrices: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> Union[ivy.Container, Tuple[ivy.Container, ...]]:
"""ivy.Container static method variant of ivy.svd. This method simply
wraps the function, and so the docstring for ivy.svd also applies to
this method with minimal changes.
Parameters
----------
x
input container with array leaves having shape ``(..., M, N)`` and whose
innermost two dimensions form matrices on which to perform singular value
decomposition. Should have a floating-point data type.
full_matrices
If ``True``, compute full-sized ``U`` and ``Vh``, such that ``U`` has
shape ``(..., M, M)`` and ``Vh`` has shape ``(..., N, N)``. If ``False``,
compute on the leading ``K`` singular vectors, such that ``U``
has shape ``(..., M, K)`` and ``Vh`` has shape ``(..., K, N)`` and where
``K = min(M, N)``. Default: ``True``.
compute_uv
If ``True`` then left and right singular vectors will be computed and
returned in ``U`` and ``Vh``, respectively. Otherwise, only the singular
values will be computed, which can be significantly faster.
.. note::
        with the backend set as torch, svd will still compute the left and right
        singular vectors irrespective of the value of compute_uv; however, Ivy
        will still only return the singular values.
Returns
-------
.. note::
once complex numbers are supported, each square matrix must be Hermitian.
ret
        A container of namedtuples ``(U, S, Vh)``. More details in ivy.svd.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.random_normal(shape = (9, 6))
>>> y = ivy.random_normal(shape = (2, 4))
>>> z = ivy.Container(a=x, b=y)
>>> ret = ivy.Container.static_svd(z)
>>> aU, aS, aVh = ret.a
>>> bU, bS, bVh = ret.b
>>> print(aU.shape, aS.shape, aVh.shape, bU.shape, bS.shape, bVh.shape)
(9, 9) (6,) (6, 6) (2, 2) (2,) (4, 4)
"""
return ContainerBase.cont_multi_map_in_function(
"svd",
x,
compute_uv=compute_uv,
full_matrices=full_matrices,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def svd(
self: ivy.Container,
/,
*,
compute_uv: Union[bool, ivy.Container] = True,
full_matrices: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.svd. This method simply
wraps the function, and so the docstring for ivy.svd also applies to
this method with minimal changes.
Parameters
----------
self
input container with array leaves having shape ``(..., M, N)`` and whose
innermost two dimensions form matrices on which to perform singular value
decomposition. Should have a floating-point data type.
full_matrices
If ``True``, compute full-sized ``U`` and ``Vh``, such that ``U`` has
shape ``(..., M, M)`` and ``Vh`` has shape ``(..., N, N)``. If ``False``,
compute on the leading ``K`` singular vectors, such that ``U``
has shape ``(..., M, K)`` and ``Vh`` has shape ``(..., K, N)`` and where
``K = min(M, N)``. Default: ``True``.
compute_uv
If ``True`` then left and right singular vectors will be computed and
returned in ``U`` and ``Vh``, respectively. Otherwise, only the singular
values will be computed, which can be significantly faster.
.. note::
        with the backend set as torch, svd will still compute the left and right
        singular vectors irrespective of the value of compute_uv; however, Ivy
        will still only return the singular values.
Returns
-------
.. note::
once complex numbers are supported, each square matrix must be Hermitian.
ret
        A container of namedtuples ``(U, S, Vh)``. More details in ivy.svd.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.random_normal(shape = (9, 6))
>>> y = ivy.random_normal(shape = (2, 4))
>>> z = ivy.Container(a=x, b=y)
>>> ret = z.svd()
>>> print(ret[0], ret[1], ret[2])
{
a: (<class ivy.data_classes.array.array.Array> shape=[9, 9]),
b: ivy.array([[-0.3475602, -0.93765765],
[-0.93765765, 0.3475602]])
} {
a: ivy.array([3.58776021, 3.10416126, 2.80644298, 1.87024701, 1.48127627,
0.79101127]),
b: ivy.array([1.98288572, 0.68917423])
} {
a: (<class ivy.data_classes.array.array.Array> shape=[6, 6]),
b: (<class ivy.data_classes.array.array.Array> shape=[4, 4])
}
"""
return self._static_svd(
self,
compute_uv=compute_uv,
full_matrices=full_matrices,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_svdvals(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"svdvals",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def svdvals(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return self._static_svdvals(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
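# Illustrative sketch (values assumed for demonstration): svdvals returns only the
# singular values of each matrix leaf, i.e. the ``S`` part of ``svd``, sorted in
# descending order. For a diagonal leaf the expected result would be:
# >>> x = ivy.Container(a=ivy.array([[3., 0.], [0., 4.]]))
# >>> x.svdvals()
# {
#     a: ivy.array([4., 3.])
# }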
@staticmethod
def _static_tensordot(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axes: Union[int, Tuple[List[int], List[int]], ivy.Container] = 2,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"tensordot",
x1,
x2,
axes=axes,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def tensordot(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axes: Union[int, Tuple[List[int], List[int]], ivy.Container] = 2,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return self._static_tensordot(
self,
x2,
axes=axes,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
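# Illustrative sketch (values assumed for demonstration): tensordot contracts the
# last ``axes`` dimensions of ``self`` with the first ``axes`` dimensions of ``x2``;
# ``axes=1`` is ordinary matrix multiplication, while the default ``axes=2`` fully
# contracts two matrices into a scalar:
# >>> x = ivy.Container(a=ivy.array([[1., 2.], [3., 4.]]))
# >>> y = ivy.array([[5., 6.], [7., 8.]])
# >>> x.tensordot(y, axes=2)  # 1*5 + 2*6 + 3*7 + 4*8
# {
#     a: ivy.array(70.)
# }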
@staticmethod
def _static_tensorsolve(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axes: Optional[Union[int, Tuple[List[int], List[int]], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"tensorsolve",
x1,
x2,
axes=axes,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def tensorsolve(
self: ivy.Container,
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axes: Optional[Union[int, Tuple[List[int], List[int]], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return self._static_tensorsolve(
self,
x2,
axes=axes,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
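# Illustrative sketch (values assumed for demonstration): tensorsolve solves the
# tensor equation ``a x = b`` for ``x``, contracting over the trailing axes of
# ``a``. With ``a`` set to a reshaped identity, the expected solution is ``b``
# itself:
# >>> a = ivy.Container(a=ivy.reshape(ivy.eye(4), (2, 2, 2, 2)))
# >>> b = ivy.array([[1., 2.], [3., 4.]])
# >>> a.tensorsolve(b)
# {
#     a: ivy.array([[1., 2.],
#                   [3., 4.]])
# }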
@staticmethod
def _static_trace(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
offset: Union[int, ivy.Container] = 0,
axis1: Union[int, ivy.Container] = 0,
axis2: Union[int, ivy.Container] = 1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.trace. This method
Returns the sum along the specified diagonals of a matrix (or a stack
of matrices).
Parameters
----------
x
input container having shape ``(..., M, N)`` and whose innermost two
dimensions form ``MxN`` matrices. Should have a floating-point data type.
offset
Offset of the diagonal from the main diagonal. Can be both positive and
negative. Defaults to 0.
axis1
axis to be used as the first axis of the 2-D sub-arrays from which the
diagonals should be taken.
        Defaults to ``0``.
axis2
axis to be used as the second axis of the 2-D sub-arrays from which the
diagonals should be taken.
        Defaults to ``1``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
a container containing the traces and whose shape is determined by removing
the last two dimensions and storing the traces in the last array dimension.
For example, if ``x`` has rank ``k`` and shape ``(I, J, K, ..., L, M, N)``,
then an output array has rank ``k-2`` and shape ``(I, J, K, ..., L)`` where
::
out[i, j, k, ..., l] = trace(a[i, j, k, ..., l, :, :])
The returned array must have the same data type as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(
... a = ivy.array([[7, 1, 2],
... [1, 3, 5],
... [0, 7, 4]]),
... b = ivy.array([[4, 3, 2],
... [1, 9, 5],
... [7, 0, 6]])
    ... )
    >>> y = ivy.Container.static_trace(x)
>>> print(y)
{
a: ivy.array(14),
b: ivy.array(19)
}
"""
return ContainerBase.cont_multi_map_in_function(
"trace",
x,
offset=offset,
axis1=axis1,
axis2=axis2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def trace(
self: ivy.Container,
/,
*,
offset: Union[int, ivy.Container] = 0,
axis1: Union[int, ivy.Container] = 0,
axis2: Union[int, ivy.Container] = 1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.trace. This method
Returns the sum along the specified diagonals of a matrix (or a stack
of matrices).
Parameters
----------
self
input container having shape ``(..., M, N)`` and whose innermost two
dimensions form ``MxN`` matrices. Should have a floating-point data type.
offset
Offset of the diagonal from the main diagonal. Can be both positive and
negative. Defaults to 0.
axis1
axis to be used as the first axis of the 2-D sub-arrays from which the
diagonals should be taken.
        Defaults to ``0``.
axis2
axis to be used as the second axis of the 2-D sub-arrays from which the
diagonals should be taken.
        Defaults to ``1``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
a container containing the traces and whose shape is determined by removing
the last two dimensions and storing the traces in the last array dimension.
For example, if ``x`` has rank ``k`` and shape ``(I, J, K, ..., L, M, N)``,
then an output array has rank ``k-2`` and shape ``(I, J, K, ..., L)`` where
::
out[i, j, k, ..., l] = trace(a[i, j, k, ..., l, :, :])
The returned array must have the same data type as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(
... a = ivy.array([[7, 1, 2],
... [1, 3, 5],
... [0, 7, 4]]),
... b = ivy.array([[4, 3, 2],
... [1, 9, 5],
... [7, 0, 6]]))
>>> y = x.trace()
>>> print(y)
{
a: ivy.array(14),
b: ivy.array(19)
}
"""
return self._static_trace(
self,
offset=offset,
axis1=axis1,
axis2=axis2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_vecdot(
x1: Union[ivy.Container, ivy.Array, ivy.NativeArray],
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axis: Union[int, ivy.Container] = -1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"vecdot",
x1,
x2,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def vecdot(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axis: Union[int, ivy.Container] = -1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return self._static_vecdot(
self,
x2,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
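# Illustrative sketch (values assumed for demonstration): vecdot computes the dot
# product of corresponding vectors along ``axis`` (the last axis by default):
# >>> x1 = ivy.Container(a=ivy.array([1., 2., 3.]))
# >>> x2 = ivy.array([4., 5., 6.])
# >>> x1.vecdot(x2)  # 1*4 + 2*5 + 3*6
# {
#     a: ivy.array(32.)
# }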
@staticmethod
def _static_vector_norm(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
ord: Union[int, float, Literal[inf, -inf], ivy.Container] = 2,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.vector_norm. This method
simply wraps the function, and so the docstring for ivy.vector_norm
also applies to this method with minimal changes.
Parameters
----------
x
input array. Should have a floating-point data type.
axis
If an integer, ``axis`` specifies the axis (dimension)
along which to compute vector norms. If an n-tuple,
``axis`` specifies the axes (dimensions) along
which to compute batched vector norms. If ``None``, the
vector norm must be computed over all array values
(i.e., equivalent to computing the vector norm of
a flattened array). Negative indices must be
supported. Default: ``None``.
keepdims
If ``True``, the axes (dimensions) specified by ``axis``
must be included in the result as singleton dimensions,
and, accordingly, the result must be compatible
with the input array (see :ref:`broadcasting`). Otherwise,
if ``False``, the axes (dimensions) specified by ``axis`` must
not be included in the result. Default: ``False``.
ord
order of the norm. The following mathematical norms must be supported:
+------------------+----------------------------+
| ord | description |
+==================+============================+
| 1 | L1-norm (Manhattan) |
+------------------+----------------------------+
| 2 | L2-norm (Euclidean) |
+------------------+----------------------------+
| inf | infinity norm |
+------------------+----------------------------+
| (int,float >= 1) | p-norm |
+------------------+----------------------------+
The following non-mathematical "norms" must be supported:
+------------------+--------------------------------+
| ord | description |
+==================+================================+
| 0 | sum(a != 0) |
+------------------+--------------------------------+
| -1 | 1./sum(1./abs(a)) |
+------------------+--------------------------------+
        | -2               | 1./sqrt(sum(1./abs(a)\*\*2)) | # noqa
+------------------+--------------------------------+
| -inf | min(abs(a)) |
+------------------+--------------------------------+
        | (int,float < 1) | sum(abs(a)\*\*ord)\*\*(1./ord) |
+------------------+--------------------------------+
Default: ``2``.
dtype
data type that may be used to perform the computation more precisely. The
input array ``x`` gets cast to ``dtype`` before the function's computations.
out
optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
an array containing the vector norms. If ``axis`` is
``None``, the returned array must be a zero-dimensional
array containing a vector norm. If ``axis`` is
a scalar value (``int`` or ``float``), the returned array
must have a rank which is one less than the rank of ``x``.
If ``axis`` is a ``n``-tuple, the returned array must have
a rank which is ``n`` less than the rank of ``x``. The returned
array must have a floating-point data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a = [1., 2., 3.], b = [-2., 0., 3.2])
>>> y = ivy.Container.static_vector_norm(x)
>>> print(y)
{
a: ivy.array([3.7416575]),
b: ivy.array([3.77359247])
}
"""
return ContainerBase.cont_multi_map_in_function(
"vector_norm",
x,
axis=axis,
keepdims=keepdims,
ord=ord,
dtype=dtype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def vector_norm(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
ord: Union[int, float, Literal[inf, -inf], ivy.Container] = 2,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
r"""ivy.Container instance method variant of ivy.vector_norm. This
method simply wraps the function, and so the docstring for
ivy.vector_norm also applies to this method with minimal changes.
Parameters
----------
self
input array. Should have a floating-point data type.
axis
If an integer, ``axis`` specifies the axis (dimension)
along which to compute vector norms. If an n-tuple, ``axis``
specifies the axes (dimensions) along which to compute
batched vector norms. If ``None``, the vector norm must be
computed over all array values (i.e., equivalent to computing
the vector norm of a flattened array). Negative indices must
be supported. Default: ``None``.
keepdims
If ``True``, the axes (dimensions) specified by ``axis`` must
be included in the result as singleton dimensions, and, accordingly,
the result must be compatible with the input array
(see :ref:`broadcasting`).Otherwise, if ``False``, the axes
(dimensions) specified by ``axis`` must not be included in
the result. Default: ``False``.
ord
order of the norm. The following mathematical norms must be supported:
+------------------+----------------------------+
| ord | description |
+==================+============================+
| 1 | L1-norm (Manhattan) |
+------------------+----------------------------+
| 2 | L2-norm (Euclidean) |
+------------------+----------------------------+
| inf | infinity norm |
+------------------+----------------------------+
| (int,float >= 1) | p-norm |
+------------------+----------------------------+
The following non-mathematical "norms" must be supported:
+------------------+--------------------------------+
| ord | description |
+==================+================================+
| 0 | sum(a != 0) |
+------------------+--------------------------------+
| -1 | 1./sum(1./abs(a)) |
+------------------+--------------------------------+
        | -2               | 1./sqrt(sum(1./abs(a)\*\*2)) | # noqa
+------------------+--------------------------------+
| -inf | min(abs(a)) |
+------------------+--------------------------------+
        | (int,float < 1) | sum(abs(a)\*\*ord)\*\*(1./ord) |
+------------------+--------------------------------+
Default: ``2``.
dtype
data type that may be used to perform the computation more precisely. The
input array ``x`` gets cast to ``dtype`` before the function's computations.
out
optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
an array containing the vector norms. If ``axis`` is ``None``,
the returned array must be a zero-dimensional array containing
a vector norm. If ``axis`` is a scalar value (``int`` or ``float``),
the returned array must have a rank which is one less than the
rank of ``x``. If ``axis`` is a ``n``-tuple, the returned
array must have a rank which is ``n`` less than the rank of
``x``. The returned array must have a floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a = [1., 2., 3.], b = [-2., 0., 3.2])
>>> y = x.vector_norm()
>>> print(y)
{
a: ivy.array([3.7416575]),
b: ivy.array([3.77359247])
}
"""
return self._static_vector_norm(
self,
axis=axis,
keepdims=keepdims,
ord=ord,
dtype=dtype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_vector_to_skew_symmetric_matrix(
vector: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"vector_to_skew_symmetric_matrix",
vector,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def vector_to_skew_symmetric_matrix(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return self._static_vector_to_skew_symmetric_matrix(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
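# Illustrative sketch (values assumed for demonstration): each 3-element vector
# leaf ``[a1, a2, a3]`` is mapped to the skew-symmetric (cross-product) matrix
# [[0, -a3, a2], [a3, 0, -a1], [-a2, a1, 0]], so the expected result would be:
# >>> v = ivy.Container(a=ivy.array([1., 2., 3.]))
# >>> v.vector_to_skew_symmetric_matrix()
# {
#     a: ivy.array([[0., -3., 2.],
#                   [3., 0., -1.],
#                   [-2., 1., 0.]])
# }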
@staticmethod
def _static_vander(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
N: Optional[Union[int, ivy.Container]] = None,
increasing: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.vander. This method
simply wraps the function, and so the docstring for ivy.vander also
applies to this method with minimal changes.
Parameters
----------
x
ivy container that contains 1-D arrays.
N
Number of columns in the output. If N is not specified,
a square array is returned (N = len(x))
increasing
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
out
optional output container, for writing the result to.
Returns
-------
ret
container that contains the Vandermonde matrix of the arrays included
in the input container.
Examples
--------
With :class:`ivy.Container` inputs:
    >>> x = ivy.Container(
    ...     a = ivy.array([1, 2, 3, 5]),
    ...     b = ivy.array([6, 7, 8, 9])
    ... )
>>> ivy.Container.static_vander(x)
{
a: ivy.array(
[[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]]
),
b: ivy.array(
[[216, 36, 6, 1],
[343, 49, 7, 1],
[512, 64, 8, 1],
[729, 81, 9, 1]]
)
}
"""
return ContainerBase.cont_multi_map_in_function(
"vander",
x,
N=N,
increasing=increasing,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def vander(
self: ivy.Container,
/,
*,
N: Optional[Union[int, ivy.Container]] = None,
increasing: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.vander. This method
Returns the Vandermonde matrix of the input array.
Parameters
----------
self
1-D input array.
N
Number of columns in the output. If N is not specified,
a square array is returned (N = len(x))
increasing
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
out
optional output container, for writing the result to.
Returns
-------
ret
        a container containing the Vandermonde matrices of the arrays
included in the input container.
Examples
--------
With :class:`ivy.Container` inputs:
    >>> x = ivy.Container(
    ...     a = ivy.array([1, 2, 3, 5]),
    ...     b = ivy.array([6, 7, 8, 9])
    ... )
>>> x.vander()
{
a: ivy.array(
[[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]]
),
b: ivy.array(
[[216, 36, 6, 1],
[343, 49, 7, 1],
[512, 64, 8, 1],
[729, 81, 9, 1]]
)
}
"""
return self._static_vander(
self,
N=N,
increasing=increasing,
out=out,
)
@staticmethod
def static_general_inner_product(
x1: Union[ivy.Container, ivy.Array, ivy.NativeArray],
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
n_modes: Optional[Union[int, ivy.Container]] = None,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.general_inner_product.
This method simply wraps the function, and so the docstring for
ivy.general_inner_product also applies to this method with minimal
changes.
Parameters
----------
x1
First input container containing input array.
x2
Second input container containing input array.
n_modes
int, default is None. If None, the traditional inner product is returned
(i.e. a float) otherwise, the product between the `n_modes` last modes of
`x1` and the `n_modes` first modes of `x2` is returned. The resulting
tensor's order is `len(x1) - n_modes`.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
Alternate output container in which to place the result.
The default is None.
Returns
-------
ret
Container including the inner product tensor.
Examples
--------
>>> x = ivy.Container(
a=ivy.reshape(ivy.arange(4), (2, 2)),
b=ivy.reshape(ivy.arange(8), (2, 4)),
)
>>> ivy.Container.general_inner_product(x, 1)
{
a: ivy.array(6),
b: ivy.array(28)
}
"""
return ContainerBase.cont_multi_map_in_function(
"general_inner_product",
x1,
x2,
n_modes,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def general_inner_product(
self: Union[ivy.Container, ivy.Array, ivy.NativeArray],
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
n_modes: Optional[Union[int, ivy.Container]] = None,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.general_inner_product.
This method simply wraps the function, and so the docstring for
ivy.general_inner_product also applies to this method with
minimal changes.
"""
return self.static_general_inner_product(
self,
x2,
n_modes,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
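# Illustrative sketch (hypothetical values, not from the original docstring;
# assumes the tensorly-style contraction described in the static method's
# docstring above):
# >>> a = ivy.Container(m=ivy.array([[1., 2.], [3., 4.]]))
# >>> b = ivy.Container(m=ivy.array([[5., 6.], [7., 8.]]))
# >>> a.general_inner_product(b, 1)
# # n_modes=1 contracts the last mode of a with the first mode of b,
# # i.e. a matrix product per key -> {m: [[19., 22.], [43., 50.]]}
# >>> a.general_inner_product(b)
# # n_modes=None gives the scalar inner product per key -> {m: 70.}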
| ivy/ivy/data_classes/container/linear_algebra.py/0 | {
"file_path": "ivy/ivy/data_classes/container/linear_algebra.py",
"repo_id": "ivy",
"token_count": 63795
} | 11 |
from .base import FactorizedTensor
import ivy
import warnings
class TTTensor(FactorizedTensor):
def __init__(self, factors, inplace=False):
super().__init__()
shape, rank = ivy.TTTensor.validate_tt_tensor(factors)
self.shape = tuple(shape)
self.rank = tuple(rank)
self.factors = factors
# Built-ins #
def __getitem__(self, index):
return self.factors[index]
def __setitem__(self, index, value):
self.factors[index] = value
def __iter__(self):
for index in range(len(self)):
yield self[index]
def __len__(self):
return len(self.factors)
def __repr__(self):
message = (
f"factors list : rank-{self.rank} matrix-product-state tensor of shape"
f" {self.shape} "
)
return message
# Public Methods #
def to_tensor(self):
return ivy.TTTensor.tt_to_tensor(self)
def to_unfolding(self, mode):
return ivy.TTTensor.tt_to_unfolded(self, mode)
def to_vec(self):
return ivy.TTTensor.tt_to_vec(self)
# Properties #
@property
def n_param(self):
factor_params = []
for i, s in enumerate(self.shape):
factor_params.append(self.rank[i] * s * self.rank[i + 1])
return ivy.sum(factor_params)
# Class Methods #
@staticmethod
def validate_tt_tensor(tt_tensor):
factors = tt_tensor
n_factors = len(factors)
if isinstance(tt_tensor, TTTensor):
return tt_tensor.shape, tt_tensor.rank
elif isinstance(tt_tensor, (float, int)):
return 0, 0
rank = []
shape = []
for index, factor in enumerate(factors):
current_rank, current_shape, next_rank = ivy.shape(factor)
if len(ivy.shape(factor)) != 3:
raise ValueError(
"TT expresses a tensor as third order factors"
f" (tt-cores).\nHowever, len(ivy.shape(factors[{index}])) ="
f" {len(ivy.shape(factor))}"
)
if index and ivy.shape(factors[index - 1])[2] != current_rank:
raise ValueError(
"Consecutive factors should have matching ranks\n -- e.g."
" ivy.shape(factors[0])[2]) == ivy.shape(factors[1])[0])\nHowever,"
f" ivy.shape(factor[{index-1}])[2] =="
f" {ivy.shape(factors[index - 1])[2]} but"
f" ivy.shape(factor[{index}])[0] == {current_rank} "
)
if (index == 0) and current_rank != 1:
raise ValueError(
"Boundary conditions dictate factor[0].shape[0] == 1."
f"However, got factor[0].shape[0] = {current_rank}."
)
if (index == n_factors - 1) and next_rank != 1:
raise ValueError(
"Boundary conditions dictate factor[-1].shape[2] == 1."
f"However, got factor[{n_factors}].shape[2] = {next_rank}."
)
shape.append(current_shape)
rank.append(current_rank)
rank.append(next_rank)
return tuple(shape), tuple(rank)
@staticmethod
def tt_to_tensor(factors):
"""Return the full tensor whose TT decomposition is given by 'factors'.
Re-assembles 'factors', which represent a tensor in TT/Matrix-Product-State format,
into the corresponding full tensor.
Parameters
----------
factors
TT factors (TT-cores)
Returns
-------
output_tensor
tensor whose TT/MPS decomposition was given by 'factors'
""" # noqa: E501
if isinstance(factors, (float, int)):
return factors
full_shape = [f.shape[1] for f in factors]
full_tensor = ivy.reshape(factors[0], (full_shape[0], -1))
for factor in factors[1:]:
rank_prev, _, rank_next = factor.shape
factor = ivy.reshape(factor, (rank_prev, -1))
full_tensor = ivy.matmul(full_tensor, factor)
full_tensor = ivy.reshape(full_tensor, (-1, rank_next))
return ivy.reshape(full_tensor, full_shape)
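# Illustrative sketch (hypothetical cores, not from the original source):
# three TT-cores of shapes (1, 4, 2), (2, 5, 3) and (3, 6, 1) reassemble into
# a full tensor of shape (4, 5, 6); the middle dimension of each core gives
# one mode of the result and the boundary ranks are 1.
# >>> cores = [ivy.zeros((1, 4, 2)), ivy.zeros((2, 5, 3)), ivy.zeros((3, 6, 1))]
# >>> ivy.TTTensor.tt_to_tensor(cores).shape # -> (4, 5, 6)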
@staticmethod
def tt_to_unfolded(factors, mode):
"""Return the unfolding matrix of a tensor given in TT (or Tensor-
Train) format.
Reassembles a full tensor from 'factors' and returns its unfolding matrix
with mode given by 'mode'
Parameters
----------
factors
TT factors
mode
unfolding matrix to be computed along this mode
Returns
-------
2-D array
unfolding matrix at mode given by 'mode'
"""
return ivy.unfold(ivy.TTTensor.tt_to_tensor(factors), mode)
@staticmethod
def tt_to_vec(factors):
"""Return the tensor defined by its TT format ('factors') into its
vectorized format.
Parameters
----------
factors
TT factors
Returns
-------
1-D array
vectorized format of tensor defined by 'factors'
"""
return ivy.reshape(ivy.TTTensor.tt_to_tensor(factors), (-1,))
@staticmethod
def _tt_n_param(tensor_shape, rank):
"""Return the number of parameters of a MPS decomposition for a given
`rank` and full `tensor_shape`.
Parameters
----------
tensor_shape
shape of the full tensor to decompose (or approximate)
rank
rank of the MPS decomposition
Returns
-------
n_params
Number of parameters of an MPS decomposition of rank `rank` of
a full tensor of shape `tensor_shape`
"""
factor_params = []
for i, s in enumerate(tensor_shape):
factor_params.append(rank[i] * s * rank[i + 1])
return ivy.sum(factor_params)
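# Worked example (illustrative): for tensor_shape = (4, 5, 6) and
# rank = (1, 2, 3, 1) the cores have shapes (1, 4, 2), (2, 5, 3), (3, 6, 1),
# so the parameter count is 1*4*2 + 2*5*3 + 3*6*1 = 8 + 30 + 18 = 56.
# >>> ivy.TTTensor._tt_n_param((4, 5, 6), (1, 2, 3, 1)) # -> 56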
@staticmethod
def validate_tt_rank(
tensor_shape,
rank="same",
constant_rank=False,
rounding="round",
allow_overparametrization=True,
):
"""Return the rank of a TT Decomposition.
Parameters
----------
tensor_shape
shape of the tensor to decompose
rank
way to determine the rank, by default 'same'
if 'same': rank is computed to keep the number of parameters (at most) the same
if a float between 0 and 1, computes a rank so that the decomposition keeps
roughly that fraction of the original number of parameters
if int or tuple, just returns rank
constant_rank
if True, the *same* rank will be chosen for each mode
if False (default), the rank of each mode will be
proportional to the corresponding tensor_shape
used only if rank == 'same' or 0 < rank <= 1
rounding
Mode for rounding
One of ["round", "floor", "ceil"]
allow_overparametrization
if False, the rank must be realizable through iterative application of SVD
Returns
-------
rank
rank of the decomposition
""" # noqa: E501
if rounding == "ceil":
rounding_fn = ivy.ceil
elif rounding == "floor":
rounding_fn = ivy.floor
elif rounding == "round":
rounding_fn = ivy.round
else:
raise ValueError(
f"Rounding should be round, floor or ceil, but got {rounding}"
)
if rank == "same":
rank = float(1)
if isinstance(rank, float) and constant_rank:
n_param_tensor = ivy.prod(tensor_shape) * rank
order = len(tensor_shape)
if order == 2:
rank = (1, n_param_tensor / (tensor_shape[0] + tensor_shape[1]), 1)
warnings.warn(
"Determining the tt-rank for the trivial case of a matrix (order 2"
f" tensor) of shape {tensor_shape}, not a higher-order tensor."
)
a = ivy.sum(tensor_shape[1:-1])
b = ivy.sum(tensor_shape[0] + tensor_shape[-1])
c = -n_param_tensor
delta = ivy.sqrt(b**2 - 4 * a * c)
solution = int(rounding_fn((-b + delta) / (2 * a)))
rank = (1,) + (solution,) * (order - 1) + (1,)
elif isinstance(rank, float):
order = len(tensor_shape)
avg_dim = [
(tensor_shape[i] + tensor_shape[i + 1]) / 2 for i in range(order - 1)
]
if len(avg_dim) > 1:
a = sum(
avg_dim[i - 1] * tensor_shape[i] * avg_dim[i]
for i in range(1, order - 1)
)
else:
warnings.warn(
"Determining the tt-rank for the trivial case of a matrix (order 2"
f" tensor) of shape {tensor_shape}, not a higher-order tensor."
)
a = avg_dim[0] ** 2 * tensor_shape[0]
b = tensor_shape[0] * avg_dim[0] + tensor_shape[-1] * avg_dim[-1]
c = -ivy.prod(tensor_shape) * rank
delta = ivy.sqrt(b**2 - 4 * a * c)
fraction_param = (-b + delta) / (2 * a)
rank = tuple(max(int(rounding_fn(d * fraction_param)), 1) for d in avg_dim)
rank = (1,) + rank + (1,)
else:
n_dim = len(tensor_shape)
if isinstance(rank, int):
rank = [1] + [rank] * (n_dim - 1) + [1]
elif n_dim + 1 != len(rank):
message = (
"Provided incorrect number of ranks. Should verify len(rank) =="
f" len(ivy.shape(tensor)) + 1, but len(rank) = {len(rank)} while"
f" len(ivy.shape(tensor)) + 1 = {n_dim+1}"
)
raise (ValueError(message))
if rank[0] != 1:
message = (
f"Provided rank[0] == {rank[0]} but boundary conditions dictate"
" rank[0] == rank[-1] == 1."
)
raise ValueError(message)
if rank[-1] != 1:
message = (
f"Provided rank[-1] == {rank[-1]} but boundary conditions dictate"
" rank[0] == rank[-1] == 1."
)
raise ValueError(message)
if allow_overparametrization:
return list(rank)
else:
validated_rank = [1]
for i, s in enumerate(tensor_shape[:-1]):
n_row = int(rank[i] * s)
n_column = ivy.prod(tensor_shape[(i + 1) :])
validated_rank.append(min(n_row, n_column, rank[i + 1]))
validated_rank.append(1)
return validated_rank
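# Illustrative sketch (hypothetical shapes): an integer rank is expanded to a
# full boundary-1 rank tuple, one entry per bond between cores.
# >>> ivy.TTTensor.validate_tt_rank((4, 5, 6), rank=2) # -> [1, 2, 2, 1]
# With the default rank="same", per-mode ranks are instead solved for so that
# the decomposition has roughly as many parameters as the full tensor.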
@staticmethod
def pad_tt_rank(factor_list, n_padding=1, pad_boundaries=False):
"""Pad the factors of a Tensor-Train so as to increase its rank without
changing its reconstruction.
The tensor-train (or tensor-ring) factors are padded with 0s so that the rank
increases while the underlying tensor they represent is unchanged.
Parameters
----------
factor_list
tensor list
n_padding
how much to increase the rank (bond dimension) by
pad_boundaries
if True, also pad the boundaries (useful for a tensor-ring)
should be False for a tensor-train to keep the boundary rank to be 1
Returns
-------
padded_factor_list
"""
new_factors = []
n_factors = len(factor_list)
for i, factor in enumerate(factor_list):
n_padding_left = n_padding_right = n_padding
if (i == 0) and not pad_boundaries:
n_padding_left = 0
elif (i == n_factors - 1) and not pad_boundaries:
n_padding_right = 0
r1, *s, r2 = ivy.shape(factor)
new_factor = ivy.zeros((r1 + n_padding_left, *s, r2 + n_padding_right))
new_factors.append(
ivy.TTTensor.index_update(
new_factor,
(slice(None, r1, None), ..., slice(None, r2, None)),
factor,
)
)
return new_factors
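# Illustrative sketch (hypothetical cores): with n_padding=1 and
# pad_boundaries=False every interior bond dimension grows by one while the
# boundary ranks stay at 1; the new entries are zeros, so the reconstructed
# tensor is unchanged.
# >>> cores = [ivy.zeros((1, 4, 2)), ivy.zeros((2, 5, 3)), ivy.zeros((3, 6, 1))]
# >>> padded = ivy.TTTensor.pad_tt_rank(cores, n_padding=1)
# >>> [tuple(c.shape) for c in padded] # -> [(1, 4, 3), (3, 5, 4), (4, 6, 1)]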
@staticmethod
def index_update(tensor, indices, values):
tensor[indices] = values
return tensor
| ivy/ivy/data_classes/factorized_tensor/tt_tensor.py/0 | {
"file_path": "ivy/ivy/data_classes/factorized_tensor/tt_tensor.py",
"repo_id": "ivy",
"token_count": 6319
} | 12 |
//! A device (CPUs, GPUs, TPUs) where computations can be run.
use super::{ArrayElement, Literal, PjRtBuffer, PjRtDevice, PjRtLoadedExecutable, XlaComputation};
use crate::{c_lib, Error, Result};
use std::marker::PhantomData;
use std::rc::Rc;
use pyo3::prelude::*;
pub(super) struct PjRtClientInternal(pub(self) c_lib::pjrt_client);
/// A client represents a device that can be used to run some computations. A computation graph is
/// compiled in a way that is specific to a device before it can be run.
#[derive(Clone)]
#[pyclass(unsendable)]
pub struct PjRtClient(Rc<PjRtClientInternal>);
impl PjRtClient {
/// A CPU client, this can run computations on multiple CPUs at the same time.
pub fn cpu() -> Result<Self> {
let mut ptr: c_lib::pjrt_client = std::ptr::null_mut();
let status = unsafe { c_lib::pjrt_cpu_client_create(&mut ptr) };
super::handle_status(status)?;
Ok(Self(Rc::new(PjRtClientInternal(ptr))))
}
/// A GPU client, the memory requirements are limited by the specified `memory_fraction` and
/// this memory can either be allocated dynamically or pre-allocated depending on
/// `preallocate`.
pub fn gpu(memory_fraction: f64, preallocate: bool) -> Result<Self> {
let mut ptr: c_lib::pjrt_client = std::ptr::null_mut();
let status =
unsafe { c_lib::pjrt_gpu_client_create(&mut ptr, memory_fraction, preallocate) };
super::handle_status(status)?;
Ok(Self(Rc::new(PjRtClientInternal(ptr))))
}
/// A TPU client.
pub fn tpu(max_inflight_computations: usize) -> Result<Self> {
let mut ptr: c_lib::pjrt_client = std::ptr::null_mut();
let status =
unsafe { c_lib::pjrt_tpu_client_create(&mut ptr, max_inflight_computations as i32) };
super::handle_status(status)?;
Ok(Self(Rc::new(PjRtClientInternal(ptr))))
}
fn ptr(&self) -> c_lib::pjrt_client {
self.0 .0
}
/// Compile a computation for this device, and return the executable.
pub fn compile(&self, c: &XlaComputation) -> Result<PjRtLoadedExecutable> {
let mut exe: c_lib::pjrt_loaded_executable = std::ptr::null_mut();
let status = unsafe { c_lib::compile(self.ptr(), c.0, &mut exe) };
super::handle_status(status)?;
Ok(PjRtLoadedExecutable { exe, client: self.clone() })
}
/// The number of devices that this client has detected, e.g. the number of GPUs.
pub fn device_count(&self) -> usize {
unsafe { c_lib::pjrt_client_device_count(self.ptr()) as usize }
}
/// The number of devices that this client can use.
pub fn addressable_device_count(&self) -> usize {
unsafe { c_lib::pjrt_client_addressable_device_count(self.ptr()) as usize }
}
/// The name of the platform.
pub fn platform_name(&self) -> String {
unsafe {
let ptr = c_lib::pjrt_client_platform_name(self.ptr());
super::c_ptr_to_string(ptr)
}
}
/// The version of the platform.
pub fn platform_version(&self) -> String {
unsafe {
let ptr = c_lib::pjrt_client_platform_version(self.ptr());
super::c_ptr_to_string(ptr)
}
}
/// A list of devices attached to this client.
pub fn devices(&self) -> Vec<PjRtDevice> {
let device_count = self.device_count();
let mut device_ptrs = vec![std::ptr::null_mut(); device_count];
unsafe { c_lib::pjrt_client_devices(self.ptr(), device_ptrs.as_mut_ptr()) };
device_ptrs.into_iter().map(|device| PjRtDevice { device, marker: PhantomData }).collect()
}
/// A list of devices that can be used by this client.
pub fn addressable_devices(&self) -> Vec<PjRtDevice> {
let device_count = self.addressable_device_count();
let mut device_ptrs = vec![std::ptr::null_mut(); device_count];
unsafe { c_lib::pjrt_client_addressable_devices(self.ptr(), device_ptrs.as_mut_ptr()) };
device_ptrs.into_iter().map(|device| PjRtDevice { device, marker: PhantomData }).collect()
}
/// Transfer some data from the host to a `PjRtBuffer` stored on the target device. If the
/// device is not specified, the default device is used.
/// The source data is passed as a slice of the specified primitive type, as well as the
/// dimensions. The dimensions have to match the number of elements in the source data,
/// otherwise an error is returned.
pub fn buffer_from_host_buffer<T: ArrayElement>(
&self,
data: &[T],
dims: &[usize],
device: Option<&PjRtDevice>,
) -> Result<PjRtBuffer> {
let mut buffer: c_lib::pjrt_buffer = std::ptr::null_mut();
let element_count: usize = dims.iter().product();
if element_count != data.len() {
Err(Error::WrongElementCount { dims: dims.to_vec(), element_count })?
}
let device = device.map_or(std::ptr::null_mut(), |d| d.device);
let dims: Vec<_> = dims.iter().map(|d| *d as i64).collect();
let status = unsafe {
c_lib::pjrt_buffer_from_host_buffer(
self.ptr(),
device,
data.as_ptr() as *const libc::c_void,
T::TY.primitive_type() as i32,
dims.len() as i32,
dims.as_ptr(),
&mut buffer,
)
};
super::handle_status(status)?;
Ok(PjRtBuffer { buffer, client: self.clone() })
}
/// Transfer some data from the host to a `PjRtBuffer` stored on the target device. If the
/// device is not specified, the default device is used.
/// The source data is passed as a slice of raw bytes, as well as the dimensions. The
/// dimensions have to match the number of bytes in the source data, otherwise an error
/// is returned.
pub fn buffer_from_host_raw_bytes(
&self,
ty: super::ElementType,
data: &[u8],
dims: &[usize],
device: Option<&PjRtDevice>,
) -> Result<PjRtBuffer> {
let mut buffer: c_lib::pjrt_buffer = std::ptr::null_mut();
let element_count: usize = dims.iter().product();
let element_size_in_bytes = ty.element_size_in_bytes();
if element_count * element_size_in_bytes != data.len() {
Err(Error::WrongElementCount { dims: dims.to_vec(), element_count })?
}
let device = device.map_or(std::ptr::null_mut(), |d| d.device);
let dims: Vec<_> = dims.iter().map(|d| *d as i64).collect();
let status = unsafe {
c_lib::pjrt_buffer_from_host_buffer(
self.ptr(),
device,
data.as_ptr() as *const libc::c_void,
ty as i32,
dims.len() as i32,
dims.as_ptr(),
&mut buffer,
)
};
super::handle_status(status)?;
Ok(PjRtBuffer { buffer, client: self.clone() })
}
/// Transfer some data from the host to a `PjRtBuffer` stored on the target device. If the
/// device is not specified, the default device is used.
/// The source data is passed as a literal.
pub fn buffer_from_host_literal(
&self,
device: Option<&PjRtDevice>,
literal: &Literal,
) -> Result<PjRtBuffer> {
let mut buffer: c_lib::pjrt_buffer = std::ptr::null_mut();
let device = device.map_or(std::ptr::null_mut(), |d| d.device);
let status = unsafe {
c_lib::pjrt_buffer_from_host_literal(self.ptr(), device, literal.0, &mut buffer)
};
super::handle_status(status)?;
Ok(PjRtBuffer { buffer, client: self.clone() })
}
}
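// Illustrative usage (a minimal sketch, not part of the original API docs).
// It assumes `f32` implements `ArrayElement` and that the surrounding
// function returns `Result<()>` so that `?` can be used:
//
//     let client = PjRtClient::cpu()?;
//     let data: Vec<f32> = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
//     // `dims` must multiply out to `data.len()`, otherwise
//     // `Error::WrongElementCount` is returned.
//     let buffer = client.buffer_from_host_buffer(&data, &[2, 3], None)?;
//     assert!(client.device_count() >= 1);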
impl Drop for PjRtClientInternal {
fn drop(&mut self) {
unsafe { c_lib::pjrt_client_free(self.0) }
}
}
| ivy/ivy/engines/XLA/rust_api/src/wrappers/pjrt_client.rs/0 | {
"file_path": "ivy/ivy/engines/XLA/rust_api/src/wrappers/pjrt_client.rs",
"repo_id": "ivy",
"token_count": 3369
} | 13 |
# global
from numbers import Number
import numpy as np
from typing import Union, Optional, List, Sequence, Tuple
import jax.dlpack
import jax.numpy as jnp
import jax._src as _src
import jaxlib.xla_extension
# local
import ivy
from ivy import as_native_dtype
from ivy.functional.backends.jax import JaxArray
from ivy.functional.backends.jax.device import dev
from ivy.functional.ivy.creation import (
_asarray_to_native_arrays_and_back,
_asarray_infer_device,
_asarray_infer_dtype,
_asarray_handle_nestable,
NestedSequence,
SupportsBufferProtocol,
_asarray_inputs_to_native_shapes,
)
# Array API Standard #
# ------------------ #
def arange(
start: float,
/,
stop: Optional[float] = None,
step: float = 1,
*,
dtype: Optional[jnp.dtype] = None,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if dtype:
dtype = as_native_dtype(dtype)
ivy.utils.assertions._check_jax_x64_flag(dtype.name)
res = jnp.arange(start, stop, step, dtype=dtype)
if not dtype:
if res.dtype == jnp.float64:
return res.astype(jnp.float32)
elif res.dtype == jnp.int64:
return res.astype(jnp.int32)
return res
@_asarray_to_native_arrays_and_back
@_asarray_infer_device
@_asarray_handle_nestable
@_asarray_inputs_to_native_shapes
@_asarray_infer_dtype
def asarray(
obj: Union[
JaxArray,
bool,
int,
float,
tuple,
NestedSequence,
SupportsBufferProtocol,
np.ndarray,
],
/,
*,
copy: Optional[bool] = None,
dtype: Optional[jnp.dtype] = None,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
ivy.utils.assertions._check_jax_x64_flag(dtype)
ret = jnp.asarray(obj, dtype=dtype)
# jnp.copy is used to ensure correct device placement
# it's slower than jax.device_put before JIT, but it's necessary to use since
# jax device objects aren't serializable and prevent saving transpiled graphs
# this workaround only works because we are inside jax.default_device context
# invoked in @handle_device decorator
return jnp.copy(ret) if (dev(ret, as_native=True) != device or copy) else ret
def empty(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.empty(shape, dtype)
def empty_like(
x: JaxArray,
/,
*,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.empty_like(x, dtype=dtype)
def eye(
n_rows: int,
n_cols: Optional[int] = None,
/,
*,
k: int = 0,
batch_shape: Optional[Union[int, Sequence[int]]] = None,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if n_cols is None:
n_cols = n_rows
i = jnp.eye(n_rows, n_cols, k, dtype)
if batch_shape is None:
return i
reshape_dims = [1] * len(batch_shape) + [n_rows, n_cols]
tile_dims = list(batch_shape) + [1, 1]
return_mat = jnp.tile(jnp.reshape(i, reshape_dims), tile_dims)
return return_mat
def to_dlpack(x, /, *, out: Optional[JaxArray] = None):
return jax.dlpack.to_dlpack(x)
def from_dlpack(x, /, *, out: Optional[JaxArray] = None) -> JaxArray:
return jax.dlpack.from_dlpack(x)
def full(
shape: Union[ivy.NativeShape, Sequence[int]],
fill_value: Union[int, float, bool],
*,
dtype: Optional[Union[ivy.Dtype, jnp.dtype]] = None,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
dtype = ivy.default_dtype(dtype=dtype, item=fill_value, as_native=True)
return jnp.full(shape, fill_value, dtype)
def full_like(
x: JaxArray,
/,
fill_value: Number,
*,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.full_like(x, fill_value, dtype=dtype)
# https://github.com/google/jax/blob/8b2e4f975c8c830502f5cc749b7253b02e78c9e8/jax/_src/numpy/lax_numpy.py#L2164
# with some modification
def linspace(
start: Union[JaxArray, float],
stop: float,
/,
num: int,
*,
axis: Optional[int] = None,
endpoint: bool = True,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if axis is None:
axis = -1
if num < 0:
raise ivy.utils.exceptions.IvyException(
f"Number of samples, {num}, must be non-negative."
)
if dtype is None:
dtype = ivy.promote_types(start.dtype, stop.dtype)
dtype = jnp.dtype(dtype)
computation_dtype = dtype
start = jnp.asarray(start, dtype=computation_dtype)
stop = jnp.asarray(stop, dtype=computation_dtype)
bounds_shape = list(jax.lax.broadcast_shapes(jnp.shape(start), jnp.shape(stop)))
broadcast_start = jnp.broadcast_to(start, bounds_shape)
broadcast_stop = jnp.broadcast_to(stop, bounds_shape)
axis = len(bounds_shape) + axis + 1 if axis < 0 else axis
bounds_shape.insert(axis, 1)
div = (num - 1) if endpoint else num
if num > 1:
iota_shape = [
1,
] * len(bounds_shape)
iota_shape[axis] = div
# This approach recovers the endpoints with float32 arithmetic,
# but can lead to rounding errors for integer outputs.
real_dtype = jnp.finfo(computation_dtype).dtype
step = jnp.reshape(jax.lax.iota(real_dtype, div), iota_shape) / div
step = step.astype(computation_dtype)
start_reshaped = jnp.reshape(broadcast_start, bounds_shape)
end_reshaped = jnp.reshape(broadcast_stop, bounds_shape)
out = start_reshaped + step * (end_reshaped - start_reshaped)
if endpoint:
out = jax.lax.concatenate(
[out, jax.lax.expand_dims(broadcast_stop, (axis,))],
_src.util.canonicalize_axis(axis, out.ndim),
)
elif num == 1:
out = jnp.reshape(broadcast_start, bounds_shape)
else: # num == 0 degenerate case, match numpy behavior
empty_shape = list(jax.lax.broadcast_shapes(jnp.shape(start), jnp.shape(stop)))
empty_shape.insert(axis, 0)
out = jnp.reshape(jnp.array([], dtype=dtype), empty_shape)
if jnp.issubdtype(dtype, jnp.integer) and not jnp.issubdtype(
out.dtype, jnp.integer
):
out = jax.lax.floor(out)
ans = jax.lax.convert_element_type(out, dtype)
return ans
def meshgrid(
*arrays: JaxArray,
sparse: bool = False,
indexing: str = "xy",
out: Optional[JaxArray] = None,
) -> List[JaxArray]:
return jnp.meshgrid(*arrays, sparse=sparse, indexing=indexing)
def ones(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.ones(shape, dtype)
def ones_like(
x: JaxArray,
/,
*,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.ones_like(x, dtype=dtype)
def tril(x: JaxArray, /, *, k: int = 0, out: Optional[JaxArray] = None) -> JaxArray:
return jnp.tril(x, k)
def triu(x: JaxArray, /, *, k: int = 0, out: Optional[JaxArray] = None) -> JaxArray:
return jnp.triu(x, k)
def zeros(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.zeros(shape, dtype)
def zeros_like(
x: JaxArray,
/,
*,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.zeros_like(x, dtype=dtype)
# Extra #
# ------#
array = asarray
def copy_array(
x: JaxArray, *, to_ivy_array: bool = True, out: Optional[JaxArray] = None
) -> JaxArray:
x = (
jax.core.ShapedArray(x.shape, x.dtype)
if isinstance(x, jax.core.ShapedArray)
else jnp.array(x)
)
if to_ivy_array:
return ivy.to_ivy(x)
return x
def one_hot(
indices: JaxArray,
depth: int,
/,
*,
on_value: Optional[Number] = None,
off_value: Optional[Number] = None,
axis: Optional[int] = None,
dtype: Optional[jnp.dtype] = None,
device: jaxlib.xla_extension.Device = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
on_none = on_value is None
off_none = off_value is None
if dtype is None:
if on_none and off_none:
dtype = jnp.float32
else:
if not on_none:
dtype = jnp.array(on_value).dtype
elif not off_none:
dtype = jnp.array(off_value).dtype
res = jnp.eye(depth, dtype=dtype)[jnp.array(indices, dtype="int64").reshape(-1)]
res = res.reshape(list(indices.shape) + [depth])
if not on_none and not off_none:
res = jnp.where(res == 1, on_value, off_value)
if axis is not None:
res = jnp.moveaxis(res, -1, axis)
return res
def frombuffer(
buffer: bytes,
dtype: Optional[jnp.dtype] = float,
count: Optional[int] = -1,
offset: Optional[int] = 0,
) -> JaxArray:
return jnp.frombuffer(buffer, dtype=dtype, count=count, offset=offset)
def triu_indices(
n_rows: int,
n_cols: Optional[int] = None,
k: int = 0,
/,
*,
device: jaxlib.xla_extension.Device = None,
) -> Tuple[JaxArray]:
return jnp.triu_indices(n=n_rows, k=k, m=n_cols)
| ivy/ivy/functional/backends/jax/creation.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/creation.py",
"repo_id": "ivy",
"token_count": 4480
} | 14 |
# global
from typing import (
Iterable,
Optional,
Union,
Sequence,
Tuple,
NamedTuple,
Literal,
Callable,
Any,
List,
)
import jax.numpy as jnp
import jax.lax as jlax
from numbers import Number
from collections import namedtuple
from ivy.func_wrapper import handle_out_argument
# local
import ivy
from ivy.functional.backends.jax import JaxArray
def moveaxis(
a: JaxArray,
source: Union[int, Sequence[int]],
destination: Union[int, Sequence[int]],
/,
*,
copy: Optional[bool] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.moveaxis(a, source, destination)
def heaviside(
x1: JaxArray,
x2: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.heaviside(x1, x2)
def flipud(
m: JaxArray,
/,
*,
copy: Optional[bool] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.flipud(m)
def vstack(
arrays: Sequence[JaxArray],
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.vstack(arrays)
def hstack(
arrays: Sequence[JaxArray],
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.hstack(arrays)
def rot90(
m: JaxArray,
/,
*,
copy: Optional[bool] = None,
k: int = 1,
axes: Tuple[int, int] = (0, 1),
out: Optional[JaxArray] = None,
) -> JaxArray:
if isinstance(axes, list):
axes = tuple(axes)
return jnp.rot90(m, k, axes)
def top_k(
x: JaxArray,
k: int,
/,
*,
axis: int = -1,
largest: bool = True,
sorted: bool = True,
out: Optional[Tuple[JaxArray, JaxArray]] = None,
) -> Tuple[JaxArray, JaxArray]:
k = min(k, x.shape[axis])
if not largest:
indices = jnp.argsort(x, axis=axis)
indices = jnp.take(indices, jnp.arange(k), axis=axis)
else:
indices = jnp.argsort(-x, axis=axis)
indices = jnp.take(indices, jnp.arange(k), axis=axis)
if not sorted:
indices = jnp.sort(indices, axis=axis)
topk_res = NamedTuple("top_k", [("values", JaxArray), ("indices", JaxArray)])
val = jnp.take_along_axis(x, indices, axis=axis)
return topk_res(val, indices)
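# Illustrative sketch (hypothetical input): with largest=True the values are
# returned in descending order together with their positions in the input.
# >>> values, indices = top_k(jnp.array([1, 3, 2, 5, 4]), 2)
# # values -> [5, 4], indices -> [3, 4]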
def fliplr(
m: JaxArray,
/,
*,
copy: Optional[bool] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.fliplr(m)
def i0(
x: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.i0(x)
def _flat_array_to_1_dim_array(x):
return x.reshape((1,)) if x.shape == () else x
def _to_nested_tuple(nested_list):
ret = ()
if hasattr(nested_list, "__iter__"):
for inner_list in nested_list:
if hasattr(inner_list, "__iter__"):
ret += (tuple(inner_list),)
else:
ret += (inner_list,)
return ret
if ret == ():
return nested_list
def pad(
input: JaxArray,
pad_width: Union[Iterable[Tuple[int]], int],
/,
*,
mode: Union[
Literal[
"constant",
"dilated",
"edge",
"linear_ramp",
"maximum",
"mean",
"median",
"minimum",
"reflect",
"symmetric",
"wrap",
"empty",
],
Callable,
] = "constant",
stat_length: Union[Iterable[Tuple[int]], int] = 1,
constant_values: Union[Iterable[Tuple[Number]], Number] = 0,
end_values: Union[Iterable[Tuple[Number]], Number] = 0,
reflect_type: Literal["even", "odd"] = "even",
**kwargs: Optional[Any],
) -> JaxArray:
pad_width = _to_nested_tuple(pad_width)
stat_length = _to_nested_tuple(stat_length)
constant_values = _to_nested_tuple(constant_values)
end_values = _to_nested_tuple(end_values)
input_dtype = input.dtype
if mode == "dilated":
if not ivy.is_array(constant_values) or constant_values.dtype != input_dtype:
constant_values = jnp.array(constant_values, dtype=input_dtype)
return jlax.pad(input, constant_values, pad_width)
if callable(mode):
ret = jnp.pad(
_flat_array_to_1_dim_array(input),
pad_width,
mode=mode,
**kwargs,
)
elif mode in ["maximum", "mean", "median", "minimum"]:
ret = jnp.pad(
_flat_array_to_1_dim_array(input),
pad_width,
mode=mode,
stat_length=stat_length,
)
elif mode == "constant":
ret = jnp.pad(
_flat_array_to_1_dim_array(input),
pad_width,
mode=mode,
constant_values=constant_values,
)
elif mode == "linear_ramp":
ret = jnp.pad(
_flat_array_to_1_dim_array(input),
pad_width,
mode=mode,
end_values=end_values,
)
elif mode in ["reflect", "symmetric"]:
ret = jnp.pad(
_flat_array_to_1_dim_array(input),
pad_width,
mode=mode,
reflect_type=reflect_type,
)
else:
ret = jnp.pad(
_flat_array_to_1_dim_array(input),
pad_width,
mode=mode,
)
if jnp.issubdtype(input_dtype, jnp.integer) and mode in ["mean", "median"]:
ret = jnp.round(ret).astype(input_dtype)
return ret
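# Illustrative sketch of the non-NumPy "dilated" mode (hypothetical input):
# each pad_width entry is a (low, high, interior) config forwarded to
# jax.lax.pad, so interior padding inserts `interior` fill values between
# neighbouring elements.
# >>> pad(jnp.array([1, 2, 3]), [(1, 1, 1)], mode="dilated", constant_values=0)
# # -> [0, 1, 0, 2, 0, 3, 0]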
def vsplit(
ary: JaxArray,
indices_or_sections: Union[int, Sequence[int], JaxArray],
/,
*,
copy: Optional[bool] = None,
) -> List[JaxArray]:
if ary.ndim < 2:
raise ivy.exceptions.IvyError(
"vsplit only works on arrays of 2 or more dimensions"
)
return ivy.split(ary, num_or_size_splits=indices_or_sections, axis=0)
def dsplit(
ary: JaxArray,
indices_or_sections: Union[int, Sequence[int], JaxArray],
/,
*,
copy: Optional[bool] = None,
) -> List[JaxArray]:
if ary.ndim < 3:
raise ivy.utils.exceptions.IvyError(
"dsplit only works on arrays of 3 or more dimensions"
)
return ivy.split(ary, num_or_size_splits=indices_or_sections, axis=2)
def atleast_1d(
*arys: Union[JaxArray, bool, Number], copy: Optional[bool] = None
) -> List[JaxArray]:
return jnp.atleast_1d(*arys)
def dstack(
arrays: Sequence[JaxArray],
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.dstack(arrays)
def atleast_2d(*arys: JaxArray, copy: Optional[bool] = None) -> List[JaxArray]:
return jnp.atleast_2d(*arys)
def atleast_3d(
*arys: Union[JaxArray, bool, Number], copy: Optional[bool] = None
) -> List[JaxArray]:
return jnp.atleast_3d(*arys)
def take_along_axis(
arr: JaxArray,
indices: JaxArray,
axis: int,
/,
*,
mode: str = "fill",
out: Optional[JaxArray] = None,
) -> JaxArray:
if arr.ndim != indices.ndim and axis is not None:
raise ivy.utils.exceptions.IvyException(
"arr and indices must have the same number of dimensions;"
+ f" got {arr.ndim} vs {indices.ndim}"
)
return jnp.take_along_axis(arr, indices, axis, mode=mode)
def hsplit(
ary: JaxArray,
indices_or_sections: Union[int, Tuple[int, ...]],
/,
*,
copy: Optional[bool] = None,
) -> List[JaxArray]:
if ary.ndim == 1:
return ivy.split(ary, num_or_size_splits=indices_or_sections, axis=0)
return ivy.split(ary, num_or_size_splits=indices_or_sections, axis=1)
def broadcast_shapes(*shapes: Union[List[int], List[Tuple]]) -> Tuple[int]:
return jnp.broadcast_shapes(*shapes)
def expand(
x: JaxArray,
shape: Union[List[int], List[Tuple]],
/,
*,
copy: Optional[bool] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
shape = list(shape)
if len(shape) > len(x.shape):
x = jnp.expand_dims(x, range(len(shape) - len(x.shape)))
for i, dim in enumerate(shape):
if dim < 0:
shape[i] = x.shape[i]
return jnp.broadcast_to(x, tuple(shape))
def concat_from_sequence(
input_sequence: Union[Tuple[JaxArray], List[JaxArray]],
/,
*,
new_axis: int = 0,
axis: int = 0,
out: Optional[JaxArray] = None,
) -> JaxArray:
is_tuple = type(input_sequence) is tuple
if is_tuple:
input_sequence = list(input_sequence)
if new_axis == 0:
ret = jnp.concatenate(input_sequence, axis=axis)
return ret
elif new_axis == 1:
ret = jnp.stack(input_sequence, axis=axis)
return ret
def unique_consecutive(
x: JaxArray,
/,
*,
axis: Optional[int] = None,
) -> Tuple[JaxArray, JaxArray, JaxArray]:
Results = namedtuple(
"Results",
["output", "inverse_indices", "counts"],
)
x_shape = None
if axis is None:
x_shape = x.shape
x = x.flatten()
axis = -1
if axis < 0:
axis += x.ndim
sub_arrays = jnp.split(
x,
jnp.where(
jnp.any(
jnp.diff(x, axis=axis) != 0,
axis=tuple(i for i in jnp.arange(x.ndim) if i != axis),
)
)[0]
+ 1,
axis=axis,
)
output = jnp.concatenate(
[jnp.unique(sub_array, axis=axis) for sub_array in sub_arrays],
axis=axis,
)
counts = jnp.array([sub_array.shape[axis] for sub_array in sub_arrays])
inverse_indices = jnp.repeat(jnp.arange(len(counts)), counts)
if x_shape:
inverse_indices = jnp.reshape(inverse_indices, x_shape)
return Results(
output.astype(x.dtype),
inverse_indices,
counts,
)
def fill_diagonal(
a: JaxArray,
v: Union[int, float],
/,
*,
wrap: bool = False,
) -> JaxArray:
shape = jnp.array(a.shape)
end = None
if len(shape) == 2:
step = shape[1] + 1
if not wrap:
end = shape[1] * shape[1]
else:
step = 1 + (jnp.cumprod(shape[:-1])).sum()
a = jnp.reshape(a, (-1,))
a = a.at[:end:step].set(jnp.array(v).astype(a.dtype))
a = jnp.reshape(a, shape)
return a
def take(
x: Union[int, JaxArray],
indices: Union[int, JaxArray],
/,
*,
axis: Optional[int] = None,
mode: str = "fill",
fill_value: Optional[Number] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if mode not in ["raise", "wrap", "clip", "fill"]:
raise ValueError("mode must be one of 'clip', 'raise', 'wrap', or 'fill'")
if not isinstance(x, JaxArray):
x = jnp.array(x)
if len(x.shape) == 0:
x = jnp.array([x])
if not isinstance(indices, JaxArray):
indices = jnp.array(indices)
if jnp.issubdtype(indices.dtype, jnp.floating):
indices = indices.astype(jnp.int64)
# raise
if mode == "raise":
mode = "fill"
if ivy.exists(axis):
try:
x_shape = x.shape[axis]
except Exception as e:
raise ValueError(
f"axis {axis} is out of bounds for array of dimension"
f" {len(x.shape)}"
) from e
else:
x_shape = jnp.prod(x.shape)
bound_check = (indices < -x_shape) | (indices >= x_shape)
if jnp.any(bound_check):
if len(indices.shape) != 0:
indices = indices[bound_check].flatten()[0]
raise IndexError(
f"index {indices} is out of bounds for axis "
f"{axis if axis else 0} with size {x_shape}"
)
# clip, wrap, fill
ret = jnp.take(x, indices, axis=axis, mode=mode, fill_value=fill_value)
if ivy.exists(out):
ivy.inplace_update(out, ret)
return ret
def trim_zeros(a: JaxArray, /, *, trim: Optional[str] = "bf") -> JaxArray:
return jnp.trim_zeros(a, trim=trim)
@handle_out_argument
def unflatten(
x: JaxArray,
/,
shape: Tuple[int] = None,
dim: int = 0,
*,
out: Optional[JaxArray] = None,
order: Optional[str] = None,
) -> JaxArray:
dim = abs(len(x.shape) + dim) if dim < 0 else dim
res_shape = x.shape[:dim] + shape + x.shape[dim + 1 :]
res = jnp.reshape(x, res_shape)
return res
| ivy/ivy/functional/backends/jax/experimental/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/experimental/manipulation.py",
"repo_id": "ivy",
"token_count": 6029
} | 15 |
from numbers import Number
from typing import Optional, Tuple, Union
import jax.numpy as jnp
import ivy
from ivy.functional.backends.jax import JaxArray
from . import backend_version
from ivy.func_wrapper import with_unsupported_dtypes
# Array API Standard #
# ------------------ #
@with_unsupported_dtypes({"0.4.24 and below": ("complex",)}, backend_version)
def argmax(
x: JaxArray,
/,
*,
axis: Optional[int] = None,
keepdims: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
select_last_index: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
if select_last_index:
x = jnp.flip(x, axis=axis)
ret = jnp.array(jnp.argmax(x, axis=axis, keepdims=keepdims))
if axis is not None:
ret = x.shape[axis] - ret - 1
else:
ret = x.size - ret - 1
else:
ret = jnp.argmax(x, axis=axis, keepdims=keepdims)
if dtype:
dtype = ivy.as_native_dtype(dtype)
return ret.astype(dtype)
return ret
@with_unsupported_dtypes({"0.4.24 and below": ("complex",)}, backend_version)
def argmin(
x: JaxArray,
/,
*,
axis: Optional[int] = None,
keepdims: bool = False,
dtype: Optional[jnp.dtype] = None,
select_last_index: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
if select_last_index:
x = jnp.flip(x, axis=axis)
ret = jnp.array(jnp.argmin(x, axis=axis, keepdims=keepdims))
if axis is not None:
ret = x.shape[axis] - ret - 1
else:
ret = x.size - ret - 1
else:
ret = jnp.argmin(x, axis=axis, keepdims=keepdims)
if dtype:
dtype = ivy.as_native_dtype(dtype)
return ret.astype(dtype)
return ret
def nonzero(
x: JaxArray,
/,
*,
as_tuple: bool = True,
size: Optional[int] = None,
fill_value: Number = 0,
) -> Union[JaxArray, Tuple[JaxArray]]:
res = jnp.nonzero(x, size=size, fill_value=fill_value)
if as_tuple:
return tuple(res)
return jnp.stack(res, axis=1)
def where(
condition: JaxArray,
x1: JaxArray,
x2: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return ivy.astype(jnp.where(condition, x1, x2), x1.dtype, copy=False)
# Extra #
# ----- #
def argwhere(
x: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.argwhere(x)
| ivy/ivy/functional/backends/jax/searching.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/searching.py",
"repo_id": "ivy",
"token_count": 1171
} | 16 |
import mxnet as mx
from typing import Union, Optional
import numpy as np
from ivy.utils.exceptions import IvyNotImplementedException
def current_backend_str() -> str:
return "mxnet"
def is_native_array(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
exclusive: bool = False,
) -> bool:
if exclusive:
return isinstance(x, mx.ndarray.NDArray)
else:
return isinstance(x, (mx.ndarray.NDArray, np.ndarray))
def to_numpy(x: mx.ndarray.NDArray, /, *, copy: bool = True) -> np.ndarray:
if copy:
if x.shape == ():
new_arr = x.asnumpy()
return new_arr
return x.copy().asnumpy()
else:
return x.asnumpy()
def itemsize(x: mx.ndarray.NDArray, /) -> int:
return x.asnumpy().itemsize
def container_types():
return []
def gather(
x: mx.ndarray.NDArray,
indices: mx.ndarray.NDArray,
/,
*,
axis: int = -1,
batch_dims: int = 0,
out: Optional[mx.ndarray.NDArray] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/general.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/general.py",
"repo_id": "ivy",
"token_count": 482
} | 17 |
# global
from numbers import Number
from typing import Union, Optional, List, Sequence, Tuple
import numpy as np
# local
import ivy
from ivy.functional.ivy.creation import (
_asarray_to_native_arrays_and_back,
_asarray_infer_device,
_asarray_infer_dtype,
_asarray_handle_nestable,
NestedSequence,
SupportsBufferProtocol,
_asarray_inputs_to_native_shapes,
)
from .data_type import as_native_dtype
# Array API Standard #
# -------------------#
def arange(
start: float,
/,
stop: Optional[float] = None,
step: float = 1,
*,
dtype: Optional[np.dtype] = None,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if dtype:
dtype = as_native_dtype(dtype)
res = np.arange(start, stop, step, dtype=dtype)
if not dtype:
if res.dtype == np.float64:
return res.astype(np.float32)
elif res.dtype == np.int64:
return res.astype(np.int32)
return res
@_asarray_to_native_arrays_and_back
@_asarray_infer_device
@_asarray_handle_nestable
@_asarray_inputs_to_native_shapes
@_asarray_infer_dtype
def asarray(
obj: Union[
np.ndarray, bool, int, float, tuple, NestedSequence, SupportsBufferProtocol
],
/,
*,
copy: Optional[bool] = None,
dtype: Optional[np.dtype] = None,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
ret = np.asarray(obj, dtype=dtype)
return np.copy(ret) if copy else ret
def empty(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: np.dtype,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.empty(shape, dtype)
def empty_like(
x: np.ndarray,
/,
*,
dtype: np.dtype,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.empty_like(x, dtype=dtype)
def eye(
n_rows: int,
n_cols: Optional[int] = None,
/,
*,
k: int = 0,
batch_shape: Optional[Union[int, Sequence[int]]] = None,
dtype: np.dtype,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if n_cols is None:
n_cols = n_rows
i = np.eye(n_rows, n_cols, k, dtype)
if batch_shape is None:
return i
else:
reshape_dims = [1] * len(batch_shape) + [n_rows, n_cols]
tile_dims = list(batch_shape) + [1, 1]
return_mat = np.tile(np.reshape(i, reshape_dims), tile_dims)
return return_mat
def to_dlpack(x, /, *, out: Optional[np.ndarray] = None):
return x.__dlpack__()
class _dlpack_wrapper:
def __init__(self, capsule) -> None:
self.capsule = capsule
def dlpack(self):
return self.capsule
def from_dlpack(x, /, *, out: Optional[np.ndarray] = None):
if not hasattr(x, "__dlpack__"):
capsule = _dlpack_wrapper(x)
else:
capsule = x
return np.from_dlpack(capsule)
def full(
shape: Union[ivy.NativeShape, Sequence[int]],
fill_value: Union[int, float, bool],
*,
dtype: Optional[Union[ivy.Dtype, np.dtype]] = None,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
dtype = ivy.default_dtype(dtype=dtype, item=fill_value, as_native=True)
return np.full(shape, fill_value, dtype)
def full_like(
x: np.ndarray,
/,
fill_value: Number,
*,
dtype: np.dtype,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.full_like(x, fill_value, dtype=dtype)
def linspace(
start: Union[np.ndarray, float],
stop: Union[np.ndarray, float],
/,
num: int,
*,
axis: Optional[int] = None,
endpoint: bool = True,
dtype: np.dtype,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if axis is None:
axis = -1
ans = np.linspace(start, stop, num, endpoint, dtype=dtype, axis=axis)
# Waiting for fix when start is -0.0: https://github.com/numpy/numpy/issues/21513
if (
ans.shape[0] >= 1
and (not isinstance(start, np.ndarray))
and (not isinstance(stop, np.ndarray))
):
ans[0] = start
return ans
def meshgrid(
*arrays: np.ndarray,
sparse: bool = False,
indexing: str = "xy",
out: Optional[np.ndarray] = None,
) -> List[np.ndarray]:
return np.meshgrid(*arrays, sparse=sparse, indexing=indexing)
def ones(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: np.dtype,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.ones(shape, dtype)
def ones_like(
x: np.ndarray,
/,
*,
dtype: np.dtype,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.ones_like(x, dtype=dtype)
def tril(
x: np.ndarray, /, *, k: int = 0, out: Optional[np.ndarray] = None
) -> np.ndarray:
return np.tril(x, k)
def triu(
x: np.ndarray, /, *, k: int = 0, out: Optional[np.ndarray] = None
) -> np.ndarray:
return np.triu(x, k)
def zeros(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: np.dtype,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.zeros(shape, dtype)
def zeros_like(
x: np.ndarray,
/,
*,
dtype: np.dtype,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.zeros_like(x, dtype=dtype)
# Extra #
# ------#
array = asarray
def copy_array(
x: np.ndarray,
*,
to_ivy_array: bool = True,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if to_ivy_array:
return ivy.to_ivy(x.copy())
return x.copy()
def one_hot(
indices: np.ndarray,
depth: int,
/,
*,
on_value: Optional[Number] = None,
off_value: Optional[Number] = None,
axis: Optional[int] = None,
dtype: Optional[np.dtype] = None,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
on_none = on_value is None
off_none = off_value is None
if dtype is None:
if on_none and off_none:
dtype = np.float32
else:
if not on_none:
dtype = np.array(on_value).dtype
elif not off_none:
dtype = np.array(off_value).dtype
res = np.eye(depth, dtype=dtype)[np.array(indices, dtype="int64").reshape(-1)]
res = res.reshape(list(indices.shape) + [depth])
if not on_none and not off_none:
res = np.where(res == 1, on_value, off_value)
if axis is not None:
res = np.moveaxis(res, -1, axis)
return res
def frombuffer(
buffer: bytes,
dtype: Optional[np.dtype] = float,
count: Optional[int] = -1,
offset: Optional[int] = 0,
) -> np.ndarray:
if isinstance(dtype, list):
dtype = np.dtype(dtype[0])
return np.frombuffer(buffer, dtype=dtype, count=count, offset=offset)
def triu_indices(
n_rows: int,
n_cols: Optional[int] = None,
k: int = 0,
/,
*,
device: Optional[str] = None,
) -> Tuple[np.ndarray]:
return tuple(np.asarray(np.triu_indices(n=n_rows, k=k, m=n_cols)))
| ivy/ivy/functional/backends/numpy/creation.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/creation.py",
"repo_id": "ivy",
"token_count": 3330
} | 18 |
# global
from typing import (
Iterable,
Optional,
Union,
Sequence,
Tuple,
NamedTuple,
Literal,
Callable,
Any,
List,
)
from numbers import Number
from collections import namedtuple
import numpy as np
# local
import ivy
from ivy.functional.backends.numpy.helpers import _scalar_output_to_0d_array
from ivy.func_wrapper import with_supported_dtypes, handle_out_argument
# noinspection PyProtectedMember
from . import backend_version
def moveaxis(
a: np.ndarray,
source: Union[int, Sequence[int]],
destination: Union[int, Sequence[int]],
/,
*,
copy: Optional[bool] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.moveaxis(a, source, destination)
moveaxis.support_native_out = False
def heaviside(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.heaviside(
x1,
x2,
out=out,
)
heaviside.support_native_out = True
def flipud(
m: np.ndarray,
/,
*,
copy: Optional[bool] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.flipud(m)
flipud.support_native_out = False
def vstack(
arrays: Sequence[np.ndarray],
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.vstack(arrays)
def hstack(
arrays: Sequence[np.ndarray],
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.hstack(arrays)
def rot90(
m: np.ndarray,
/,
*,
copy: Optional[bool] = None,
k: int = 1,
axes: Tuple[int, int] = (0, 1),
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.rot90(m, k, axes)
def top_k(
x: np.ndarray,
k: int,
/,
*,
axis: int = -1,
largest: bool = True,
sorted: bool = True,
out: Optional[Tuple[np.ndarray, np.ndarray]] = None,
) -> Tuple[np.ndarray, np.ndarray]:
k = min(k, x.shape[axis])
if not largest:
indices = np.argsort(x, axis=axis)
indices = np.take(indices, np.arange(k), axis=axis)
else:
indices = np.argsort(-x, axis=axis)
indices = np.take(indices, np.arange(k), axis=axis)
if not sorted:
indices = np.sort(indices, axis=axis)
topk_res = NamedTuple("top_k", [("values", np.ndarray), ("indices", np.ndarray)])
val = np.take_along_axis(x, indices, axis=axis)
return topk_res(val, indices)
def fliplr(
m: np.ndarray,
/,
*,
copy: Optional[bool] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.fliplr(m)
fliplr.support_native_out = False
def i0(
x: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.i0(x)
i0.support_native_out = False
def _flat_array_to_1_dim_array(x):
return x.reshape((1,)) if x.shape == () else x
def _slice(operand, start_indices, limit_indices, strides=None):
strides = [1] * len(operand.shape) if strides is None else strides
full_slice = ()
for i, _ in enumerate(operand.shape):
strides_i = int(strides[i])
start_i = int(start_indices[i])
limit_i = int(limit_indices[i])
full_slice += (slice(start_i, limit_i, strides_i),)
return operand[full_slice]
def _interior_pad(operand, padding_value, padding_config):
for axis, (_, _, interior) in enumerate(padding_config):
if interior > 0:
new_shape = list(operand.shape)
new_shape[axis] = new_shape[axis] + (new_shape[axis] - 1) * interior
new_array = np.full(new_shape, padding_value, dtype=operand.dtype)
src_indices = np.arange(operand.shape[axis])
dst_indices = src_indices * (interior + 1)
index_tuple = [slice(None)] * operand.ndim
index_tuple[axis] = dst_indices
new_array[tuple(index_tuple)] = operand
operand = new_array
start_indices = [0] * operand.ndim
limit_indices = [0] * operand.ndim
for axis, (low, high, _) in enumerate(padding_config):
if low < 0:
start_indices[axis] = abs(low)
if high < 0:
limit_indices[axis] = high
else:
limit_indices[axis] = operand.shape[axis] + 1
padded = _slice(operand, start_indices, limit_indices)
pad_width = [(0, 0)] * operand.ndim
for axis, (low, high, _) in enumerate(padding_config):
if low > 0 and high > 0:
pad_width[axis] = (low, high)
elif low > 0 and not high > 0:
pad_width[axis] = (low, 0)
elif high > 0 and not low > 0:
pad_width[axis] = (0, high)
padded = np.pad(padded, pad_width, constant_values=padding_value)
return padded
def pad(
input: np.ndarray,
pad_width: Union[Iterable[Tuple[int]], int],
/,
*,
mode: Union[
Literal[
"constant",
"dilated",
"edge",
"linear_ramp",
"maximum",
"mean",
"median",
"minimum",
"reflect",
"symmetric",
"wrap",
"empty",
],
Callable,
] = "constant",
stat_length: Union[Iterable[Tuple[int]], int] = 1,
constant_values: Union[Iterable[Tuple[Number]], Number] = 0,
end_values: Union[Iterable[Tuple[Number]], Number] = 0,
reflect_type: Literal["even", "odd"] = "even",
**kwargs: Optional[Any],
) -> np.ndarray:
if mode == "dilated":
return _interior_pad(input, constant_values, pad_width)
if callable(mode):
return np.pad(
_flat_array_to_1_dim_array(input),
pad_width,
mode=mode,
**kwargs,
)
if mode in ["maximum", "mean", "median", "minimum"]:
return np.pad(
_flat_array_to_1_dim_array(input),
pad_width,
mode=mode,
stat_length=stat_length,
)
elif mode == "constant":
return np.pad(
_flat_array_to_1_dim_array(input),
pad_width,
mode=mode,
constant_values=constant_values,
)
elif mode == "linear_ramp":
return np.pad(
_flat_array_to_1_dim_array(input),
pad_width,
mode=mode,
end_values=end_values,
)
elif mode in ["reflect", "symmetric"]:
return np.pad(
_flat_array_to_1_dim_array(input),
pad_width,
mode=mode,
reflect_type=reflect_type,
)
else:
return np.pad(
_flat_array_to_1_dim_array(input),
pad_width,
mode=mode,
)
def vsplit(
ary: np.ndarray,
indices_or_sections: Union[int, Sequence[int], np.ndarray],
/,
*,
copy: Optional[bool] = None,
) -> List[np.ndarray]:
if ary.ndim < 2:
raise ivy.exceptions.IvyError(
"vsplit only works on arrays of 2 or more dimensions"
)
return ivy.split(ary, num_or_size_splits=indices_or_sections, axis=0)
def dsplit(
ary: np.ndarray,
indices_or_sections: Union[int, Tuple[int, ...]],
/,
*,
copy: Optional[bool] = None,
) -> List[np.ndarray]:
if ary.ndim < 3:
raise ivy.utils.exceptions.IvyError(
"dsplit only works on arrays of 3 or more dimensions"
)
return ivy.split(ary, num_or_size_splits=indices_or_sections, axis=2)
def atleast_1d(
*arys: Union[np.ndarray, bool, Number], copy: Optional[bool] = None
) -> List[np.ndarray]:
return np.atleast_1d(*arys)
def dstack(
arrays: Sequence[np.ndarray],
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.dstack(arrays)
def atleast_2d(*arys: np.ndarray, copy: Optional[bool] = None) -> List[np.ndarray]:
return np.atleast_2d(*arys)
def atleast_3d(
*arys: Union[np.ndarray, bool, Number], copy: Optional[bool] = None
) -> List[np.ndarray]:
return np.atleast_3d(*arys)
@_scalar_output_to_0d_array
def take_along_axis(
arr: np.ndarray,
indices: np.ndarray,
axis: int,
/,
*,
mode: str = "fill",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if arr.ndim != indices.ndim:
raise ivy.utils.exceptions.IvyException(
"arr and indices must have the same number of dimensions;"
+ f" got {arr.ndim} vs {indices.ndim}"
)
if mode not in ["clip", "fill", "drop"]:
raise ValueError(
f"Invalid mode '{mode}'. Valid modes are 'clip', 'fill', 'drop'."
)
arr_shape = arr.shape
if axis < 0:
axis += arr.ndim
if mode == "clip":
max_index = arr.shape[axis] - 1
indices = np.clip(indices, 0, max_index)
elif mode in ("fill", "drop"):
if "float" in str(arr.dtype) or "complex" in str(arr.dtype):
fill_value = np.NAN
elif "uint" in str(arr.dtype):
fill_value = np.iinfo(arr.dtype).max
elif "int" in str(arr.dtype):
fill_value = -np.iinfo(arr.dtype).max - 1
else:
raise TypeError(
f"Invalid dtype '{arr.dtype}'. Valid dtypes are 'float', 'complex',"
" 'uint', 'int'."
)
indices = np.where((indices < 0) | (indices >= arr.shape[axis]), -1, indices)
arr_shape = list(arr_shape)
arr_shape[axis] = 1
fill_arr = np.full(arr_shape, fill_value, dtype=arr.dtype)
arr = np.concatenate([arr, fill_arr], axis=axis)
return np.take_along_axis(arr, indices, axis)
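# Illustrative sketch (hypothetical input) of how this backend handles
# out-of-range indices: mode="clip" clips them into the valid range, while
# mode="fill" (and "drop") replaces the gathered values with a fill value,
# NaN for floating dtypes.
# >>> arr = np.array([[10., 20.], [30., 40.]])
# >>> idx = np.array([[0, 5], [1, -1]])
# >>> take_along_axis(arr, idx, 1, mode="fill") # -> [[10., nan], [40., nan]]
# >>> take_along_axis(arr, idx, 1, mode="clip") # -> [[10., 20.], [40., 30.]]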
def hsplit(
ary: np.ndarray,
indices_or_sections: Union[int, Tuple[int, ...]],
/,
*,
copy: Optional[bool] = None,
) -> List[np.ndarray]:
if ary.ndim == 1:
return ivy.split(ary, num_or_size_splits=indices_or_sections, axis=0)
return ivy.split(ary, num_or_size_splits=indices_or_sections, axis=1)
take_along_axis.support_native_out = False
def broadcast_shapes(*shapes: Union[List[int], List[Tuple]]) -> Tuple[int]:
return np.broadcast_shapes(*shapes)
broadcast_shapes.support_native_out = False
def expand(
x: np.ndarray,
shape: Union[List[int], List[Tuple]],
/,
*,
copy: Optional[bool] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
shape = list(shape)
n_extra_dims = len(shape) - x.ndim
if n_extra_dims > 0:
x = np.expand_dims(x, tuple(range(n_extra_dims)))
for i, dim in enumerate(shape):
if dim < 0:
shape[i] = x.shape[i]
return np.broadcast_to(x, tuple(shape))
expand.support_native_out = False
def concat_from_sequence(
input_sequence: Union[Tuple[np.ndarray], List[np.ndarray]],
/,
*,
new_axis: int = 0,
axis: int = 0,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
is_tuple = type(input_sequence) is tuple
if is_tuple:
input_sequence = list(input_sequence)
if new_axis == 0:
ret = np.concatenate(input_sequence, axis=axis)
return ret
elif new_axis == 1:
ret = np.stack(input_sequence, axis=axis)
return ret
def unique_consecutive(
x: np.ndarray,
/,
*,
axis: Optional[int] = None,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
Results = namedtuple(
"Results",
["output", "inverse_indices", "counts"],
)
x_shape = None
if axis is None:
x_shape = x.shape
x = x.flatten()
axis = -1
if axis < 0:
axis += x.ndim
sub_arrays = np.split(
x,
np.where(
np.any(
np.diff(x, axis=axis) != 0,
axis=tuple(i for i in np.arange(x.ndim) if i != axis),
)
)[0]
+ 1,
axis=axis,
)
output = np.concatenate(
[np.unique(sub_array, axis=axis) for sub_array in sub_arrays],
axis=axis,
)
counts = np.array([sub_array.shape[axis] for sub_array in sub_arrays])
inverse_indices = np.repeat(np.arange(len(counts)), counts)
if x_shape:
inverse_indices = np.reshape(inverse_indices, x_shape)
return Results(
output.astype(x.dtype),
inverse_indices,
counts,
)
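# Illustrative sketch (hypothetical input): only consecutive duplicates are
# collapsed, so a value can reappear later in the output.
# >>> res = unique_consecutive(np.array([1, 1, 2, 2, 3, 1, 1]))
# # res.output -> [1, 2, 3, 1]
# # res.inverse_indices -> [0, 0, 1, 1, 2, 3, 3]
# # res.counts -> [2, 2, 1, 2]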
def fill_diagonal(
a: np.ndarray,
v: Union[int, float, np.ndarray],
/,
*,
wrap: bool = False,
) -> np.ndarray:
np.fill_diagonal(a, v, wrap=wrap)
return a
@_scalar_output_to_0d_array
def take(
x: Union[int, List, np.ndarray],
indices: Union[int, List, np.ndarray],
/,
*,
axis: Optional[int] = None,
mode: str = "raise",
fill_value: Optional[Number] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if mode not in ["raise", "wrap", "clip", "fill"]:
raise ValueError("mode must be one of 'clip', 'raise', 'wrap', or 'fill'")
# raise, clip, wrap
if mode != "fill":
return np.take(x, indices, axis=axis, mode=mode, out=out)
if not isinstance(x, np.ndarray):
x = np.array(x)
if len(x.shape) == 0:
x = np.array([x])
if not isinstance(indices, np.ndarray):
indices = np.array(indices)
if np.issubdtype(indices.dtype, np.floating):
indices = indices.astype(np.int64)
# fill
x_dtype = x.dtype
if fill_value is None:
# set according to jax behaviour
# https://tinyurl.com/66jn68uj
# NaN for inexact types (let fill_value as None)
if not np.issubdtype(x_dtype, np.inexact):
if np.issubdtype(x_dtype, np.bool_):
# True for booleans
fill_value = True
elif np.issubdtype(x_dtype, np.unsignedinteger):
# the largest positive value for unsigned types
fill_value = np.iinfo(x_dtype).max
else:
# the largest negative value for signed types
fill_value = np.iinfo(x_dtype).min
fill_value = np.array(fill_value, dtype=x_dtype)
x_shape = x.shape
ret = np.take(x, indices, axis=axis, mode="wrap")
if len(ret.shape) == 0:
# if scalar, scalar fill (replace)
if np.any(indices != 0):
ret = fill_value
else:
if ivy.exists(axis):
rank = len(x.shape)
axis = ((axis % rank) + rank) % rank
x_shape = x_shape[axis]
else:
axis = 0
x_shape = np.prod(x_shape)
bound_check = (indices < -x_shape) | (indices >= x_shape)
if np.any(bound_check):
if axis > 0:
bound_check = np.broadcast_to(
bound_check, (*x.shape[:axis], *bound_check.shape)
)
ret[bound_check] = fill_value
if ivy.exists(out):
ivy.inplace_update(out, ret)
return ret
take.support_native_out = True
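# Illustrative usage sketch (comment only, not part of the original module):
# mode="fill" performs a wrapped lookup and then overwrites out-of-bounds
# positions with the fill value (NaN by default for floating dtypes).
#   take(np.array([4.0, 9.0, 16.0]), np.array([0, 5]), mode="fill")
#   # -> [4., nan]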
def trim_zeros(
a: np.ndarray,
/,
*,
trim: Optional[str] = "fb",
) -> np.ndarray:
return np.trim_zeros(a, trim=trim)
def column_stack(
arrays: Sequence[np.ndarray], /, *, out: Optional[np.ndarray] = None
) -> np.ndarray:
return np.column_stack(arrays)
@with_supported_dtypes(
{"1.25.2 and below": ("float32", "float64", "int32", "int64")}, backend_version
)
def put_along_axis(
arr: np.ndarray,
indices: np.ndarray,
values: Union[int, np.ndarray],
axis: int,
/,
*,
mode: Literal["sum", "min", "max", "mul", "replace"] = "replace",
out: Optional[np.ndarray] = None,
):
ret = np.put_along_axis(arr.copy(), indices, values, axis)
return ivy.inplace_update(out, ret) if ivy.exists(out) else ret
put_along_axis.partial_mixed_handler = lambda *args, mode=None, **kwargs: mode in [
"replace",
]
@handle_out_argument
def unflatten(
x: np.ndarray,
/,
    shape: Optional[Tuple[int]] = None,
dim: Optional[int] = 0,
*,
out: Optional[np.ndarray] = None,
order: Optional[str] = None,
) -> np.ndarray:
dim = abs(len(x.shape) + dim) if dim < 0 else dim
res_shape = x.shape[:dim] + shape + x.shape[dim + 1 :]
res = np.reshape(x, res_shape)
return res
| ivy/ivy/functional/backends/numpy/experimental/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/experimental/manipulation.py",
"repo_id": "ivy",
"token_count": 7757
} | 19 |
"""Collection of Numpy random functions, wrapped to fit Ivy syntax and
signature."""
# global
import numpy as np
from typing import Optional, Union, Sequence
# local
import ivy
from ivy.functional.ivy.random import (
_check_bounds_and_get_shape,
_randint_check_dtype_and_bound,
_check_valid_scale,
)
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
# Extra #
# ------#
def random_uniform(
*,
low: Union[float, np.ndarray] = 0.0,
high: Union[float, np.ndarray] = 1.0,
shape: Optional[Union[ivy.NativeShape, Sequence[int], np.ndarray]] = None,
dtype: np.dtype,
device: Optional[str] = None,
out: Optional[np.ndarray] = None,
seed: Optional[int] = None,
) -> np.ndarray:
if seed:
np.random.seed(seed)
shape = _check_bounds_and_get_shape(low, high, shape).shape
return np.asarray(np.random.uniform(low, high, shape), dtype=dtype)
def random_normal(
*,
mean: Union[float, np.ndarray] = 0.0,
std: Union[float, np.ndarray] = 1.0,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[str] = None,
dtype: np.dtype,
seed: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
_check_valid_scale(std)
shape = _check_bounds_and_get_shape(mean, std, shape).shape
if seed:
np.random.seed(seed)
return np.asarray(np.random.normal(mean, std, shape), dtype=dtype)
@with_unsupported_dtypes({"1.26.3 and below": ("bfloat16",)}, backend_version)
def multinomial(
population_size: int,
num_samples: int,
/,
*,
batch_size: int = 1,
probs: Optional[np.ndarray] = None,
replace: bool = True,
device: Optional[str] = None,
seed: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if seed:
np.random.seed(seed)
if probs is None:
probs = (
np.ones(
(
batch_size,
population_size,
)
)
/ population_size
)
orig_probs_shape = list(probs.shape)
num_classes = orig_probs_shape[-1]
probs_flat = np.reshape(probs, (-1, orig_probs_shape[-1]))
probs_flat = probs_flat / np.sum(probs_flat, -1, keepdims=True, dtype="float64")
probs_stack = np.split(probs_flat, probs_flat.shape[0])
samples_stack = [
np.random.choice(num_classes, num_samples, replace, p=prob[0])
for prob in probs_stack
]
samples_flat = np.stack(samples_stack)
return np.asarray(np.reshape(samples_flat, orig_probs_shape[:-1] + [num_samples]))
def randint(
low: Union[float, np.ndarray],
high: Union[float, np.ndarray],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[str] = None,
dtype: Optional[Union[np.dtype, ivy.Dtype]] = None,
seed: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if not dtype:
dtype = ivy.default_int_dtype()
dtype = ivy.as_native_dtype(dtype)
_randint_check_dtype_and_bound(low, high, dtype)
shape = _check_bounds_and_get_shape(low, high, shape).shape
if seed:
np.random.seed(seed)
return np.random.randint(low, high, shape, dtype=dtype)
def seed(*, seed_value: int = 0):
np.random.seed(seed_value)
return
def shuffle(
x: np.ndarray,
axis: Optional[int] = 0,
/,
*,
seed: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if seed:
np.random.seed(seed)
if len(x.shape) == 0:
return x
x = np.array(x)
rng = np.random.default_rng()
rng.shuffle(x, axis=axis)
return x
| ivy/ivy/functional/backends/numpy/random.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/random.py",
"repo_id": "ivy",
"token_count": 1676
} | 20 |
# global
import paddle
from typing import Optional, Union
# local
import ivy
# invert_permutation
def invert_permutation(
x: Union[paddle.Tensor, list, tuple],
/,
) -> paddle.Tensor:
x = paddle.to_tensor(x) if not ivy.is_array(x) else x
sorted_indices = paddle.argsort(x)
inverse = paddle.zeros_like(sorted_indices)
inverse[sorted_indices] = paddle.arange(len(x))
inverse_permutation = paddle.argsort(inverse)
return inverse_permutation
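# Illustrative usage sketch (comment only, not part of the original module):
# the returned permutation q satisfies q[p[i]] == i for every index i.
#   p = paddle.to_tensor([2, 0, 1])
#   invert_permutation(p)                  # -> [1, 2, 0]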
# lexsort
def lexsort(
keys: paddle.Tensor, /, *, axis: int = -1, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
shape = keys.shape
if len(shape) == 1:
return paddle.argsort(keys, axis=axis)
if shape[0] == 0:
raise TypeError("need sequence of keys with len > 0 in lexsort")
if len(shape) == 2 and shape[1] == 1:
return paddle.to_tensor([0], dtype=paddle.int64)
result = paddle.argsort(keys[0], axis=axis)
if shape[0] == 1:
return result
for i in range(1, shape[0]):
key = keys[i]
ind = paddle.take_along_axis(key, result, axis=axis)
temp = paddle.argsort(ind, axis=axis)
result = paddle.take_along_axis(result, temp, axis=axis)
return result
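# Illustrative usage sketch (comment only, not part of the original module):
# as with np.lexsort, the last key is the primary sort key and earlier keys
# break ties.
#   keys = paddle.to_tensor([[1, 0, 1, 0],   # secondary key
#                            [3, 1, 2, 1]])  # primary key
#   lexsort(keys)                            # -> [1, 3, 2, 0]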
| ivy/ivy/functional/backends/paddle/experimental/sorting.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/experimental/sorting.py",
"repo_id": "ivy",
"token_count": 505
} | 21 |
# global
import paddle
from typing import Union, Optional, Sequence
import ivy.functional.backends.paddle as paddle_backend
def all(
x: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x = x.cast("bool")
if axis is None:
axis = list(range(x.ndim))
if isinstance(axis, int):
ret = paddle.all(x, axis=axis, keepdim=keepdims)
else:
axis = [i % x.ndim for i in axis]
axis.sort()
ret = x.clone()
for i, a in enumerate(axis):
ret = paddle.all(ret, axis=a if keepdims else a - i, keepdim=keepdims)
# The following code is to simulate other frameworks
# output shapes behaviour since min output dim is 1 in paddle
if isinstance(axis, Sequence):
if len(axis) == x.ndim:
axis = None
if (x.ndim == 1 or axis is None) and not keepdims:
ret = paddle_backend.squeeze(ret, axis=-1)
return ret
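# Illustrative usage sketch (comment only, not part of the original module):
# multi-axis reductions are applied one axis at a time, and the trailing
# singleton that Paddle keeps is squeezed away to match other backends.
#   x = paddle.to_tensor([[True, False], [True, True]])
#   all(x, axis=0)                         # -> [True, False]
#   all(x, axis=(0, 1))                    # -> False (0-d tensor)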
def any(
x: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x = x.cast("bool")
if axis is None:
axis = list(range(x.ndim))
if isinstance(axis, int):
ret = paddle.any(x, axis=axis, keepdim=keepdims)
else:
axis = [i % x.ndim for i in axis]
axis.sort()
ret = x.clone()
for i, a in enumerate(axis):
ret = paddle.any(ret, axis=a if keepdims else a - i, keepdim=keepdims)
# The following code is to simulate other frameworks
# output shapes behaviour since min output dim is 1 in paddle
if isinstance(axis, Sequence):
if len(axis) == x.ndim:
axis = None
if (x.ndim == 1 or axis is None) and not keepdims:
ret = paddle_backend.squeeze(ret, axis=-1)
return ret
| ivy/ivy/functional/backends/paddle/utility.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/utility.py",
"repo_id": "ivy",
"token_count": 840
} | 22 |
# global
import math
from typing import Union, Optional, Tuple, List, Literal, Sequence, Callable
import tensorflow as tf
# local
from ivy.func_wrapper import (
inputs_to_ivy_arrays,
output_to_native_arrays,
with_unsupported_dtypes,
with_supported_dtypes,
with_supported_device_and_dtypes,
)
from .. import backend_version
import ivy
from ivy.functional.ivy.layers import (
_handle_padding,
_get_num_padded_values,
_validate_max_pool_params,
_depth_max_pooling_helper,
)
from ivy.functional.ivy.experimental.layers import _padding_ceil_mode, _get_size
def _determine_depth_max_pooling(x, kernel, strides, dims, data_format="channel_last"):
# Determine depth pooling
kernel, strides, depth_pooling = _depth_max_pooling_helper(
x.shape, kernel, strides, dims=dims, data_format=data_format
)
if depth_pooling:
x = tf.transpose(x, (0, dims + 1, *range(1, dims + 1)))
return x, kernel, strides, depth_pooling
def max_pool1d(
x: Union[tf.Tensor, tf.Variable],
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
dilation: Union[int, Tuple[int]] = 1,
ceil_mode: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
dims = 1
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
permuted_x = False
if data_format == "NCW" and ivy.dev(x) == "cpu":
x = tf.transpose(x, (0, 2, 1))
kernel = [kernel[i] for i in [0, 2, 1]] if len(kernel) == (dims + 2) else kernel
strides = (
[strides[i] for i in [0, 2, 1]] if len(strides) == (dims + 2) else strides
)
data_format = "NWC"
permuted_x = True
# determine depth pooling
x, kernel, strides, depth_pooling = _determine_depth_max_pooling(
x, kernel, strides, dims, data_format=data_format
)
if not depth_pooling:
if ceil_mode:
new_kernel = [kernel[0] + (kernel[0] - 1) * (dilation[0] - 1)]
if data_format == "NCW":
x_shape = x.shape[2:]
else:
x_shape = x.shape[1:-1]
if isinstance(padding, str):
pad_w = _handle_padding(x_shape[0], strides[0], new_kernel[0], padding)
padding = [(pad_w // 2, pad_w - pad_w // 2)]
padding[0] = _padding_ceil_mode(
x_shape[0], new_kernel[0], padding[0], strides[0]
)
if isinstance(padding, list):
if any(item != 0 for sublist in padding for item in sublist):
if len(padding) < dims + 2:
if data_format == "NCW":
padding = [(0, 0), (0, 0), *padding]
else:
padding = [(0, 0), *padding, (0, 0)]
x = tf.pad(x, padding, constant_values=tf.math.reduce_min(x))
padding = "VALID"
elif isinstance(padding, list):
if any(item != 0 for sublist in padding for item in sublist):
raise NotImplementedError(
"Nonzero explicit padding is not supported for depthwise max pooling"
)
else:
padding = "VALID"
res = tf.nn.pool(
x, kernel, "MAX", strides, padding, dilations=dilation, data_format=data_format
)
if depth_pooling:
res = tf.transpose(res, (0, 2, 1))
if permuted_x:
return tf.transpose(res, (0, 2, 1))
return res
def max_pool2d(
x: Union[tf.Tensor, tf.Variable],
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
dilation: Union[int, Tuple[int, ...]] = 1,
ceil_mode: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
dims = 2
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
permuted_x = False
if data_format == "NCHW" and ivy.dev(x) == "cpu":
x = tf.transpose(x, (0, 2, 3, 1))
kernel = (
[kernel[i] for i in [0, 2, 3, 1]] if len(kernel) == (dims + 2) else kernel
)
strides = (
[strides[i] for i in [0, 2, 3, 1]]
if len(strides) == (dims + 2)
else strides
)
data_format = "NHWC"
permuted_x = True
# determine depth pooling
x, kernel, strides, depth_pooling = _determine_depth_max_pooling(
x, kernel, strides, dims, data_format=data_format
)
if not depth_pooling:
if ceil_mode:
new_kernel = [
kernel[i] + (kernel[i] - 1) * (dilation[i] - 1) for i in range(dims)
]
if data_format == "NCHW":
x_shape = x.shape[2:]
else:
x_shape = x.shape[1:-1]
if isinstance(padding, str):
pad_h = _handle_padding(x_shape[0], strides[0], new_kernel[0], padding)
pad_w = _handle_padding(x_shape[1], strides[1], new_kernel[1], padding)
padding = [
(pad_h // 2, pad_h - pad_h // 2),
(pad_w // 2, pad_w - pad_w // 2),
]
for i in range(dims):
padding[i] = _padding_ceil_mode(
x_shape[i], new_kernel[i], padding[i], strides[i]
)
if isinstance(padding, list):
if any(item != 0 for sublist in padding for item in sublist):
if len(padding) < dims + 2:
if data_format == "NCHW":
padding = [(0, 0), (0, 0), *padding]
else:
padding = [(0, 0), *padding, (0, 0)]
x = tf.pad(x, padding, constant_values=tf.math.reduce_min(x))
padding = "VALID"
elif isinstance(padding, list):
if any(item != 0 for sublist in padding for item in sublist):
raise NotImplementedError(
"Nonzero explicit padding is not supported for depthwise max pooling"
)
else:
padding = "VALID"
if any(d > 1 for d in dilation):
res = tf.nn.pool(
x,
kernel,
"MAX",
strides,
padding,
dilations=dilation,
data_format=data_format,
)
else: # faster
res = tf.nn.max_pool2d(x, kernel, strides, padding, data_format=data_format)
if depth_pooling:
res = tf.transpose(res, (0, 2, 3, 1))
if permuted_x:
return tf.transpose(res, (0, 3, 1, 2))
return res
@with_unsupported_dtypes(
{"2.15.0 and below": ("bfloat16", "float64", "float16")}, backend_version
)
def max_pool3d(
x: Union[tf.Tensor, tf.Variable],
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
dilation: Union[int, Tuple[int, ...]] = 1,
ceil_mode: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
dims = 3
kernel, strides, padding, dilation = _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
)
permuted_x = False
if data_format == "NCDHW" and ivy.dev(x) == "cpu":
x = tf.transpose(x, (0, 2, 3, 4, 1))
kernel = (
[kernel[i] for i in [0, 2, 3, 4, 1]]
if len(kernel) == (dims + 2)
else kernel
)
strides = (
[strides[i] for i in [0, 2, 3, 4, 1]]
if len(strides) == (dims + 2)
else strides
)
data_format = "NDHWC"
permuted_x = True
# determine depth pooling
x, kernel, strides, depth_pooling = _determine_depth_max_pooling(
x, kernel, strides, dims, data_format=data_format
)
if not depth_pooling:
if ceil_mode:
new_kernel = [
kernel[i] + (kernel[i] - 1) * (dilation[i] - 1) for i in range(dims)
]
if data_format == "NCDHW":
x_shape = x.shape[2:]
else:
x_shape = x.shape[1:-1]
if isinstance(padding, str):
pad_d = _handle_padding(x_shape[0], strides[0], new_kernel[0], padding)
pad_h = _handle_padding(x_shape[1], strides[1], new_kernel[1], padding)
pad_w = _handle_padding(x_shape[2], strides[2], new_kernel[2], padding)
padding = [
(pad_d // 2, pad_d - pad_d // 2),
(pad_h // 2, pad_h - pad_h // 2),
(pad_w // 2, pad_w - pad_w // 2),
]
for i in range(dims):
padding[i] = _padding_ceil_mode(
x_shape[i], new_kernel[i], padding[i], strides[i]
)
if isinstance(padding, list):
if any(item != 0 for sublist in padding for item in sublist):
if len(padding) < dims + 2:
if data_format == "NCDHW":
padding = [(0, 0), (0, 0), *padding]
else:
padding = [(0, 0), *padding, (0, 0)]
x = tf.pad(x, padding, constant_values=tf.math.reduce_min(x))
padding = "VALID"
elif isinstance(padding, list):
if any(item != 0 for sublist in padding for item in sublist):
raise NotImplementedError(
"Nonzero explicit padding is not supported for depthwise max pooling"
)
else:
padding = "VALID"
res = tf.nn.pool(
x, kernel, "MAX", strides, padding, dilations=dilation, data_format=data_format
)
if depth_pooling:
res = tf.transpose(res, (0, 2, 3, 4, 1))
if permuted_x:
return tf.transpose(res, (0, 4, 1, 2, 3))
return res
def _handle_manual_pad_avg_pool(x, kernel, strides, padding, ceil_mode, dims):
if isinstance(padding, str):
pad_specific = [
_handle_padding(x.shape[i + 1], strides[i], kernel[i], padding)
for i in range(dims)
]
padding = [
(pad_specific[i] // 2, pad_specific[i] - pad_specific[i] // 2)
for i in range(dims)
]
else:
if isinstance(padding, int):
padding = [(padding,) * 2] * dims
pad_specific = [sum(padding[i]) for i in range(dims)]
c = []
if ceil_mode:
for i in range(dims):
padding[i], c_i = _padding_ceil_mode(
x.shape[i + 1], kernel[i], padding[i], strides[i], True
)
c.append(c_i)
pad_specific[i] = sum(padding[i])
return padding, pad_specific, c
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "float64")}, backend_version)
def avg_pool1d(
x: Union[tf.Tensor, tf.Variable],
kernel: Union[int, Tuple[int]],
strides: Union[int, Tuple[int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if isinstance(kernel, int):
kernel = [kernel]
elif len(kernel) == 1:
kernel = [kernel[0]]
if isinstance(strides, int):
strides = [strides]
elif len(strides) == 1:
strides = [strides[0]]
if data_format in ("NCW", "NCL"):
print("why")
x = tf.transpose(x, (0, 2, 1))
manual_padding = False
# Have to manually pad if explicit padding is provided, or if ceil_mode is True
if not isinstance(padding, str) or ceil_mode or count_include_pad:
padding, pad_specific, c = _handle_manual_pad_avg_pool(
x, kernel, strides, padding, ceil_mode, 1
)
x = tf.pad(x, [(0, 0), *padding, (0, 0)], constant_values=0)
manual_padding = True
padding = "VALID"
res = tf.nn.avg_pool1d(x, kernel, strides, padding)
# removing any manual padding added because of ceil_mode or count_include_pad
if (manual_padding and not count_include_pad) or ceil_mode:
if not count_include_pad:
num_padded_values = tf.convert_to_tensor(
ivy.map(
_get_num_padded_values,
constant={
"p": pad_specific[0],
"n": x.shape[1] - pad_specific[0],
"k": kernel[0],
"s": strides[0],
},
unique={
"i": tf.range(res.shape[1]),
},
),
dtype=res.dtype,
)
else:
num_padded_values = tf.scatter_nd(
tf.constant([[res.shape[1] - 1]]),
tf.constant([c[0]], dtype=res.dtype),
tf.constant([res.shape[1]], dtype=tf.int32),
)
res = (kernel[0] * res) / (kernel[0] - num_padded_values[:, None])
if data_format in ("NCW", "NCL"):
res = tf.transpose(res, (0, 2, 1))
return res
@with_unsupported_dtypes(
{"2.15.0 and below": ("bfloat16", "float64", "float16")}, backend_version
)
def avg_pool2d(
x: Union[tf.Tensor, tf.Variable],
kernel: Union[int, Tuple[int], Tuple[int, int]],
strides: Union[int, Tuple[int], Tuple[int, int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if isinstance(kernel, int):
kernel = (kernel,) * 2
elif len(kernel) == 1:
kernel = (kernel[0],) * 2
if isinstance(strides, int):
strides = (strides,) * 2
elif len(strides) == 1:
strides = (strides[0],) * 2
if data_format == "NCHW":
x = tf.transpose(x, (0, 2, 3, 1))
manual_padding = False
# Have to manually pad if explicit padding is provided, or if ceil_mode is True
if not isinstance(padding, str) or ceil_mode or count_include_pad:
padding, pad_specific, c = _handle_manual_pad_avg_pool(
x, kernel, strides, padding, ceil_mode, 2
)
x = tf.pad(x, [(0, 0), *padding, (0, 0)], constant_values=0)
manual_padding = True
padding = "VALID"
if divisor_override is not None:
# sum pooling then dividing by divisor_override if it is provided
res = tf.nn.depthwise_conv2d(
x, tf.ones(kernel + (x.shape[-1], 1)), (1,) + strides + (1,), padding
)
res = res / divisor_override
else:
res = tf.nn.avg_pool2d(x, kernel, strides, padding)
# removing any manual padding added because of ceil_mode or count_include_pad
    if (
        (manual_padding and not count_include_pad) or ceil_mode
    ) and not divisor_override:
if not count_include_pad:
num_padded_values = [
tf.convert_to_tensor(
ivy.map(
_get_num_padded_values,
constant={
"p": pad_specific[i],
"n": x.shape[i + 1] - pad_specific[i],
"k": kernel[i],
"s": strides[i],
},
unique={
"i": tf.range(res.shape[i + 1]),
},
),
dtype=res.dtype,
)
for i in range(2)
]
else:
num_padded_values = []
for i in range(2):
num_pad = tf.scatter_nd(
tf.constant([[res.shape[i + 1] - 1]]),
tf.constant([c[i]], dtype=res.dtype),
tf.constant([res.shape[i + 1]], dtype=tf.int32),
)
num_padded_values.append(num_pad)
num_padded_values1 = num_padded_values[0][:, None]
num_padded_values2 = num_padded_values[1][None, :]
num_padded_values = (
num_padded_values1 * kernel[1]
+ num_padded_values2 * kernel[0]
- num_padded_values1 * num_padded_values2
)
kernel_mul = tf.cast(tf.math.reduce_prod(kernel), res.dtype)
res = (kernel_mul * res) / (kernel_mul - tf.expand_dims(num_padded_values, -1))
if data_format == "NCHW":
return tf.transpose(res, (0, 3, 1, 2))
return res
@with_unsupported_dtypes(
{"2.15.0 and below": ("bfloat16", "float64", "float16")}, backend_version
)
def avg_pool3d(
x: Union[tf.Tensor, tf.Variable],
kernel: Union[int, Tuple[int], Tuple[int, int, int]],
strides: Union[int, Tuple[int], Tuple[int, int, int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if isinstance(kernel, int):
kernel = (kernel,) * 3
elif len(kernel) == 1:
kernel = (kernel[0],) * 3
if isinstance(strides, int):
strides = (strides,) * 3
elif len(strides) == 1:
strides = (strides[0],) * 3
if data_format == "NCDHW":
x = tf.transpose(x, (0, 2, 3, 4, 1))
manual_padding = False
# Have to manually pad if explicit padding is provided, or if ceil_mode is True
if not isinstance(padding, str) or ceil_mode or count_include_pad:
padding, pad_specific, c = _handle_manual_pad_avg_pool(
x, kernel, strides, padding, ceil_mode, 3
)
x = tf.pad(x, [(0, 0), *padding, (0, 0)], constant_values=0)
manual_padding = True
padding = "VALID"
if divisor_override is not None:
# sum pooling then dividing by divisor_override if it is provided
res = ivy.conv_general_dilated(
x,
tf.ones(kernel + (1, x.shape[-1])),
list(strides),
padding,
dims=3,
feature_group_count=x.shape[-1],
)
res = res / divisor_override
else:
res = tf.nn.avg_pool3d(x, kernel, strides, padding)
# removing any manual padding added because of ceil_mode or count_include_pad
if (
(manual_padding and not count_include_pad) or ceil_mode
) and not divisor_override:
if not count_include_pad:
num_padded_values = [
tf.convert_to_tensor(
ivy.map(
_get_num_padded_values,
constant={
"p": pad_specific[i],
"n": x.shape[i + 1] - pad_specific[i],
"k": kernel[i],
"s": strides[i],
},
unique={
"i": tf.range(res.shape[i + 1]),
},
),
dtype=res.dtype,
)
for i in range(3)
]
else:
num_padded_values = []
for i in range(3):
num_pad = tf.scatter_nd(
tf.constant([[res.shape[i + 1] - 1]]),
tf.constant([c[i]], dtype=res.dtype),
tf.constant([res.shape[i + 1]], dtype=tf.int32),
)
num_padded_values.append(num_pad)
num_padded_values1 = tf.reshape(num_padded_values[0], (-1, 1, 1))
num_padded_values2 = tf.reshape(num_padded_values[1], (1, -1, 1))
num_padded_values3 = tf.reshape(num_padded_values[2], (1, 1, -1))
num_padded_values = (
num_padded_values1 * kernel[1] * kernel[2]
+ num_padded_values2 * kernel[0] * kernel[2]
+ num_padded_values3 * kernel[0] * kernel[1]
+ num_padded_values1 * num_padded_values2 * num_padded_values3
- num_padded_values1 * num_padded_values2 * kernel[2]
- num_padded_values1 * num_padded_values3 * kernel[1]
- num_padded_values2 * num_padded_values3 * kernel[0]
)
kernel_mul = tf.cast(tf.math.reduce_prod(kernel), res.dtype)
res = (kernel_mul * res) / (kernel_mul - tf.expand_dims(num_padded_values, -1))
if data_format == "NCDHW":
return tf.transpose(res, (0, 4, 1, 2, 3))
return res
@with_unsupported_dtypes(
{"2.15.0 and below": ("bfloat16", "float64", "float16")}, backend_version
)
def pool(
x: Union[tf.Tensor, tf.Variable],
window_shape: Union[int, Tuple[int], Tuple[int, int]],
pool_type: str,
/,
*,
strides: Optional[Union[int, Tuple[int], Tuple[int, int]]] = None,
padding: str = "VALID",
data_format: Optional[str] = None,
dilations: Optional[Union[int, Tuple[int], Tuple[int, int]]] = None,
ceil_mode: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.nn.pool(
x,
window_shape,
pool_type,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
)
@with_supported_dtypes({"2.15.0 and below": ("float32", "float64")}, backend_version)
def dct(
x: Union[tf.Tensor, tf.Variable],
/,
*,
type: Literal[1, 2, 3, 4] = 2,
n: Optional[int] = None,
axis: int = -1,
norm: Optional[Literal["ortho"]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> tf.Tensor:
# ToDo: Update this once tf.signal.dct supports axis other than -1
if axis != -1:
new_dims = list(range(len(x.shape)))
if axis < 0:
axis = len(x.shape) + axis
new_dims[axis], new_dims[-1] = new_dims[-1], axis
x = tf.transpose(x, new_dims)
dct_out = tf.signal.dct(x, type=type, n=n, axis=-1, norm=norm)
dct_out = tf.transpose(dct_out, new_dims)
else:
dct_out = tf.signal.dct(x, type=type, n=n, axis=-1, norm=norm)
return dct_out
def idct(
x: Union[tf.Tensor, tf.Variable],
/,
*,
type: Literal[1, 2, 3, 4] = 2,
n: Optional[int] = None,
axis: int = -1,
norm: Optional[Literal["ortho"]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> tf.Tensor:
inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]
return dct(x, type=inverse_type, n=n, axis=axis, norm=norm, out=out)
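# Illustrative usage sketch (comment only, not part of the original module):
# the inverse transform swaps DCT types (II <-> III) and reuses `dct`; with
# norm="ortho" this is an exact inverse up to floating-point error.
#   x = tf.constant([1.0, 2.0, 3.0, 4.0])
#   y = dct(x, type=2, norm="ortho")
#   idct(y, type=2, norm="ortho")          # -> approximately [1., 2., 3., 4.]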
def _fft_norm(
x: Union[tf.Tensor, tf.Variable],
dim: int,
/,
*,
norm: str = "backward",
):
n = tf.constant(x.shape[dim], dtype=x.dtype)
if norm == "backward":
return x
elif norm == "ortho":
return x / tf.cast(tf.sqrt(tf.cast(n, tf.float32)), x.dtype)
elif norm == "forward":
return x / tf.cast(n, x.dtype)
else:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
def _ifft_norm(
x: Union[tf.Tensor, tf.Variable],
dim: int,
*,
norm: str = "backward",
):
n = x.shape[dim]
if norm == "backward":
return x
elif norm == "ortho":
return x * math.sqrt(n)
elif norm == "forward":
return x * n
else:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
@with_supported_dtypes(
{"2.15.0 and below": ("complex", "float32", "float64")}, backend_version
)
def fft(
x: Union[tf.Tensor, tf.Variable],
dim: int,
/,
*,
norm: str = "backward",
n: Optional[Union[int, Tuple[int]]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
# ToDo: Remove conversion from float to complex when casting mode is working
if x.dtype == "float32":
x = tf.cast(x, tf.complex64)
elif x.dtype == "float64":
x = tf.cast(x, tf.complex128)
if not isinstance(dim, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(dim)}"
)
    if dim < -len(x.shape) or dim >= len(x.shape):
        raise ivy.utils.exceptions.IvyError(
            f"Invalid dim {dim}, expected a value ranging"
            f" from {-len(x.shape)} to {len(x.shape) - 1}"
        )
    if n is None:
        n = x.shape[dim]
if not isinstance(n, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(n)}"
)
if n <= 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {n}, expecting more than 1"
)
if norm not in ["backward", "ortho", "forward"]:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
if x.shape[dim] != n:
s = list(x.shape)
if s[dim] > n:
index = [slice(None)] * len(s)
index[dim] = slice(0, n)
x = x[tuple(index)]
del index
else:
s[dim] = n - s[dim]
z = tf.zeros(s, x.dtype)
x = tf.concat([x, z], dim)
del s
operation_name = f"{n} points FFT at dim {dim} with {norm} normalization"
    if dim != -1 and dim != len(x.shape) - 1:
        # transpose only when dim is not already the last axis
permute = [i for i in range(len(x.shape))]
permute[dim], permute[-1] = permute[-1], permute[dim]
x = tf.transpose(x, permute)
ret = tf.signal.fft(x, operation_name)
ret = tf.transpose(ret, permute)
del permute
else:
ret = tf.signal.fft(x, operation_name)
ret = _fft_norm(ret, dim, norm=norm)
return ret
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def dropout(
x: Union[tf.Tensor, tf.Variable],
prob: float,
/,
*,
scale: bool = True,
dtype: tf.DType = None,
training: bool = True,
seed: Optional[int] = None,
noise_shape: Optional[Sequence[int]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x = ivy.astype(x, dtype) if dtype and x.dtype != dtype else x
if prob == 0 or not training:
return x
res = tf.nn.dropout(x, prob, noise_shape=noise_shape, seed=seed)
res = tf.multiply(res, (1.0 - prob)) if not scale else res
return res
def dropout1d(
x: Union[tf.Tensor, tf.Variable],
prob: float,
/,
*,
training: bool = True,
data_format: str = "NWC",
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if training:
is_batched = len(x.shape) == 3
if data_format == "NCW":
perm = (0, 2, 1) if is_batched else (1, 0)
x = tf.transpose(x, perm)
res = tf.nn.dropout(x, prob)
if data_format == "NCW":
res = tf.transpose(res, perm)
else:
res = x
return res
def dropout2d(
x: Union[tf.Tensor, tf.Variable],
prob: float,
/,
*,
training: bool = True,
data_format: str = "NHWC",
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if training:
is_batched = len(x.shape) == 4
if data_format == "NCHW":
perm = (0, 2, 3, 1) if is_batched else (1, 2, 0)
x = tf.transpose(x, perm)
res = tf.nn.dropout(x, prob)
if data_format == "NCHW":
perm = (0, 3, 1, 2) if is_batched else (2, 0, 1)
res = tf.transpose(res, perm)
else:
res = x
return res
def dropout3d(
x: Union[tf.Tensor, tf.Variable],
prob: float,
/,
*,
training: bool = True,
data_format: str = "NDHWC",
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if training:
is_batched = len(x.shape) == 5
if data_format == "NCDHW":
perm = (0, 2, 3, 4, 1) if is_batched else (1, 2, 3, 0)
x = tf.transpose(x, perm)
res = tf.nn.dropout(x, prob)
if data_format == "NCDHW":
perm = (0, 4, 1, 2, 3) if is_batched else (3, 0, 1, 2)
res = tf.transpose(res, perm)
else:
res = x
return res
def ifft(
x: Union[tf.Tensor, tf.Variable],
dim: int,
*,
norm: str = "backward",
n: Optional[Union[int, Tuple[int]]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if not isinstance(dim, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(dim)}"
)
    if dim < -len(x.shape) or dim >= len(x.shape):
        raise ivy.utils.exceptions.IvyError(
            f"Invalid dim {dim}, expected a value ranging"
            f" from {-len(x.shape)} to {len(x.shape) - 1}"
        )
    if n is None:
        n = x.shape[dim]
if not isinstance(n, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(n)}"
)
if n <= 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {n}, expecting more than 1"
)
if norm not in ["backward", "ortho", "forward"]:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
if x.shape[dim] != n:
s = list(x.shape)
if s[dim] > n:
index = [slice(None)] * len(s)
index[dim] = slice(0, n)
x = x[tuple(index)]
del index
else:
s[dim] = n - s[dim]
z = tf.zeros(s, x.dtype)
x = tf.concat([x, z], axis=dim)
del s
operation_name = f"{n} points FFT at dim {dim} with {norm} normalization"
    if dim != -1 and dim != len(x.shape) - 1:
        # transpose only when dim is not already the last axis
permute = [i for i in range(len(x.shape))]
permute[dim], permute[-1] = permute[-1], permute[dim]
x = tf.transpose(x, permute)
ret = tf.signal.ifft(x, operation_name)
ret = tf.transpose(ret, permute)
del permute
else:
ret = tf.signal.ifft(x, operation_name)
ret = _ifft_norm(ret, dim, norm=norm)
return ret
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def embedding(
weights: Union[tf.Tensor, tf.Variable],
indices: Union[tf.Tensor, tf.Variable],
/,
*,
max_norm: Optional[float] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
ivy.utils.assertions.check_equal(
len(weights.shape), 2, message="weights must be 2-d", as_array=False
)
return tf.nn.embedding_lookup(weights, indices, max_norm=max_norm)
def interpolate(
x: Union[tf.Tensor, tf.Variable],
size: Union[Sequence[int], int],
/,
*,
mode: Literal[
"linear",
"bilinear",
"trilinear",
"nd",
"nearest",
"area",
"nearest_exact",
"tf_area",
"tf_bicubic",
"bicubic",
"mitchellcubic",
"lanczos3",
"lanczos5",
"gaussian",
] = "linear",
scale_factor: Optional[Union[Sequence[int], int]] = None,
recompute_scale_factor: Optional[bool] = None,
align_corners: bool = False,
antialias: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
):
input_size = ivy.shape(x)[2:]
dims = len(input_size)
size, _ = _get_size(scale_factor, size, dims, input_size)
if all(a == b for a, b in zip(size, input_size)):
ret = x
else:
remove_dim = False
if mode in ["linear", "tf_area", "lanczos3", "lanczos5", "nearest-exact"]:
if dims == 1:
size = (1,) + tuple(size)
x = tf.expand_dims(x, axis=-2)
dims = 2
remove_dim = True
mode = (
"bilinear"
if mode == "linear"
else (
"area"
if mode == "tf_area"
else "nearest" if mode == "nearest-exact" else mode
)
)
if mode == "tf_bicubic":
mode = "bicubic"
x = tf.transpose(x, (0, *range(2, dims + 2), 1))
ret = tf.transpose(
tf.cast(
tf.image.resize(x, size=size, method=mode, antialias=antialias), x.dtype
),
(0, dims + 1, *range(1, dims + 1)),
)
if remove_dim:
ret = tf.squeeze(ret, axis=-2)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
interpolate.partial_mixed_handler = (
lambda x, *args, mode="linear", recompute_scale_factor=None, align_corners=None, **kwargs: len( # noqa: E501
x.shape
)
< 4
and mode not in ["nearest", "area", "bicubic", "nd"]
and not align_corners
and recompute_scale_factor
)
def _fft2_norm(
x: Union[tf.Tensor, tf.Variable],
s: Optional[Sequence[int]] = None,
dim: Sequence[int] = (-2, -1),
norm: str = "backward",
):
n = tf.constant(s[0] * s[1], dtype=x.dtype)
if norm == "backward":
return x
elif norm == "ortho":
return x / tf.sqrt(n)
elif norm == "forward":
return x / n
else:
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
def trans_x_to_s(
x: Union[tf.Tensor, tf.Variable],
s: Optional[Sequence[int]] = None,
dim: Sequence[int] = (-2, -1),
) -> Union[tf.Tensor, tf.Variable]:
"""Change the shape of the input array x to the desired output shape s."""
if x.dtype not in [tf.complex128, tf.complex64]:
x = tf.cast(x, tf.float32)
x_shape = x.shape
if dim in [(-1, -2), (1, 0)]:
s = (s[1], s[0])
if s[0] >= x_shape[0] and s[1] >= x_shape[1]:
paddings = tf.constant([[0, s[0] - x_shape[0]], [0, s[1] - x_shape[1]]])
x_new = tf.pad(x, paddings=paddings)
elif (s[0] <= x_shape[0] or s[1] <= x_shape[1]) and min(s) > min(x_shape):
x_new = x[: s[0], : s[1]]
if s[0] != x_new.shape[0]:
size = s[0] - x_new.shape[0]
z = tf.zeros((size, s[1]), dtype=x.dtype)
x_new = tf.concat([x_new, z], 0)
elif s[1] != x_new.shape[1]:
size = s[1] - x_new.shape[1]
z = tf.zeros((s[0], size), dtype=x.dtype)
x_new = tf.concat([x_new, z], 1)
elif (s[0] >= x_shape[0] and s[1] <= x_shape[1]) and min(s) <= min(x_shape):
x_new = x[: s[0], : s[1]]
size = s[0] - x_new.shape[0]
z = tf.zeros((size, s[1]), dtype=x.dtype)
x_new = tf.concat([x_new, z], 0)
elif (s[0] < x_shape[0] and s[1] > x_shape[1]) and min(s) == min(x_shape):
x_new = x[: s[0], : s[1]]
size = s[1] - x_new.shape[1]
z = tf.zeros((s[0], size), dtype=x.dtype)
x_new = tf.concat([x_new, z], axis=1)
else:
x_new = x[: s[0], : s[1]]
return x_new
def fft2_operations(x, rank):
if x.shape.rank == 1:
x = tf.signal.fft(x)
elif x.shape.rank == 2:
x = tf.switch_case(
rank - 1, {0: lambda: tf.signal.fft(x), 1: lambda: tf.signal.fft2d(x)}
)
else:
x = tf.switch_case(
rank - 1,
{
0: lambda: tf.signal.fft(x),
1: lambda: tf.signal.fft2d(x),
2: lambda: tf.signal.fft3d(x),
},
)
return x
def _fft2_helper(x, shape, axes):
x = fft_input_validation(tf.convert_to_tensor(x))
input_shape = x.shape
input_rank_tensor = tf.rank(x)
shape_, axes_ = shape_and_axes_validation(shape, axes, input_rank_tensor)
axes = axes_initialization(shape, axes, input_shape, input_rank_tensor)
perform_padding, perform_transpose = perform_actions_initialization(
shape, axes, input_shape, input_rank_tensor
)
shape = shape_initialization(shape, axes, x)
rank = rank_initialization(axes)
x = get_x_after_pad_or_crop(x, shape, axes, perform_padding, input_rank_tensor)
perm = get_perm(input_rank_tensor, axes)
x = transpose_x(x, perm, perform_transpose)
x = fft2_operations(x, rank)
x = transpose_x(x, tf.argsort(perm), perform_transpose)
x = tf.ensure_shape(x, static_output_shape(input_shape, shape_, axes_))
return x
@with_supported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def fft2(
x: Union[tf.Tensor, tf.Variable],
*,
s: Optional[Sequence[int]] = None,
dim: Sequence[int] = (-2, -1),
norm: str = "backward",
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if s is None:
s = (x.shape[dim[0]], x.shape[dim[1]])
if len(x.shape) > 2:
result = _fft2_helper(x, s, dim)
else:
x_new = trans_x_to_s(x, s, dim)
x_complex = tf.cast(x_new, tf.complex128)
result = tf.signal.fft2d(x_complex)
result = _fft2_norm(result, s, dim, norm)
if x.dtype == tf.complex64:
result = tf.cast(result, dtype=tf.complex128)
return result
# --- IFFTN --- #
def fft_input_validation(x):
if not x.dtype.is_complex:
raise TypeError(
f"Invalid FFT input: `x` must be of a complex dtype. Received: {x.dtype}"
)
return x
def shape_and_axes_validation(shape, axes, input_rank_tensor):
if shape is not None:
shape = tf.convert_to_tensor(shape, dtype=tf.dtypes.int32)
checks_shape = [
tf.debugging.assert_less_equal(
tf.size(shape),
input_rank_tensor,
message=(
"Argument `shape` cannot have length greater than the rank of `x`."
f" Received: {shape}"
),
)
]
with tf.control_dependencies(checks_shape):
shape = tf.identity(shape)
if axes is not None:
axes = tf.convert_to_tensor(axes, dtype=tf.dtypes.int32)
checks_axes = [
tf.debugging.assert_less_equal(
tf.size(axes),
input_rank_tensor,
message=(
"Argument `axes` cannot have length greater than the rank of `x`."
f" Received: {axes}"
),
),
tf.debugging.assert_less(
axes,
input_rank_tensor,
message=f"Argument `axes` contains invalid indices. Received: {axes}",
),
tf.debugging.assert_greater_equal(
axes,
-input_rank_tensor,
message=f"Argument `axes` contains invalid indices. Received: {axes}",
),
]
with tf.control_dependencies(checks_axes):
axes = tf.identity(axes)
if shape is not None and axes is not None:
checks_shape_axes = [
tf.debugging.assert_equal(
tf.size(shape),
tf.size(axes),
message=(
"Arguments `shape` and `axes` must have equal length. Received:"
f" {shape}, {axes}"
),
)
]
with tf.control_dependencies(checks_shape_axes):
shape, axes = tf.identity_n([shape, axes])
return shape, axes
def axes_initialization(shape, axes, input_shape, input_rank_tensor):
if axes is None:
axes = (
tf.range(-tf.size(input_shape), 0)
if shape is None
else tf.range(-tf.size(shape), 0)
)
axes = tf.where(tf.math.less(axes, 0), axes + input_rank_tensor, axes)
return axes
def perform_actions_initialization(shape, axes, input_shape, input_rank_tensor):
perform_padding = shape is not None
perform_transpose = tf.math.logical_not(
tf.math.reduce_all(
tf.math.equal(
axes, tf.range(input_rank_tensor - tf.size(axes), input_rank_tensor)
)
)
)
return perform_padding, perform_transpose
def shape_initialization(shape, axes, x):
if shape is None:
shape = tf.gather(tf.shape(x), axes, axis=0)
return shape
def rank_initialization(axes):
rank = tf.size(axes)
with tf.control_dependencies(
[
tf.debugging.assert_less_equal(
rank, 3, message="N-D FFT supported only up to 3-D."
)
]
):
rank = tf.identity(rank)
return rank
def norm_initialization(norm, shape, x):
if norm == "backward":
norm_factor = tf.constant(1, x.dtype)
elif norm in ["forward", "ortho"]:
norm_factor = tf.cast(tf.math.reduce_prod(shape), x.dtype)
if norm == "ortho":
norm_factor = tf.math.sqrt(norm_factor)
return norm_factor
def get_x_after_pad_or_crop(x, shape, axes, perform_padding, input_rank_tensor):
if perform_padding:
pad_shape = -tf.ones([input_rank_tensor], dtype=tf.int32)
pad_shape = tf.tensor_scatter_nd_update(
pad_shape, tf.expand_dims(axes, -1), shape
)
x = _right_pad_or_crop(x, pad_shape)
return x
def get_perm(input_rank_tensor, axes):
all_dims = tf.range(input_rank_tensor, dtype=tf.dtypes.int32)
perm = tf.concat(
[
tf.boolean_mask(
all_dims,
tf.foldl(
lambda acc, elem: tf.math.logical_and(
acc, tf.math.not_equal(all_dims, elem)
),
axes,
initializer=tf.fill(all_dims.shape, True),
),
),
axes,
],
0,
)
return perm
def ifft_operations(x, rank, norm_factor):
if x.shape.rank == 1:
x = tf.signal.ifft(x)
elif x.shape.rank == 2:
x = tf.switch_case(
rank - 1, {0: lambda: tf.signal.ifft(x), 1: lambda: tf.signal.ifft2d(x)}
)
else:
x = tf.switch_case(
rank - 1,
{
0: lambda: tf.signal.ifft(x),
1: lambda: tf.signal.ifft2d(x),
2: lambda: tf.signal.ifft3d(x),
},
)
x = x * norm_factor
return x
def transpose_x(x, perm, perform_transpose):
x = tf.cond(perform_transpose, lambda: tf.transpose(x, perm=perm), lambda: x)
return x
def static_output_shape(input_shape, shape, axes):
output_shape = input_shape.as_list()
if shape is not None:
if axes is None:
axes = list(range(-len(shape), 0))
if isinstance(shape, tf.Tensor):
if isinstance(axes, tf.Tensor):
output_shape = [None] * len(output_shape)
else:
for ax in axes:
output_shape[ax] = None
else:
for idx, ax in enumerate(axes):
output_shape[ax] = shape[idx]
return tf.TensorShape(output_shape)
def _right_pad_or_crop(tensor, shape):
input_shape = tf.shape(tensor)
shape = tf.convert_to_tensor(shape, dtype=tf.dtypes.int32)
with tf.control_dependencies(
[tf.debugging.assert_less_equal(tf.size(shape), tf.size(input_shape))]
):
shape = tf.identity(shape)
shape = tf.concat([input_shape[: tf.size(input_shape) - tf.size(shape)], shape], 0)
pad_sizes = tf.math.maximum(shape - input_shape, 0)
pad_sizes = tf.expand_dims(pad_sizes, -1)
pad_sizes = tf.concat(
[tf.zeros(pad_sizes.shape, dtype=tf.dtypes.int32), pad_sizes], -1
)
tensor = tf.pad(tensor, pad_sizes, constant_values=0)
crop_tensor = tf.zeros(shape.shape, dtype=tf.dtypes.int32)
tensor = tf.slice(tensor, crop_tensor, shape)
return tensor
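# Illustrative usage sketch (comment only, not part of the original module):
# the target shape is aligned with the trailing axes; longer targets are
# zero-padded on the right and shorter ones are cropped.
#   t = tf.reshape(tf.range(6), (2, 3))    # [[0, 1, 2], [3, 4, 5]]
#   _right_pad_or_crop(t, [4])             # -> [[0, 1, 2, 0], [3, 4, 5, 0]]
#   _right_pad_or_crop(t, [2, 2])          # -> [[0, 1], [3, 4]]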
def _ifftn_helper(x, shape, axes, norm):
x = fft_input_validation(tf.convert_to_tensor(x))
input_shape = x.shape
input_rank_tensor = tf.rank(x)
shape_, axes_ = shape_and_axes_validation(shape, axes, input_rank_tensor)
axes = axes_initialization(shape, axes, input_shape, input_rank_tensor)
perform_padding, perform_transpose = perform_actions_initialization(
shape, axes, input_shape, input_rank_tensor
)
shape = shape_initialization(shape, axes, x)
rank = rank_initialization(axes)
norm_factor = norm_initialization(norm, shape, x)
x = get_x_after_pad_or_crop(x, shape, axes, perform_padding, input_rank_tensor)
perm = get_perm(input_rank_tensor, axes)
x = transpose_x(x, perm, perform_transpose)
x = ifft_operations(x, rank, norm_factor)
x = transpose_x(x, tf.argsort(perm), perform_transpose)
x = tf.ensure_shape(x, static_output_shape(input_shape, shape_, axes_))
return x
def ifftn(
x: Union[tf.Tensor, tf.Variable],
s: Optional[Union[int, Tuple[int]]] = None,
axes: Optional[Union[int, Tuple[int]]] = None,
*,
norm: Optional[str] = "backward",
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
result = _ifftn_helper(x, s, axes, norm)
    if ivy.exists(out):
        return ivy.inplace_update(out, result)
    return result
"""
RFFTN Function
"""
def rfft_input_validation(x):
if not x.dtype.is_floating:
raise TypeError(
f"Invalid FFT input: `x` must be of a real dtype. Received: {x.dtype}"
)
return x
def rfft_operations(x, rank, norm_factor):
if x.shape.rank == 1:
x = tf.signal.rfft(x)
elif x.shape.rank == 2:
x = tf.switch_case(
rank - 1, {0: lambda: tf.signal.rfft(x), 1: lambda: tf.signal.rfft2d(x)}
)
else:
x = tf.switch_case(
rank - 1,
{
0: lambda: tf.signal.rfft(x),
1: lambda: tf.signal.rfft2d(x),
2: lambda: tf.signal.rfft3d(x),
},
)
norm_factor = tf.cast(norm_factor, tf.complex128)
x = tf.cast(x, tf.complex128)
x = x / norm_factor
return x
def _rfftn_helper(x, shape, axes, norm):
x = rfft_input_validation(tf.convert_to_tensor(x))
input_shape = x.shape
input_rank_tensor = tf.rank(x)
shape_, axes_ = shape_and_axes_validation(shape, axes, input_rank_tensor)
axes = axes_initialization(shape, axes, input_shape, input_rank_tensor)
perform_padding, perform_transpose = perform_actions_initialization(
shape, axes, input_shape, input_rank_tensor
)
shape = shape_initialization(shape, axes, x)
rank = rank_initialization(axes)
norm_factor = norm_initialization(norm, shape, x)
x = get_x_after_pad_or_crop(x, shape, axes, perform_padding, input_rank_tensor)
perm = get_perm(input_rank_tensor, axes)
x = transpose_x(x, perm, perform_transpose)
x = rfft_operations(x, rank, norm_factor)
x = transpose_x(x, tf.argsort(perm), perform_transpose)
x = tf.ensure_shape(x, static_output_shape(input_shape, shape_, axes_))
return x
def rfft(
x: Union[tf.Tensor, tf.Variable],
/,
*,
n: Optional[int] = None,
axis: int = -1,
norm: Literal["backward", "ortho", "forward"] = "backward",
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
# type cast
if x.dtype in [tf.complex64, tf.complex128]:
x = tf.math.real(x)
if x.dtype not in [tf.float32, tf.float64]:
x = tf.cast(x, tf.float32)
# axis check
if not isinstance(axis, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(axis)}"
)
# axis normalization
naxis = axis
if axis < 0:
naxis = x.ndim + axis
if naxis < 0 or naxis >= x.ndim:
raise ivy.utils.exceptions.IvyError(
f"Axis {axis} is out of bounds for array of dimension {x.ndim}"
)
axis = naxis
# n checks
if n is None:
n = x.shape[axis]
if not isinstance(n, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(n)}"
)
if n < 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid number of FFT data points ({n}) specified."
)
# norm check & value
if norm == "backward":
inv_norm = tf.constant(1, dtype=x.dtype)
elif norm in ["forward", "ortho"]:
inv_norm = tf.cast(tf.math.reduce_prod(n), dtype=x.dtype)
if norm == "ortho":
inv_norm = tf.math.sqrt(inv_norm)
else:
raise ivy.utils.exceptions.IvyError(
f'Invalid norm value {norm}; should be "backward", "ortho" or "forward".'
)
fct = 1 / inv_norm
if x.shape[axis] != n:
s = list(x.shape)
if s[axis] > n:
index = [slice(None)] * len(s)
index[axis] = slice(0, n)
x = x[tuple(index)]
else:
s[axis] = n - s[axis]
z = tf.zeros(s, x.dtype)
x = tf.concat([x, z], axis=axis)
if axis == x.ndim - 1:
ret = tf.signal.rfft(x, fft_length=None, name=None)
else:
x = tf.experimental.numpy.swapaxes(x, axis, -1)
ret = tf.signal.rfft(x, fft_length=None, name=None)
ret = tf.experimental.numpy.swapaxes(ret, axis, -1)
ret *= tf.cast(fct, dtype=ret.dtype)
if x.dtype != tf.float64:
ret = tf.cast(ret, dtype=tf.complex64)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
@with_supported_device_and_dtypes(
{
"2.5.0 and above": {
"cpu": (
"float32",
"float64",
"complex128",
)
}
},
backend_version,
)
def rfftn(
x: Union[tf.Tensor, tf.Variable],
s: Optional[Union[int, Tuple[int]]] = None,
axes: Optional[Union[int, Tuple[int]]] = None,
*,
norm: str = "backward",
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
result = _rfftn_helper(x, s, axes, norm)
    result = tf.cast(result, tf.complex128)
    if ivy.exists(out):
        return ivy.inplace_update(out, result)
    return result
# stft
@with_supported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def stft(
signals: Union[tf.Tensor, tf.Variable],
frame_length: int,
frame_step: int,
/,
*,
fft_length: Optional[int] = None,
window_fn: Optional[Callable] = None,
pad_end: Optional[bool] = False,
name: Optional[str] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if not isinstance(frame_length, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(frame_length)}"
)
if frame_length < 1:
raise ivy.utils.exceptions.IvyError(
f"Invalid data points {frame_length}, expecting frame_length larger than or"
" equal to 1"
)
if not isinstance(frame_step, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(frame_step)}"
)
    if frame_step < 1:
        raise ivy.utils.exceptions.IvyError(
            f"Invalid data points {frame_step}, expecting frame_step larger than or"
            " equal to 1"
        )
if fft_length is not None:
if not isinstance(fft_length, int):
raise ivy.utils.exceptions.IvyError(
f"Expecting <class 'int'> instead of {type(fft_length)}"
)
        if fft_length < 1:
            raise ivy.utils.exceptions.IvyError(
                f"Invalid data points {fft_length}, expecting fft_length larger"
                " than or equal to 1"
            )
result = tf.signal.stft(
signals,
frame_length,
frame_step,
fft_length=fft_length,
window_fn=window_fn,
pad_end=pad_end,
name=name,
)
    if ivy.exists(out):
        return ivy.inplace_update(out, result)
    return result
def _to_4d(x):
t = x # Start with the original tensor
while len(t.shape) < 4: # Continue expanding dimensions until 4D
t = tf.expand_dims(t, axis=0)
return t
def sliding_window(
input: Union[tf.Tensor, tf.Variable],
kernel_size: Union[int, Tuple[int, int]],
/,
*,
stride: Union[int, Tuple[int, int]] = 1,
dilation: Union[int, Tuple[int, int]] = 1,
padding: Union[str, int, Tuple[int, int]] = "VALID",
) -> Union[tf.Tensor, tf.Variable]:
if len(input.shape) != 4:
input = _to_4d(input)
input = tf.transpose(input, (0, 2, 3, 1))
kernel_size = (
[1]
+ ([kernel_size] * 2 if isinstance(kernel_size, int) else list(kernel_size))
+ [1]
)
if len(kernel_size) < 4:
kernel_size.append(1)
stride = [1] + ([stride] * 2 if isinstance(stride, int) else list(stride)) + [1]
if len(stride) < 4:
stride.append(1)
dilation = (
[1] + ([dilation] * 2 if isinstance(dilation, int) else list(dilation)) + [1]
)
if len(dilation) < 4:
dilation.append(1)
padding = [padding] * 2 if isinstance(padding, int) else padding
if isinstance(padding, str) and padding.upper() in ["VALID", "SAME"]:
padding = padding
elif padding[0] == padding[1] == 0:
padding = "VALID"
elif padding[0] == padding[1] != 0:
padding = "SAME"
else:
raise ivy.utils.exceptions.IvyError(
f"Cannot convert padding sequence {padding} to TensorFlow padding mode"
)
return tf.image.extract_patches(
images=input, sizes=kernel_size, strides=stride, rates=dilation, padding=padding
)
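# Illustrative usage sketch (comment only, not part of the original module):
# inputs are interpreted as NCHW and patches are extracted in NHWC order.
#   img = tf.reshape(tf.range(16, dtype=tf.float32), (1, 1, 4, 4))
#   sliding_window(img, 2, stride=2).shape   # -> (1, 2, 2, 4)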
def rnn(
step_function,
inputs,
initial_states,
/,
*,
go_backwards: bool = False,
mask: Optional[Union[tf.Tensor, tf.Variable]] = None,
constants: Optional[Union[tf.Tensor, tf.Variable]] = None,
unroll: bool = False,
input_length: Optional[int] = None,
time_major: bool = False,
zero_output_for_mask: bool = False,
return_all_outputs: bool = True,
):
step_function = inputs_to_ivy_arrays(output_to_native_arrays(step_function))
return tf.keras.backend.rnn(
step_function,
inputs,
initial_states,
go_backwards=go_backwards,
mask=mask,
constants=constants,
unroll=unroll,
input_length=input_length,
time_major=time_major,
zero_output_for_mask=zero_output_for_mask,
return_all_outputs=return_all_outputs,
)
| ivy/ivy/functional/backends/tensorflow/experimental/layers.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/experimental/layers.py",
"repo_id": "ivy",
"token_count": 27661
} | 23 |
# global
import math
from numbers import Number
from typing import Union, Tuple, Optional, List, Sequence
import numpy as np
import tensorflow as tf
# local
import ivy
# noinspection PyProtectedMember
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.ivy.manipulation import _calculate_out_shape
from . import backend_version
def _reshape_fortran_tf(x, shape):
if len(x.shape) > 0:
x = tf.transpose(x)
return tf.transpose(tf.reshape(x, shape[::-1]))
# Array API Standard #
# -------------------#
def concat(
xs: Union[Tuple[tf.Tensor, ...], List[tf.Tensor]],
/,
*,
axis: int = 0,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if axis is not None:
try:
return tf.concat(xs, axis)
except tf.errors.InvalidArgumentError as error:
if "(zero-based) was expected to be" in error.message:
highest_dtype = xs[0].dtype
for i in xs:
highest_dtype = ivy.promote_types(highest_dtype, i.dtype)
highest_dtype = ivy.as_native_dtype(highest_dtype)
return tf.concat(
[
tf.cast(x, highest_dtype) if x.dtype != highest_dtype else x
for x in xs
],
axis,
)
else:
raise
return concat([tf.reshape(x, -1) for x in xs], axis=0)
def expand_dims(
x: Union[tf.Tensor, tf.Variable],
/,
*,
copy: Optional[bool] = None,
axis: Union[int, Sequence[int]] = 0,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
try:
out_shape = _calculate_out_shape(axis, x.shape)
ret = tf.reshape(x, shape=out_shape)
return ret
except (tf.errors.InvalidArgumentError, np.AxisError) as error:
raise ivy.utils.exceptions.IvyIndexError(error) from error
def flip(
x: Union[tf.Tensor, tf.Variable],
/,
*,
copy: Optional[bool] = None,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
num_dims = len(x.shape)
if not num_dims:
ret = x
else:
if axis is None:
new_axis = list(range(num_dims))
else:
new_axis = axis
if isinstance(new_axis, int):
new_axis = [new_axis]
else:
new_axis = new_axis
new_axis = [item + num_dims if item < 0 else item for item in new_axis]
ret = tf.reverse(x, new_axis)
return ret
def permute_dims(
x: Union[tf.Tensor, tf.Variable],
/,
axes: Tuple[int, ...],
*,
copy: Optional[bool] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.transpose(x, perm=axes)
@with_unsupported_dtypes({"2.15.0 and below": ("bool",)}, backend_version)
def reshape(
x: Union[tf.Tensor, tf.Variable],
/,
shape: Union[ivy.NativeShape, Sequence[int]],
*,
copy: Optional[bool] = None,
order: str = "C",
allowzero: bool = True,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
ivy.utils.assertions.check_elem_in_list(order, ["C", "F"])
if not allowzero:
shape = [
new_s if con else old_s
for new_s, con, old_s in zip(shape, tf.constant(shape) != 0, x.shape)
]
if order == "F":
return _reshape_fortran_tf(x, shape)
return tf.reshape(x, shape)
def roll(
x: Union[tf.Tensor, tf.Variable],
/,
shift: Union[int, Sequence[int]],
*,
axis: Optional[Union[int, Sequence[int]]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if axis is None:
originalShape = x.shape
axis = 0
x = tf.reshape(x, [-1])
roll = tf.roll(x, shift, axis)
ret = tf.reshape(roll, originalShape)
else:
if isinstance(shift, int) and (type(axis) in [list, tuple]):
shift = [shift for _ in range(len(axis))]
ret = tf.roll(x, shift, axis)
return ret
def squeeze(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
copy: Optional[bool] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if isinstance(axis, int):
if ivy.any(x.shape[axis] > 1):
raise ValueError(f"{x.shape[axis]} must be lesser than or equal to 1")
ret = tf.squeeze(x, axis)
elif axis is None:
ret = tf.squeeze(x)
else:
if isinstance(axis, tuple):
axis = list(axis)
normalise_axis = [
(len(x.shape) - abs(element)) if element < 0 else element
for element in axis
]
normalise_axis.sort()
axis_updated_after_squeeze = [
dim - key for (key, dim) in enumerate(normalise_axis)
]
for i in axis_updated_after_squeeze:
if x.shape[i] > 1:
raise ValueError(
"Expected dimension of size 1, but found dimension size"
f" {x.shape[i]}"
)
else:
x = tf.squeeze(x, i)
ret = x
return ret
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16",)}, backend_version)
def stack(
arrays: Union[Tuple[tf.Tensor], List[tf.Tensor]],
/,
*,
axis: int = 0,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
try:
return tf.experimental.numpy.stack(arrays, axis)
except ValueError as e:
raise ivy.utils.exceptions.IvyIndexError(e) from e
# Extra #
# ------#
def split(
x: Union[tf.Tensor, tf.Variable],
/,
*,
copy: Optional[bool] = None,
num_or_size_splits: Optional[
Union[int, Sequence[int], Union[tf.Tensor, tf.Variable]]
] = None,
axis: int = 0,
with_remainder: bool = False,
) -> Union[tf.Tensor, tf.Variable]:
if x.shape == ():
if num_or_size_splits is not None and num_or_size_splits != 1:
raise ivy.utils.exceptions.IvyException(
"input array had no shape, but num_sections specified was"
f" {num_or_size_splits}"
)
return [x]
if num_or_size_splits is None:
dim_size = tf.shape(x)[axis]
num_or_size_splits = int(dim_size)
if isinstance(num_or_size_splits, (tf.Tensor, tf.Variable)):
num_or_size_splits = tf.cast(num_or_size_splits, tf.int32)
elif isinstance(num_or_size_splits, int) and with_remainder:
num_chunks = x.shape[axis] / num_or_size_splits
num_chunks_int = math.floor(num_chunks)
remainder = num_chunks - num_chunks_int
if remainder != 0:
num_or_size_splits = [num_or_size_splits] * num_chunks_int + [
int(remainder * num_or_size_splits)
]
return tf.split(x, num_or_size_splits, axis)
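# Worked example: with with_remainder=True, an integer num_or_size_splits of 3
# applied along an axis of length 7 is expanded to the size list [3, 3, 1]
# (floor(7 / 3) = 2 full chunks of 3 plus a remainder chunk of 1) before being
# handed to tf.split.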
def repeat(
x: Union[tf.Tensor, tf.Variable],
/,
repeats: Union[int, List[int]],
*,
axis: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.repeat(x, repeats, axis)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"uint8",
"uint16",
"uint32",
"int8",
"int16",
)
},
backend_version,
)
def tile(
x: Union[tf.Tensor, tf.Variable],
/,
repeats: Sequence[int],
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if x.shape == ():
x = tf.reshape(x, (-1,))
if isinstance(repeats, Number):
repeats = [repeats]
if isinstance(repeats, tf.Tensor) and repeats.shape == ():
repeats = tf.reshape(repeats, (-1,))
# code to unify behaviour with numpy and torch
if len(x.shape) < len(repeats):
while len(x.shape) != len(repeats):
x = tf.expand_dims(x, 0)
elif len(x.shape) > len(repeats):
repeats = list(repeats)
while len(x.shape) != len(repeats):
repeats = [1] + repeats
# TODO remove the unifying behaviour code if tensorflow handles this
# https://github.com/tensorflow/tensorflow/issues/58002
return tf.tile(x, repeats)
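# Worked example: tile(tf.constant([1, 2]), (2, 3)) first expands x to shape
# (1, 2) so its rank matches len(repeats), then tiles to shape (2, 6); in the
# opposite case, repeats is left-padded with 1s, mirroring how numpy and torch
# broadcast the repeats argument.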
def constant_pad(
x, /, pad_width, *, value=0, out: Optional[Union[tf.Tensor, tf.Variable]] = None
):
if x.shape == ():
x = tf.reshape(x, (-1,))
return tf.pad(x, pad_width, constant_values=value)
def zero_pad(x, /, pad_width, *, out: Optional[Union[tf.Tensor, tf.Variable]] = None):
if x.shape == ():
x = tf.reshape(x, (-1,))
return tf.pad(x, pad_width)
def swapaxes(
x,
axis0,
axis1,
/,
*,
copy: Optional[bool] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
):
x_shape = x.shape
num_dims = len(x_shape)
axis0 %= num_dims
axis1 %= num_dims
config = list(range(num_dims))
config.pop(axis0)
config.insert(axis0, axis1)
config.pop(axis1)
config.insert(axis1, axis0)
return tf.transpose(x, config)
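# Worked example: for a tensor of shape (2, 3, 4), swapaxes(x, 0, 2) builds the
# permutation [2, 1, 0] via the pop/insert pairs above and returns a transposed
# tensor of shape (4, 3, 2).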
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def clip(
x: Union[tf.Tensor, tf.Variable],
/,
x_min: Optional[Union[Number, tf.Tensor, tf.Variable]] = None,
x_max: Optional[Union[Number, tf.Tensor, tf.Variable]] = None,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if x_min is None and x_max is None:
raise ValueError("At least one of the x_min or x_max must be provided")
promoted_type = x.dtype
if x_min is not None:
if not hasattr(x_min, "dtype"):
x_min = ivy.array(x_min).data
promoted_type = ivy.as_native_dtype(ivy.promote_types(x.dtype, x_min.dtype))
if x_max is not None:
if not hasattr(x_max, "dtype"):
x_max = ivy.array(x_max).data
promoted_type = ivy.as_native_dtype(
ivy.promote_types(promoted_type, x_max.dtype)
)
x_max = tf.cast(x_max, promoted_type)
x = tf.cast(x, promoted_type)
if x_min is not None:
x_min = tf.cast(x_min, promoted_type)
cond = True
if x_min is not None and x_max is not None:
if tf.math.reduce_any(tf.experimental.numpy.greater(x_min, x_max)):
cond = False
if cond:
return tf.experimental.numpy.clip(x, x_min, x_max)
else:
return tf.experimental.numpy.minimum(
x_max, tf.experimental.numpy.maximum(x, x_min)
)
def unstack(
x: Union[tf.Tensor, tf.Variable],
/,
*,
copy: Optional[bool] = None,
axis: int = 0,
keepdims: bool = False,
) -> List[tf.Tensor]:
if x.shape == ():
return [x]
ret = tf.unstack(x, axis=axis)
if keepdims:
return [tf.expand_dims(r, axis) for r in ret]
return ret
| ivy/ivy/functional/backends/tensorflow/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/manipulation.py",
"repo_id": "ivy",
"token_count": 5311
} | 24 |
# global
import copy
from numbers import Number
from typing import Union, List, Optional, Sequence, Tuple
import numpy as np
import torch
from torch import Tensor
# local
import ivy
from ivy.func_wrapper import (
with_unsupported_dtypes,
with_unsupported_device_and_dtypes,
)
from ivy.functional.ivy.creation import (
_asarray_to_native_arrays_and_back,
_asarray_infer_device,
_asarray_infer_dtype,
_asarray_handle_nestable,
NestedSequence,
SupportsBufferProtocol,
_asarray_inputs_to_native_shapes,
_remove_np_bfloat16,
)
from . import backend_version
# noinspection PyProtectedMember
# Array API Standard #
# -------------------#
def _differentiable_linspace(start, stop, num, *, device, dtype=None):
if num == 1:
return torch.unsqueeze(start, 0)
n_m_1 = num - 1
increment = (stop - start) / n_m_1
increment_tiled = increment.repeat(n_m_1)
increments = increment_tiled * torch.linspace(
1, n_m_1, n_m_1, device=device, dtype=dtype
)
res = torch.cat(
(torch.unsqueeze(torch.tensor(start, dtype=dtype), 0), start + increments), 0
)
return res
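# Worked example: _differentiable_linspace(torch.tensor(0.0), torch.tensor(1.0),
# 5, device="cpu") yields [0.0, 0.25, 0.5, 0.75, 1.0], assembled as
# start + increment * [1, 2, 3, 4]; building the result from differentiable ops
# instead of torch.linspace lets gradients flow back to start and stop.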
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, backend_version)
def arange(
start: float,
/,
stop: Optional[float] = None,
step: float = 1,
*,
dtype: Optional[Union[ivy.Dtype, torch.dtype]] = None,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if stop is None:
stop = start
start = 0
if (step > 0 and start > stop) or (step < 0 and start < stop):
if isinstance(stop, float):
stop = float(start)
else:
stop = start
if dtype is None:
if isinstance(start, int) and isinstance(stop, int) and isinstance(step, int):
return torch.arange(start, stop, step, dtype=torch.int64, device=device).to(
torch.int32
)
else:
return torch.arange(start, stop, step, device=device)
else:
dtype = ivy.as_native_dtype(ivy.default_dtype(dtype=dtype))
return torch.arange(start, stop, step, dtype=dtype, device=device)
arange.support_native_out = True
def _stack_tensors(x, dtype):
if isinstance(x, (list, tuple)) and len(x) != 0 and isinstance(x[0], (list, tuple)):
for i, item in enumerate(x):
x[i] = _stack_tensors(item, dtype)
x = torch.stack(x)
else:
if isinstance(x, (list, tuple)):
if isinstance(x[0], torch.Tensor):
x = torch.stack([torch.as_tensor(i, dtype=dtype) for i in x])
else:
x = torch.as_tensor(x, dtype=dtype)
return x
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, backend_version)
@_asarray_to_native_arrays_and_back
@_asarray_infer_device
@_asarray_handle_nestable
@_asarray_inputs_to_native_shapes
@_asarray_infer_dtype
def asarray(
obj: Union[
torch.Tensor,
np.ndarray,
torch.Size,
bool,
int,
float,
NestedSequence,
SupportsBufferProtocol,
],
/,
*,
copy: Optional[bool] = None,
dtype: Optional[Union[ivy.Dtype, torch.dtype]] = None,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
obj = ivy.nested_map(_remove_np_bfloat16, obj, shallow=False)
if isinstance(obj, Sequence) and len(obj) != 0:
contain_tensor = ivy.nested_any(obj, lambda x: isinstance(x, torch.Tensor))
# if `obj` is a list of specifically tensors or
# a multidimensional list which contains a tensor
if contain_tensor:
ret = _stack_tensors(obj, dtype).to(device)
return ret.clone().detach() if copy else ret
try:
ret = torch.as_tensor(obj, dtype=dtype, device=device)
except ValueError as e:
if "At least one stride in the given numpy array is negative" in str(e):
ret = torch.as_tensor(obj.copy(), dtype=dtype, device=device)
else:
raise
return ret.clone().detach() if copy else ret
def empty(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: torch.dtype,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.empty(
shape,
dtype=dtype,
device=device,
out=out,
)
empty.support_native_out = True
def empty_like(
x: torch.Tensor,
/,
*,
dtype: torch.dtype,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.empty_like(x, dtype=dtype, device=device)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, backend_version)
def eye(
n_rows: int,
n_cols: Optional[int] = None,
/,
*,
k: int = 0,
batch_shape: Optional[Union[int, Sequence[int]]] = None,
dtype: torch.dtype,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if n_cols is None:
n_cols = n_rows
if batch_shape is None:
batch_shape = []
i = torch.eye(n_rows, n_cols, dtype=dtype, device=device)
reshape_dims = [1] * len(batch_shape) + [n_rows, n_cols]
tile_dims = list(batch_shape) + [1, 1]
return_mat = torch.reshape(i, reshape_dims).repeat(tile_dims)
# k=index of the diagonal. A positive value refers to an upper diagonal,
# a negative value to a lower diagonal, and 0 to the main diagonal.
# Default: ``0``.
# value of k ranges from -n_rows < k < n_cols
if k == 0: # refers to the main diagonal
ret = return_mat
# when k is negative
elif -n_rows < k < 0:
mat = torch.concat(
[
torch.zeros([-k, n_cols], dtype=dtype, device=device, out=out),
i[: n_rows + k],
],
0,
)
ret = torch.reshape(mat, reshape_dims).repeat(tile_dims)
# when k is positive
elif 0 < k < n_cols:
mat = torch.concat(
[
torch.zeros([n_rows, k], dtype=dtype, device=device),
i[:, : n_cols - k],
],
1,
)
ret = torch.reshape(mat, reshape_dims).repeat(tile_dims)
if out is not None:
return ivy.inplace_update(out, ret)
else:
ret = torch.zeros(
batch_shape + [n_rows, n_cols], dtype=dtype, device=device, out=out
)
return ret
eye.support_native_out = True
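# Worked example: eye(3, 4, k=1, dtype=torch.float32) places the ones on the
# first superdiagonal,
# [[0., 1., 0., 0.],
#  [0., 0., 1., 0.],
#  [0., 0., 0., 1.]],
# while a negative k shifts them below the main diagonal instead.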
def to_dlpack(x, /, *, out: Optional[torch.Tensor] = None):
    return torch.utils.dlpack.to_dlpack(x)
def from_dlpack(x, /, *, out: Optional[torch.Tensor] = None):
return torch.from_dlpack(x)
def full(
shape: Union[ivy.NativeShape, Sequence[int]],
fill_value: Union[int, float, bool],
*,
dtype: Optional[Union[ivy.Dtype, torch.dtype]] = None,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> Tensor:
dtype = ivy.default_dtype(dtype=dtype, item=fill_value, as_native=True)
if isinstance(shape, int):
shape = (shape,)
return torch.full(
shape,
fill_value,
dtype=dtype,
device=device,
out=out,
)
full.support_native_out = True
def full_like(
x: torch.Tensor,
/,
fill_value: Number,
*,
dtype: torch.dtype,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.full_like(x, fill_value, dtype=dtype, device=device)
def _slice_at_axis(sl, axis):
return (slice(None),) * axis + (sl,) + (...,)
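# Worked example: _slice_at_axis(slice(None, -1), 2) returns
# (slice(None), slice(None), slice(None, -1), Ellipsis), an index tuple that
# drops the last element along axis 2 and leaves every other axis untouched.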
@with_unsupported_device_and_dtypes(
{"2.2 and below": {"cpu": ("float16",)}}, backend_version
)
def linspace(
start: Union[torch.Tensor, float],
stop: Union[torch.Tensor, float],
/,
num: int,
*,
axis: Optional[int] = None,
endpoint: bool = True,
dtype: torch.dtype,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if axis is None:
axis = -1
if not endpoint:
if dtype is not None:
ans = linspace_helper(
start, stop, num + 1, axis, dtype=dtype, device=device
)
else:
ans = linspace_helper(start, stop, num + 1, axis, device=device)
if axis < 0:
axis += len(ans.shape)
ans = ans[_slice_at_axis(slice(None, -1), axis)]
else:
if dtype is not None:
ans = linspace_helper(start, stop, num, axis, dtype=dtype, device=device)
else:
ans = linspace_helper(start, stop, num, axis, device=device)
if (
endpoint
and ans.shape[0] > 1
and (not isinstance(start, torch.Tensor))
and (not isinstance(stop, torch.Tensor))
):
ans[-1] = stop
if (
ans.shape[0] >= 1
and (not isinstance(start, torch.Tensor))
and (not isinstance(stop, torch.Tensor))
and ans[0] != start
):
ans[0] = start
if "int" in str(dtype) and torch.is_floating_point(ans):
ans = torch.floor(ans)
return ans.to(dtype)
linspace.support_native_out = True
def linspace_helper(start, stop, num, axis=None, *, dtype=None, device):
num = num.detach().numpy().item() if isinstance(num, torch.Tensor) else num
start_is_array = isinstance(start, torch.Tensor)
stop_is_array = isinstance(stop, torch.Tensor)
linspace_method = torch.linspace
sos_shape = []
if start_is_array:
start_shape = list(start.shape)
sos_shape = start_shape
if num == 1:
if axis is not None:
return start.unsqueeze(axis).to(device)
else:
return start.unsqueeze(-1).to(device)
start = start.reshape((-1,))
linspace_method = (
_differentiable_linspace if start.requires_grad else torch.linspace
)
if stop_is_array:
stop_shape = list(stop.shape)
sos_shape = stop_shape
if num == 1:
return (
torch.ones(
stop_shape[:axis] + [1] + stop_shape[axis:],
device=device,
)
* start
)
stop = stop.reshape((-1,))
linspace_method = (
_differentiable_linspace if stop.requires_grad else torch.linspace
)
if start_is_array and stop_is_array:
if num < start.shape[0]:
start = start.unsqueeze(-1)
stop = stop.unsqueeze(-1)
diff = stop - start
inc = diff / (num - 1)
res = [start]
res += [start + inc * i for i in range(1, num - 1)]
res.append(stop)
else:
res = [
linspace_method(strt, stp, num, device=device)
for strt, stp in zip(start, stop)
]
elif start_is_array and not stop_is_array:
if num < start.shape[0]:
start = start.unsqueeze(-1)
diff = stop - start
inc = diff / (num - 1)
res = [start]
res += [start + inc * i for i in range(1, num - 1)]
res.append(torch.ones_like(start, device=device) * stop)
else:
res = [linspace_method(strt, stop, num, device=device) for strt in start]
elif not start_is_array and stop_is_array:
if num < stop.shape[0]:
stop = stop.unsqueeze(-1)
diff = stop - start
inc = diff / (num - 1)
res = [torch.ones_like(stop, device=device) * start]
res += [start + inc * i for i in range(1, num - 1)]
res.append(stop)
else:
res = [linspace_method(start, stp, num, device=device) for stp in stop]
else:
return linspace_method(start, stop, num, dtype=dtype, device=device)
res = torch.cat(res, -1).reshape(sos_shape + [num])
if axis is not None:
ndim = res.ndim
perm = list(range(0, ndim - 1))
perm.insert(axis % (ndim + 1), ndim - 1)
res = res.permute(perm)
return res.to(device)
def meshgrid(
*arrays: torch.Tensor,
sparse: bool = False,
indexing: str = "xy",
out: Optional[torch.Tensor] = None,
) -> List[torch.Tensor]:
if not sparse:
return list(torch.meshgrid(*arrays, indexing=indexing))
sd = (1,) * len(arrays)
res = [
torch.reshape(torch.as_tensor(a), (sd[:i] + (-1,) + sd[i + 1 :]))
for i, a in enumerate(arrays)
]
if indexing == "xy" and len(arrays) > 1:
res[0] = torch.reshape(res[0], (1, -1) + sd[2:])
res[1] = torch.reshape(res[1], (-1, 1) + sd[2:])
return res
def ones(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: torch.dtype,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.ones(shape, dtype=dtype, device=device, out=out)
ones.support_native_out = True
def ones_like_v_0p4p0_and_above(
x: torch.Tensor,
/,
*,
dtype: torch.dtype,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.ones_like(x, dtype=dtype, device=device)
def ones_like_v_0p3p0_to_0p3p1(
x: torch.Tensor,
/,
*,
dtype: torch.dtype,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.ones_like(x, out=out)
def ones_like_v_0p1p12_to_0p2p0(
x: torch.Tensor,
/,
*,
dtype: torch.dtype,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
):
if len(x.shape) == 1:
for i in range(x.shape[0]):
x[i] = 1
return x
for i in range(x.shape[0]):
x[i, :] = ones_like_v_0p1p12_to_0p2p0(x[i, :], dtype=dtype)
return x
def tril(
x: torch.Tensor, /, *, k: int = 0, out: Optional[torch.Tensor] = None
) -> torch.Tensor:
return torch.tril(x, diagonal=k, out=out)
tril.support_native_out = True
def triu(
x: torch.Tensor, /, *, k: int = 0, out: Optional[torch.Tensor] = None
) -> torch.Tensor:
return torch.triu(x, diagonal=k, out=out)
triu.support_native_out = True
def zeros(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: torch.dtype,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> Tensor:
return torch.zeros(shape, dtype=dtype, device=device, out=out)
zeros.support_native_out = True
def zeros_like(
x: torch.Tensor,
/,
*,
dtype: torch.dtype,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.zeros_like(x, dtype=dtype, device=device)
# Extra #
# ------#
array = asarray
def copy_array(
x: torch.Tensor,
*,
to_ivy_array: bool = True,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if to_ivy_array:
return ivy.to_ivy(x.clone())
return x.clone()
def one_hot(
indices: torch.Tensor,
depth: int,
/,
*,
on_value: Optional[torch.Tensor] = None,
off_value: Optional[torch.Tensor] = None,
axis: Optional[int] = None,
dtype: Optional[torch.dtype] = None,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
on_none = on_value is None
off_none = off_value is None
if dtype is None:
if on_none and off_none:
dtype = torch.float32
else:
if not on_none:
dtype = torch.tensor(on_value).dtype
elif not off_none:
dtype = torch.tensor(off_value).dtype
else:
dtype = ivy.as_native_dtype(dtype)
on_value = torch.tensor(1.0) if on_none else torch.tensor(on_value, dtype=dtype)
off_value = torch.tensor(0.0) if off_none else torch.tensor(off_value, dtype=dtype)
res = torch.nn.functional.one_hot(indices.to(torch.int64), depth)
if not on_none or not off_none:
res = torch.where(res == 1, on_value, off_value)
if axis is not None:
res = torch.moveaxis(res, -1, axis)
return res.to(device, dtype)
def frombuffer(
buffer: bytes,
dtype: Optional[torch.dtype] = float,
count: Optional[int] = -1,
offset: Optional[int] = 0,
) -> torch.Tensor:
buffer_copy = copy.deepcopy(buffer)
dtype = ivy.as_native_dtype(dtype)
return torch.frombuffer(buffer_copy, dtype=dtype, count=count, offset=offset)
def triu_indices(
n_rows: int,
n_cols: Optional[int] = None,
k: int = 0,
/,
*,
device: torch.device = None,
) -> Tuple[torch.Tensor]:
n_cols = n_rows if n_cols is None else n_cols
return tuple(
torch.triu_indices(
row=n_rows, col=n_cols, offset=k, dtype=torch.int64, device=device
)
)
| ivy/ivy/functional/backends/torch/creation.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/creation.py",
"repo_id": "ivy",
"token_count": 7940
} | 25 |
# global
from typing import (
Iterable,
Optional,
Union,
Sequence,
Tuple,
NamedTuple,
List,
Literal,
Callable,
Any,
)
from numbers import Number
from collections import namedtuple
import torch
# local
from ivy.func_wrapper import (
with_unsupported_dtypes,
with_supported_dtypes,
handle_out_argument,
)
from .. import backend_version
import ivy
from ivy.functional.ivy.experimental.manipulation import (
_to_tf_padding,
_check_paddle_pad,
_to_paddle_padding,
)
def moveaxis(
a: torch.Tensor,
source: Union[int, Sequence[int]],
destination: Union[int, Sequence[int]],
/,
*,
copy: Optional[bool] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.moveaxis(a, source, destination)
moveaxis.support_native_out = False
def heaviside(
x1: torch.tensor,
x2: torch.tensor,
/,
*,
out: Optional[torch.tensor] = None,
) -> torch.tensor:
return torch.heaviside(
x1,
x2,
out=out,
)
heaviside.support_native_out = True
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "complex64", "complex128")},
backend_version,
)
def pad(
input: torch.Tensor,
pad_width: Union[Iterable[Tuple[int]], int],
/,
*,
mode: Union[
Literal[
"constant",
"dilated",
"edge",
"linear_ramp",
"maximum",
"mean",
"median",
"minimum",
"reflect",
"symmetric",
"wrap",
"empty",
],
Callable,
] = "constant",
stat_length: Union[Iterable[Tuple[int]], int] = 1,
constant_values: Union[Iterable[Tuple[Number]], Number] = 0,
end_values: Union[Iterable[Tuple[Number]], Number] = 0,
reflect_type: Literal["even", "odd"] = "even",
**kwargs: Optional[Any],
) -> torch.Tensor:
constant_values = (
float(constant_values)
if not isinstance(constant_values, float)
else constant_values
)
pad_width = _to_paddle_padding(pad_width, input.ndim)
mode = "replicate" if mode == "edge" else "circular" if mode == "wrap" else mode
if mode == "circular":
return (
torch.nn.functional.pad(
input.unsqueeze(0).unsqueeze(0),
tuple(pad_width),
mode=mode,
)
.squeeze(0)
.squeeze(0)
)
elif mode == "constant":
return torch.nn.functional.pad(
input.unsqueeze(0),
tuple(pad_width),
mode=mode,
value=constant_values,
).squeeze(0)
else:
return torch.nn.functional.pad(
input.unsqueeze(0),
tuple(pad_width),
mode=mode,
).squeeze(0)
pad.partial_mixed_handler = (
lambda *args, mode="constant", constant_values=0, reflect_type="even", **kwargs: (
_check_torch_pad(mode, reflect_type, args[1], args[0].shape, constant_values)
)
)
def _check_torch_pad(mode, reflect_type, pad_width, input_shape, constant_values):
ndim = len(input_shape)
pad_width = _to_tf_padding(pad_width, ndim)
if mode != "constant" and (ndim > 4 or (ndim > 1 and len(pad_width) > ndim - 1)):
return False
return _check_paddle_pad(
mode,
reflect_type,
pad_width,
input_shape,
constant_values,
4,
extend=False,
) and (
mode != "wrap"
or all(
pad_width[i][0] <= s and pad_width[i][1] <= s
for i, s in enumerate(input_shape)
)
)
def flipud(
m: torch.Tensor,
/,
*,
copy: Optional[bool] = None,
out: Optional[torch.tensor] = None,
) -> torch.tensor:
return torch.flipud(m)
flipud.support_native_out = False
def vstack(
arrays: Sequence[torch.Tensor],
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if not isinstance(arrays, tuple):
arrays = tuple(arrays)
return torch.vstack(arrays, out=None)
def hstack(
arrays: Sequence[torch.Tensor],
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if not isinstance(arrays, tuple):
arrays = tuple(arrays)
return torch.hstack(arrays, out=None)
def rot90(
m: torch.Tensor,
/,
*,
copy: Optional[bool] = None,
k: int = 1,
axes: Tuple[int, int] = (0, 1),
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.rot90(m, k, axes)
def top_k(
x: torch.Tensor,
k: int,
/,
*,
axis: int = -1,
largest: bool = True,
sorted: bool = True,
out: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
k = min(k, x.shape[axis])
topk_res = NamedTuple(
"top_k", [("values", torch.Tensor), ("indices", torch.Tensor)]
)
if not largest:
indices = torch.argsort(x, dim=axis)
indices = torch.index_select(indices, axis, torch.arange(k))
else:
indices = torch.argsort(-x, dim=axis)
indices = torch.index_select(indices, axis, torch.arange(k))
if not sorted:
indices = torch.sort(indices, dim=axis)[0]
val = torch.gather(x, axis, indices)
return topk_res(val, indices)
def fliplr(
m: torch.Tensor,
/,
*,
copy: Optional[bool] = None,
out: Optional[torch.tensor] = None,
) -> torch.tensor:
return torch.fliplr(m)
fliplr.support_native_out = False
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def i0(
x: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.i0(x, out=out)
i0.support_native_out = True
def flatten(
x: torch.Tensor,
/,
*,
copy: Optional[bool] = None,
start_dim: Optional[int] = 0,
end_dim: Optional[int] = -1,
order: Optional[str] = "C",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.flatten(x, start_dim=start_dim, end_dim=end_dim)
flatten.partial_mixed_handler = (
lambda *args, copy=None, start_dim=0, end_dim=1, order="C", **kwargs: order == "C"
)
def vsplit(
ary: torch.Tensor,
indices_or_sections: Union[int, Sequence[int], torch.Tensor],
/,
*,
copy: Optional[bool] = None,
) -> List[torch.Tensor]:
if len(ary.shape) < 2:
raise ivy.utils.exceptions.IvyError(
"vsplit only works on arrays of 2 or more dimensions"
)
return ivy.split(ary, num_or_size_splits=indices_or_sections, axis=0)
def dsplit(
ary: torch.Tensor,
indices_or_sections: Union[int, Sequence[int], torch.Tensor],
/,
*,
copy: Optional[bool] = None,
) -> List[torch.Tensor]:
if len(ary.shape) < 2:
raise ivy.utils.exceptions.IvyError(
"dsplit only works on arrays of 3 or more dimensions"
)
return ivy.split(ary, num_or_size_splits=indices_or_sections, axis=2)
def atleast_1d(*arys: torch.Tensor, copy: Optional[bool] = None) -> List[torch.Tensor]:
transformed = torch.atleast_1d(*arys)
if isinstance(transformed, tuple):
return list(transformed)
return transformed
def dstack(
arrays: Sequence[torch.Tensor],
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if not isinstance(arrays, tuple):
arrays = tuple(arrays)
return torch.dstack(arrays, out=out)
def atleast_2d(*arys: torch.Tensor, copy: Optional[bool] = None) -> List[torch.Tensor]:
transformed = torch.atleast_2d(*arys)
if isinstance(transformed, tuple):
return list(transformed)
return transformed
def atleast_3d(
*arys: Union[torch.Tensor, bool, Number], copy: Optional[bool] = None
) -> List[torch.Tensor]:
transformed = torch.atleast_3d(*arys)
if isinstance(transformed, tuple):
return list(transformed)
return transformed
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version)
def take_along_axis(
arr: torch.Tensor,
indices: torch.Tensor,
axis: int,
/,
*,
mode: str = "fill",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if arr.ndim != indices.ndim:
raise ivy.utils.exceptions.IvyException(
"arr and indices must have the same number of dimensions;"
+ f" got {arr.ndim} vs {indices.ndim}"
)
indices = indices.long()
if mode not in ["clip", "fill", "drop"]:
raise ValueError(
f"Invalid mode '{mode}'. Valid modes are 'clip', 'fill', 'drop'."
)
arr_shape = arr.shape
if axis < 0:
axis += arr.ndim
if mode == "clip":
max_index = arr.shape[axis] - 1
indices = torch.clamp(indices, 0, max_index)
elif mode in {"fill", "drop"}:
if "float" in str(arr.dtype) or "complex" in str(arr.dtype):
fill_value = float("nan")
elif "uint" in str(arr.dtype):
fill_value = torch.iinfo(arr.dtype).max
elif "int" in str(arr.dtype):
fill_value = -torch.iinfo(arr.dtype).max - 1
indices = torch.where((indices < 0) | (indices >= arr.shape[axis]), -1, indices)
arr_shape = list(arr_shape)
arr_shape[axis] = 1
fill_arr = torch.full(arr_shape, fill_value, dtype=arr.dtype)
arr = torch.cat([arr, fill_arr], dim=axis)
indices = torch.where(indices < 0, arr.shape[axis] + indices, indices)
return torch.take_along_dim(arr, indices, axis, out=out)
def hsplit(
ary: torch.Tensor,
indices_or_sections: Union[int, Tuple[int, ...]],
/,
*,
copy: Optional[bool] = None,
) -> List[torch.Tensor]:
if len(ary.shape) == 1:
return ivy.split(ary, num_or_size_splits=indices_or_sections, axis=0)
return ivy.split(ary, num_or_size_splits=indices_or_sections, axis=1)
take_along_axis.support_native_out = True
def broadcast_shapes(*shapes: Union[List[int], List[Tuple]]) -> Tuple[int]:
return tuple(torch.broadcast_shapes(*shapes))
broadcast_shapes.support_native_out = False
def expand(
x: torch.Tensor,
shape: Union[List[int], List[Tuple]],
/,
*,
copy: Optional[bool] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return x.expand(shape)
expand.support_native_out = False
@with_unsupported_dtypes({"2.2 and below": ("complex", "float16")}, backend_version)
def unique_consecutive(
x: torch.Tensor,
/,
*,
axis: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
Results = namedtuple(
"Results",
["output", "inverse_indices", "counts"],
)
output, inverse_indices, counts = torch.unique_consecutive(
x,
return_inverse=True,
return_counts=True,
dim=axis,
)
return Results(
output.to(x.dtype),
inverse_indices,
counts,
)
def column_stack(
arrays: Sequence[torch.Tensor], /, *, out: Optional[torch.Tensor] = None
) -> torch.Tensor:
return torch.column_stack(arrays)
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, backend_version)
def put_along_axis(
arr: torch.Tensor,
indices: torch.Tensor,
values: Union[int, torch.Tensor],
axis: int,
/,
*,
mode: Literal["sum", "min", "max", "mul", "replace"] = "replace",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
mode_mappings = {
"sum": "sum",
"min": "amin",
"max": "amax",
"mul": "prod",
"replace": "replace",
}
mode = mode_mappings.get(mode, mode)
indices = indices.to(torch.int64)
if mode == "replace":
return torch.scatter(arr, axis, indices, values, out=out)
else:
return torch.scatter_reduce(arr, axis, indices, values, reduce=mode, out=out)
put_along_axis.partial_mixed_handler = lambda *args, mode=None, **kwargs: mode in [
"replace",
"sum",
"mul",
"mean",
"max",
"min",
]
def concat_from_sequence(
input_sequence: Union[Tuple[torch.Tensor], List[torch.Tensor]],
/,
*,
new_axis: int = 0,
axis: int = 0,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
is_tuple = type(input_sequence) is tuple
if is_tuple:
input_sequence = list(input_sequence)
if new_axis == 0:
ret = torch.cat(input_sequence, dim=axis)
return ret
elif new_axis == 1:
ret = torch.stack(input_sequence, dim=axis)
return ret
def _take_with_axis(
x: torch.Tensor, indices: torch.Tensor, /, *, axis: int, mode: str
) -> torch.Tensor:
    # performs no bounds checks; the default 'raise' mode (as on CPU) is handled
    # by the caller, so validating indices beforehand is recommended
x_shape = x.shape[axis]
if not ivy.exists(axis):
x = x.flatten()
x_shape = torch.prod(torch.tensor(x_shape))
else:
x_shape = x.shape[axis]
# wrap
if mode == "wrap":
indices = ((indices % x_shape) + x_shape) % x_shape
# clip
else:
indices = torch.clip(indices, 0, x_shape - 1)
rank = len(x.shape)
axis = ((axis % rank) + rank) % rank
slicer = ([slice(None)] * axis) + [indices]
slicer = tuple(slicer)
return x[slicer]
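# Worked example: with 4 elements along `axis`, index 5 maps to 1 under
# mode="wrap" (((5 % 4) + 4) % 4) and to 3 under mode="clip"; an index of -1
# wraps to 3 or clips to 0, respectively.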
def take(
x: Union[int, List, torch.Tensor],
indices: Union[int, List, torch.Tensor],
/,
*,
axis: Optional[int] = None,
mode: str = "clip",
fill_value: Optional[Number] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if mode not in ["raise", "wrap", "clip", "fill"]:
raise ValueError("mode must be one of 'clip', 'raise', 'wrap', or 'fill'")
if not isinstance(x, torch.Tensor):
x = torch.tensor(x)
if len(x.shape) == 0:
x = torch.tensor([x])
if not isinstance(indices, torch.Tensor):
indices = torch.tensor(indices)
if indices.dtype.is_floating_point:
indices = indices.to(torch.int64)
# raise
if mode == "raise":
mode = "clip"
if ivy.exists(axis):
try:
x_shape = x.shape[axis]
except Exception as e:
rank = len(x.shape)
raise IndexError(
"IndexError: Dimension out of range"
f"(expected to be in range of[-{rank}, {rank-1}]"
f", but got {axis})"
) from e
else:
x_shape = torch.prod(torch.tensor(x.shape))
bound_check = (indices < -x_shape) | (indices >= x_shape)
if torch.any(torch.tensor(bound_check)):
raise IndexError("index out of range in self")
# clip, wrap
if mode != "fill":
ret = _take_with_axis(x, indices, axis=axis, mode=mode)
if ivy.exists(out):
ivy.inplace_update(out, ret)
return ret
# fill
x_dtype = x.dtype
if fill_value is None:
# set according to jax behaviour
# https://tinyurl.com/66jn68uj
if x_dtype.is_floating_point or x_dtype.is_complex:
# NaN for inexact types
fill_value = float("NaN")
else:
if x_dtype == torch.bool:
# True for booleans
fill_value = True
elif str(x_dtype).split(".")[-1].startswith("u"):
# the largest positive value for unsigned types
fill_value = torch.iinfo(x_dtype).max
else:
# the largest negative value for signed types
fill_value = torch.iinfo(x_dtype).min
fill_value = torch.tensor(fill_value, dtype=x_dtype)
x_shape = x.shape
ret = _take_with_axis(x, indices, axis=axis, mode="wrap")
if len(ret.shape) == 0:
# if scalar (paddle scalar), scalar fill (replace)
if torch.any(torch.tensor(indices != 0)):
ret = fill_value
else:
if ivy.exists(axis):
rank = len(x.shape)
axis = ((axis % rank) + rank) % rank
x_shape = x_shape[axis]
else:
axis = 0
x_shape = torch.prod(x_shape)
bound_check = torch.tensor((indices < -x_shape) | (indices >= x_shape))
if torch.any(bound_check):
if axis > 0:
bound_check = torch.broadcast_to(
bound_check, (*x.shape[:axis], *bound_check.shape)
)
ret[bound_check] = fill_value
if ivy.exists(out):
ivy.inplace_update(out, ret)
return ret
def trim_zeros(a: torch.Tensor, /, *, trim: Optional[str] = "bf") -> torch.Tensor:
first = 0
trim = trim.upper()
if "F" in trim:
for i in a:
if i != 0.0:
break
else:
first = first + 1
last = len(a)
if "B" in trim:
for i in torch.flip(a, [0]):
if i != 0.0:
break
else:
last = last - 1
return a[first:last]
@handle_out_argument
def unflatten(
x: torch.Tensor,
/,
shape: Tuple[int] = None,
dim: Optional[int] = 0,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
res = torch.unflatten(x, dim, shape)
return res
| ivy/ivy/functional/backends/torch/experimental/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/experimental/manipulation.py",
"repo_id": "ivy",
"token_count": 8099
} | 26 |
"""Collection of PyTorch random functions, wrapped to fit Ivy syntax and
signature."""
# global
import torch
from typing import Optional, Union, Sequence
# local
import ivy
from ivy.functional.ivy.random import (
_check_bounds_and_get_shape,
_randint_check_dtype_and_bound,
_check_valid_scale,
)
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
# Extra #
# ------#
def random_uniform(
*,
low: Union[float, torch.Tensor] = 0.0,
high: Union[float, torch.Tensor] = 1.0,
shape: Optional[Union[torch.Tensor, ivy.NativeShape, Sequence[int]]] = None,
dtype: torch.dtype,
device: torch.device = None,
seed: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
shape = _check_bounds_and_get_shape(low, high, shape).shape
rand_range = high - low
if seed:
torch.manual_seed(seed)
if torch.is_tensor(shape):
shape = shape.tolist()
return (
torch.rand(shape, device=device, dtype=torch.float) * rand_range + low
).type(dtype)
def random_normal(
*,
mean: Union[float, torch.Tensor] = 0.0,
std: Union[float, torch.Tensor] = 1.0,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
dtype: torch.dtype,
seed: Optional[int] = None,
device: torch.device = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
_check_valid_scale(std)
shape = _check_bounds_and_get_shape(mean, std, shape).shape
dtype = ivy.as_native_dtype(dtype)
if seed:
torch.manual_seed(seed)
if isinstance(mean, (int, float)) and isinstance(std, (int, float)):
return torch.normal(mean, std, shape, out=out).type(dtype).to(device)
return torch.normal(mean, std, out=out).type(dtype).to(device)
random_normal.support_native_out = True
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, backend_version)
def multinomial(
population_size: int,
num_samples: int,
/,
*,
batch_size: int = 1,
probs: Optional[torch.Tensor] = None,
replace: bool = True,
device: torch.device = None,
seed: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if probs is None:
probs = (
torch.ones(
(
batch_size,
population_size,
)
)
/ population_size
)
if seed:
torch.manual_seed(seed)
return torch.multinomial(probs.float(), num_samples, replace, out=out).to(device)
multinomial.support_native_out = True
def randint(
low: Union[int, torch.Tensor],
high: Union[int, torch.Tensor],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: torch.device = None,
dtype: Optional[Union[torch.dtype, ivy.Dtype]] = None,
seed: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if not dtype:
dtype = ivy.default_int_dtype()
dtype = ivy.as_native_dtype(dtype)
_randint_check_dtype_and_bound(low, high, dtype)
shape = _check_bounds_and_get_shape(low, high, shape).shape
rand_range = high - low
if seed:
torch.manual_seed(seed)
return (torch.rand(shape, device=device) * rand_range + low).to(dtype)
def seed(*, seed_value: int = 0):
torch.manual_seed(seed_value)
torch.cuda.manual_seed(seed_value)
if hasattr(torch.backends, "mps"):
if torch.backends.mps.is_available():
from torch import mps
mps.manual_seed(seed_value)
return
def shuffle(
x: torch.Tensor,
axis: Optional[int] = 0,
/,
*,
seed: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if len(x.shape) == 0:
return x
batch_size = x.shape[0]
if seed:
torch.manual_seed(seed)
return torch.index_select(x, 0, torch.randperm(batch_size), out=out)
shuffle.support_native_out = True
| ivy/ivy/functional/backends/torch/random.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/random.py",
"repo_id": "ivy",
"token_count": 1748
} | 27 |
# local
from . import lax_numpy
_IndexUpdateHelper = lax_numpy._IndexUpdateHelper
_IndexUpdateRef = lax_numpy._IndexUpdateRef
| ivy/ivy/functional/frontends/jax/_src/numpy/array_methods.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/_src/numpy/array_methods.py",
"repo_id": "ivy",
"token_count": 40
} | 28 |
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.frontends.jax.array import Array
import ivy.functional.frontends.jax.numpy as jnp_frontend
from ivy.functional.frontends.jax.func_wrapper import (
to_ivy_arrays_and_back,
outputs_to_frontend_arrays,
handle_jax_dtype,
inputs_to_ivy_arrays,
)
from ivy.func_wrapper import handle_out_argument
from ivy import with_unsupported_device_and_dtypes
ndarray = Array
@with_unsupported_device_and_dtypes(
{
"0.4.24 and below": {
"cpu": (
"float16",
"bflooat16",
"complex64",
"complex128",
),
"gpu": (
"complex64",
"complex128",
),
}
},
"jax",
)
@handle_jax_dtype
@outputs_to_frontend_arrays
def arange(start, stop=None, step=1, dtype=None):
return ivy.arange(start, stop, step=step, dtype=dtype)
@handle_jax_dtype
@to_ivy_arrays_and_back
def array(object, dtype=None, copy=True, order="K", ndmin=0):
if order is not None and order != "K":
raise ivy.utils.exceptions.IvyNotImplementedException(
"Only implemented for order='K'"
)
device = ivy.default_device()
if ivy.is_array(object):
device = ivy.dev(object)
ret = ivy.array(object, dtype=dtype, device=device)
if ivy.get_num_dims(ret) < ndmin:
ret = ivy.expand_dims(ret, axis=list(range(ndmin - ivy.get_num_dims(ret))))
if ret.shape == () and dtype is None:
return Array(ret, weak_type=True)
return Array(ret)
@handle_jax_dtype
@to_ivy_arrays_and_back
def asarray(a, dtype=None, order=None):
return array(a, dtype=dtype, order=order)
@to_ivy_arrays_and_back
def bool_(x):
return ivy.astype(x, ivy.bool)
@to_ivy_arrays_and_back
def cdouble(x):
return ivy.astype(x, ivy.complex128)
@to_ivy_arrays_and_back
@handle_out_argument
def compress(condition, a, *, axis=None, out=None):
condition_arr = ivy.asarray(condition).astype(bool)
if condition_arr.ndim != 1:
raise ivy.utils.exceptions.IvyException("Condition must be a 1D array")
if axis is None:
arr = ivy.asarray(a).flatten()
axis = 0
else:
arr = ivy.moveaxis(a, axis, 0)
if condition_arr.shape[0] > arr.shape[0]:
raise ivy.utils.exceptions.IvyException(
"Condition contains entries that are out of bounds"
)
arr = arr[: condition_arr.shape[0]]
return ivy.moveaxis(arr[condition_arr], 0, axis)
@to_ivy_arrays_and_back
def copy(a, order=None):
return array(a, order=order)
@to_ivy_arrays_and_back
def csingle(x):
return ivy.astype(x, ivy.complex64)
@to_ivy_arrays_and_back
def double(x):
return ivy.astype(x, ivy.float64)
@handle_jax_dtype
@to_ivy_arrays_and_back
def empty(shape, dtype=None):
return Array(ivy.empty(shape=shape, dtype=dtype))
@handle_jax_dtype
@to_ivy_arrays_and_back
def empty_like(prototype, dtype=None, shape=None):
# XLA cannot create uninitialized arrays
# jax.numpy.empty_like returns an array initialized with zeros.
if shape:
return ivy.zeros(shape, dtype=dtype)
return ivy.zeros_like(prototype, dtype=dtype)
@handle_jax_dtype
@to_ivy_arrays_and_back
def eye(N, M=None, k=0, dtype=None):
return Array(ivy.eye(N, M, k=k, dtype=dtype))
@to_ivy_arrays_and_back
def from_dlpack(x):
return ivy.from_dlpack(x)
@to_ivy_arrays_and_back
def frombuffer(buffer, dtype="float", count=-1, offset=0):
return ivy.frombuffer(buffer, dtype, count, offset)
@to_ivy_arrays_and_back
def full(shape, fill_value, dtype=None):
return ivy.full(shape, fill_value, dtype=dtype)
@to_ivy_arrays_and_back
def full_like(a, fill_value, dtype=None, shape=None):
return ivy.full_like(a, fill_value, dtype=dtype)
@to_ivy_arrays_and_back
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
cr = ivy.log(stop / start) / (num - 1 if endpoint else num)
x = ivy.linspace(
0, cr * (num - 1 if endpoint else num), num, endpoint=endpoint, axis=axis
)
x = ivy.exp(x)
x = start * x
x[0] = (start * cr) / cr
if endpoint:
x[-1] = stop
return x.asarray(dtype=dtype)
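# Worked example: geomspace(1, 1000, num=4) sets cr = log(1000) / 3,
# exponentiates the evenly spaced exponents and scales by start, giving
# [1., 10., 100., 1000.]; the first and last entries are then re-set from
# start and stop.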
@handle_jax_dtype
@to_ivy_arrays_and_back
def hstack(tup, dtype=None):
# TODO: dtype supported in JAX v0.3.20
return ivy.hstack(tup)
@handle_jax_dtype
@to_ivy_arrays_and_back
def identity(n, dtype=None):
return ivy.eye(n, dtype=dtype)
@to_ivy_arrays_and_back
def in1d(ar1, ar2, assume_unique=False, invert=False):
del assume_unique
ar1_flat = ivy.flatten(ar1)
ar2_flat = ivy.flatten(ar2)
if invert:
return (ar1_flat[:, None] != ar2_flat[None, :]).all(axis=-1)
else:
return (ar1_flat[:, None] == ar2_flat[None, :]).any(axis=-1)
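# Worked example: in1d([1, 2, 5], [2, 3]) broadcasts every element of ar1
# against every element of ar2 and reduces along the last axis, yielding
# [False, True, False]; invert=True flips the result.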
@inputs_to_ivy_arrays
def iterable(y):
return hasattr(y, "__iter__") and y.ndim > 0
@handle_jax_dtype
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"0.4.24 and below": (
"float16",
"bfloat16",
)
},
"jax",
)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0):
ret = ivy.linspace(start, stop, num, axis=axis, endpoint=endpoint, dtype=dtype)
if retstep:
if endpoint:
num -= 1
step = ivy.divide(ivy.subtract(stop, start), num)
return ret, step
return ret
@handle_jax_dtype
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"0.4.24 and below": (
"float16",
"bfloat16",
)
},
"jax",
)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):
if not endpoint:
interval = (stop - start) / num
stop -= interval
return ivy.logspace(start, stop, num, base=base, axis=axis, dtype=dtype)
@to_ivy_arrays_and_back
def meshgrid(*x, copy=True, sparse=False, indexing="xy"):
# TODO: handle 'copy' argument when ivy.meshgrid supports it
ivy_meshgrid = ivy.meshgrid(*x, sparse=sparse, indexing=indexing)
return ivy_meshgrid
@to_ivy_arrays_and_back
def ndim(a):
if not isinstance(a, ivy.Array):
return 0
return ivy.astype(ivy.array(a.ndim), ivy.int64)
@handle_jax_dtype
@to_ivy_arrays_and_back
def ones(shape, dtype=None):
return Array(ivy.ones(shape, dtype=dtype))
@handle_jax_dtype
@to_ivy_arrays_and_back
def ones_like(a, dtype=None, shape=None):
if shape:
return ivy.ones(shape, dtype=dtype)
return ivy.ones_like(a, dtype=dtype)
@to_ivy_arrays_and_back
def setdiff1d(ar1, ar2, assume_unique=False, *, size=None, fill_value=None):
fill_value = ivy.array(0 if fill_value is None else fill_value, dtype=ar1.dtype)
if ar1.size == 0:
return ivy.full(size or 0, fill_value, dtype=ar1.dtype)
if not assume_unique:
val = (
ivy.to_scalar(ivy.all(ar1))
if ivy.is_bool_dtype(ar1.dtype)
else ivy.to_scalar(ivy.min(ar1))
)
ar1 = jnp_frontend.unique(ar1, size=size and ar1.size, fill_value=val).ivy_array
mask = in1d(ar1, ar2, invert=True).ivy_array
if size is None:
return ar1[mask]
else:
if not assume_unique:
# Set mask to zero at locations corresponding to unique() padding.
n_unique = ar1.size + 1 - (ar1 == ar1[0]).sum(dtype=ivy.int64)
mask = ivy.where(ivy.arange(ar1.size) < n_unique, mask, False)
return ivy.where(
ivy.arange(size) < mask.sum(dtype=ivy.int64),
ar1[jnp_frontend.where(mask, size=size)[0].ivy_array],
fill_value,
)
@to_ivy_arrays_and_back
def single(x):
return ivy.astype(x, ivy.float32)
@to_ivy_arrays_and_back
def size(a, axis=None):
ivy.set_default_int_dtype("int64")
if axis is not None:
sh = ivy.shape(a)
return sh[axis]
return a.size
@to_ivy_arrays_and_back
def triu(m, k=0):
return ivy.triu(m, k=k)
@to_ivy_arrays_and_back
def vander(x, N=None, increasing=False):
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array")
if N == 0:
return ivy.array([], dtype=x.dtype).reshape((x.shape[0], 0))
else:
return ivy.vander(x, N=N, increasing=increasing)
@handle_jax_dtype
@to_ivy_arrays_and_back
def zeros(shape, dtype=None):
return Array(ivy.zeros(shape, dtype=dtype))
@handle_jax_dtype
@to_ivy_arrays_and_back
def zeros_like(a, dtype=None, shape=None):
if shape:
return ivy.zeros(shape, dtype=dtype)
return ivy.zeros_like(a, dtype=dtype)
| ivy/ivy/functional/frontends/jax/numpy/creation.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/numpy/creation.py",
"repo_id": "ivy",
"token_count": 4081
} | 29 |
import ivy
def array(obj, dtype=None, copy=True, ndmin=4):
ret = ivy.array(obj, dtype=dtype, copy=copy)
while ndmin > len(ret.shape):
ret = ivy.expand_dims(ret, axis=0)
return ret
| ivy/ivy/functional/frontends/mindspore/numpy.py/0 | {
"file_path": "ivy/ivy/functional/frontends/mindspore/numpy.py",
"repo_id": "ivy",
"token_count": 95
} | 30 |
# global
import sys
# local
import ivy
from ivy.utils.exceptions import handle_exceptions
from ivy.functional.frontends import set_frontend_to_specific_version
from typing import Union, Iterable, Tuple
from numbers import Number
from .data_type_routines import dtype
from . import ndarray
from .ndarray import *
from . import scalars
from .scalars import *
# Constructing dtypes are required as ivy.<dtype>
# will change dynamically on the backend and may not be available
_int8 = ivy.IntDtype("int8")
_int16 = ivy.IntDtype("int16")
_int32 = ivy.IntDtype("int32")
_int64 = ivy.IntDtype("int64")
_uint8 = ivy.UintDtype("uint8")
_uint16 = ivy.UintDtype("uint16")
_uint32 = ivy.UintDtype("uint32")
_uint64 = ivy.UintDtype("uint64")
_bfloat16 = ivy.FloatDtype("bfloat16")
_float16 = ivy.FloatDtype("float16")
_float32 = ivy.FloatDtype("float32")
_float64 = ivy.FloatDtype("float64")
_complex64 = ivy.ComplexDtype("complex64")
_complex128 = ivy.ComplexDtype("complex128")
_bool = ivy.Dtype("bool")
numpy_promotion_table = {
(_bool, _bool): _bool,
(_bool, _int8): _int8,
(_bool, _int16): _int16,
(_bool, _int32): _int32,
(_bool, _int64): _int64,
(_bool, _uint8): _uint8,
(_bool, _uint16): _uint16,
(_bool, _uint32): _uint32,
(_bool, _uint64): _uint64,
(_bool, _bfloat16): _bfloat16,
(_bool, _float16): _float16,
(_bool, _float32): _float32,
(_bool, _float64): _float64,
(_bool, _complex64): _complex64,
(_bool, _complex128): _complex128,
(_int8, _bool): _int8,
(_int8, _int8): _int8,
(_int8, _int16): _int16,
(_int8, _int32): _int32,
(_int8, _int64): _int64,
(_int16, _bool): _int16,
(_int16, _int8): _int16,
(_int16, _int16): _int16,
(_int16, _int32): _int32,
(_int16, _int64): _int64,
(_int32, _bool): _int32,
(_int32, _int8): _int32,
(_int32, _int16): _int32,
(_int32, _int32): _int32,
(_int32, _int64): _int64,
(_int64, _bool): _int64,
(_int64, _int8): _int64,
(_int64, _int16): _int64,
(_int64, _int32): _int64,
(_int64, _int64): _int64,
(_uint8, _bool): _uint8,
(_uint8, _uint8): _uint8,
(_uint8, _uint16): _uint16,
(_uint8, _uint32): _uint32,
(_uint8, _uint64): _uint64,
(_uint16, _bool): _uint16,
(_uint16, _uint8): _uint16,
(_uint16, _uint16): _uint16,
(_uint16, _uint32): _uint32,
(_uint16, _uint64): _uint64,
(_uint32, _bool): _uint32,
(_uint32, _uint8): _uint32,
(_uint32, _uint16): _uint32,
(_uint32, _uint32): _uint32,
(_uint32, _uint64): _uint64,
(_uint64, _bool): _uint64,
(_uint64, _uint8): _uint64,
(_uint64, _uint16): _uint64,
(_uint64, _uint32): _uint64,
(_uint64, _uint64): _uint64,
(_int8, _uint8): _int16,
(_int8, _uint16): _int32,
(_int8, _uint32): _int64,
(_int16, _uint8): _int16,
(_int16, _uint16): _int32,
(_int16, _uint32): _int64,
(_int32, _uint8): _int32,
(_int32, _uint16): _int32,
(_int32, _uint32): _int64,
(_int64, _uint8): _int64,
(_int64, _uint16): _int64,
(_int64, _uint32): _int64,
(_uint8, _int8): _int16,
(_uint16, _int8): _int32,
(_uint32, _int8): _int64,
(_uint8, _int16): _int16,
(_uint16, _int16): _int32,
(_uint32, _int16): _int64,
(_uint8, _int32): _int32,
(_uint16, _int32): _int32,
(_uint32, _int32): _int64,
(_uint8, _int64): _int64,
(_uint16, _int64): _int64,
(_uint32, _int64): _int64,
(_float16, _bool): _float16,
(_float16, _float16): _float16,
(_float16, _float32): _float32,
(_float16, _float64): _float64,
(_float32, _bool): _float32,
(_float32, _float16): _float32,
(_float32, _float32): _float32,
(_float32, _float64): _float64,
(_float64, _bool): _float64,
(_float64, _float16): _float64,
(_float64, _float32): _float64,
(_float64, _float64): _float64,
(_uint64, _int8): _float64,
(_int8, _uint64): _float64,
(_uint64, _int16): _float64,
(_int16, _uint64): _float64,
(_uint64, _int32): _float64,
(_int32, _uint64): _float64,
(_uint64, _int64): _float64,
(_int64, _uint64): _float64,
(_int8, _float16): _float16,
(_float16, _int8): _float16,
(_int8, _float32): _float32,
(_float32, _int8): _float32,
(_int8, _float64): _float64,
(_float64, _int8): _float64,
(_int16, _float16): _float32,
(_float16, _int16): _float32,
(_int16, _float32): _float32,
(_float32, _int16): _float32,
(_int16, _float64): _float64,
(_float64, _int16): _float64,
(_int32, _float16): _float64,
(_float16, _int32): _float64,
(_int32, _float32): _float64,
(_float32, _int32): _float64,
(_int32, _float64): _float64,
(_float64, _int32): _float64,
(_int64, _float16): _float64,
(_float16, _int64): _float64,
(_int64, _float32): _float64,
(_float32, _int64): _float64,
(_int64, _float64): _float64,
(_float64, _int64): _float64,
(_uint8, _float16): _float16,
(_float16, _uint8): _float16,
(_uint8, _float32): _float32,
(_float32, _uint8): _float32,
(_uint8, _float64): _float64,
(_float64, _uint8): _float64,
(_uint16, _float16): _float32,
(_float16, _uint16): _float32,
(_uint16, _float32): _float32,
(_float32, _uint16): _float32,
(_uint16, _float64): _float64,
(_float64, _uint16): _float64,
(_uint32, _float16): _float64,
(_float16, _uint32): _float64,
(_uint32, _float32): _float64,
(_float32, _uint32): _float64,
(_uint32, _float64): _float64,
(_float64, _uint32): _float64,
(_uint64, _float16): _float64,
(_float16, _uint64): _float64,
(_uint64, _float32): _float64,
(_float32, _uint64): _float64,
(_uint64, _float64): _float64,
(_float64, _uint64): _float64,
(_bfloat16, _bfloat16): _bfloat16,
(_bfloat16, _uint8): _bfloat16,
(_uint8, _bfloat16): _bfloat16,
(_bfloat16, _int8): _bfloat16,
(_int8, _bfloat16): _bfloat16,
(_bfloat16, _float32): _float32,
(_float32, _bfloat16): _float32,
(_bfloat16, _float64): _float64,
(_float64, _bfloat16): _float64,
(_complex64, _bool): _complex64,
(_complex64, _int8): _complex64,
(_complex64, _int16): _complex64,
(_complex64, _int32): _complex128,
(_complex64, _int64): _complex128,
(_complex64, _uint8): _complex64,
(_complex64, _uint16): _complex64,
(_complex64, _uint32): _complex128,
(_complex64, _uint64): _complex128,
(_complex64, _float16): _complex64,
(_complex64, _float32): _complex64,
(_complex64, _float64): _complex128,
(_complex64, _bfloat16): _complex64,
(_complex64, _complex64): _complex64,
(_complex64, _complex128): _complex128,
(_complex128, _bool): _complex128,
(_complex128, _int8): _complex128,
(_complex128, _int16): _complex128,
(_complex128, _int32): _complex128,
(_complex128, _int64): _complex128,
(_complex128, _uint8): _complex128,
(_complex128, _uint16): _complex128,
(_complex128, _uint32): _complex128,
(_complex128, _uint64): _complex128,
(_complex128, _float16): _complex128,
(_complex128, _float32): _complex128,
(_complex128, _float64): _complex128,
(_complex128, _bfloat16): _complex128,
(_complex128, _complex64): _complex128,
(_complex128, _complex128): _complex128,
(_int8, _complex64): _complex64,
(_int16, _complex64): _complex64,
(_int32, _complex64): _complex128,
(_int64, _complex64): _complex128,
(_uint8, _complex64): _complex64,
(_uint16, _complex64): _complex64,
(_uint32, _complex64): _complex128,
(_uint64, _complex64): _complex128,
(_float16, _complex64): _complex64,
(_float32, _complex64): _complex64,
(_float64, _complex64): _complex128,
(_bfloat16, _complex64): _complex64,
(_int8, _complex128): _complex128,
(_int16, _complex128): _complex128,
(_int32, _complex128): _complex128,
(_int64, _complex128): _complex128,
(_uint8, _complex128): _complex128,
(_uint16, _complex128): _complex128,
(_uint32, _complex128): _complex128,
(_uint64, _complex128): _complex128,
(_float16, _complex128): _complex128,
(_float32, _complex128): _complex128,
(_float64, _complex128): _complex128,
(_bfloat16, _complex128): _complex128,
}
numpy_str_to_type_table = {
"b": "int8",
"h": "int16",
"i": "int32",
"l": "int64",
"q": "int64",
"B": "uint8",
"H": "uint16",
"I": "uint32",
"L": "uint64",
"e": "float16",
"f": "float32",
"d": "float64",
"?": "bool",
"E": "bfloat16",
"F": "complex64",
"D": "complex128",
"f2": "float16",
"f4": "float32",
"f8": "float64",
"i1": "int8",
"i2": "int16",
"i4": "int32",
"i8": "int64",
"u1": "uint8",
"u2": "uint16",
"u4": "uint32",
"u8": "uint64",
"c8": "complex64",
"c16": "complex128",
"bool_": "bool",
}
numpy_type_to_str_and_num_table = {
"int8": ("b", 1),
"int16": ("h", 3),
"int32": ("i", 5),
"int64": ("l", 7),
"uint8": ("B", 2),
"uint16": ("H", 4),
"uint32": ("I", 6),
"uint64": ("L", 8),
"float16": ("e", 23),
"float32": ("f", 11),
"float64": ("d", 12),
"bool": ("?", 0),
"bfloat16": ("E", 256),
"complex64": ("F", 14),
"complex128": ("D", 15),
}
numpy_scalar_to_dtype = {
bool_: _bool,
number: _float64,
integer: _int64,
signedinteger: _int64,
byte: _int8,
short: _int16,
intc: _int32,
longlong: _int64,
int_: _int64,
unsignedinteger: _uint64,
ubyte: _uint8,
ushort: _uint16,
uintc: _uint32,
ulonglong: _uint64,
uint: _uint64,
inexact: _float64,
floating: _float64,
half: _float16,
single: _float32,
float_: _float64,
_bfloat16: _bfloat16,
complexfloating: _complex128,
csingle: _complex64,
complex_: _complex128,
}
numpy_dtype_to_scalar = {v: k for k, v in numpy_scalar_to_dtype.items()}
numpy_casting_rules = {
_bool: [
_bool,
_uint8,
_uint16,
_uint32,
_uint64,
_int8,
_int16,
_int32,
_int64,
_float16,
_float32,
_float64,
_complex64,
_complex128,
],
_int8: [
_int8,
_int16,
_int32,
_int64,
_float16,
_float32,
_float64,
_complex64,
_complex128,
],
_int16: [
_int16,
_int32,
_int64,
_float32,
_float64,
_complex64,
_complex128,
],
_int32: [_int32, _int64, _float64, _complex128],
_int64: [_int64, _float64, _complex128],
_uint8: [
_uint8,
_uint16,
_uint32,
_uint64,
_int16,
_int32,
_int64,
_float16,
_float32,
_float64,
_complex64,
_complex128,
],
_uint16: [
_uint16,
_uint32,
_uint64,
_int32,
_int64,
_float32,
_float64,
_complex64,
_complex128,
],
_uint32: [
_uint32,
_uint64,
_int64,
_float64,
_complex128,
],
_uint64: [_uint64, _float64, _complex128],
_float16: [
_float16,
_float32,
_float64,
_complex64,
_complex128,
],
_float32: [
_float32,
_float64,
_complex64,
_complex128,
],
_float64: [_float64, _complex128],
_complex64: [_complex64, _complex128],
_complex128: [_complex128],
}
@handle_exceptions
def promote_numpy_dtypes(
type1: Union[ivy.Dtype, ivy.NativeDtype],
type2: Union[ivy.Dtype, ivy.NativeDtype],
/,
):
type1, type2 = ivy.as_ivy_dtype(type1), ivy.as_ivy_dtype(type2)
try:
return numpy_promotion_table[(type1, type2)]
except KeyError as e:
raise ivy.utils.exceptions.IvyException(
"these dtypes are not type promotable"
) from e
@handle_exceptions
def promote_types_of_numpy_inputs(
x1: Union[ivy.Array, Number, Iterable[Number]],
x2: Union[ivy.Array, Number, Iterable[Number]],
/,
) -> Tuple[ivy.Array, ivy.Array]:
"""Promote the dtype of the given ivy array inputs to a common dtype based
on numpy type promotion rules.
    When passing float or integer values, or any other non-array input, to
    this function, note that the return will be an array-like object.
    Therefore, outputs from this function should only be used as inputs to
    functions that expect array-like or tensor-like objects; otherwise, the
    results may be unexpected.
"""
# ToDo: Overflows not working properly for numpy, if a scalar or 0-dim
# is passed with an array, it should go to the next largest dtype that
# can hold the value without overflow. E.g a np.array([5], 'int8') + 300 operation
# results in np.array([305]) with int16 dtype
x1 = ivy.asarray(x1)
x2 = ivy.asarray(x2)
type1 = ivy.default_dtype(item=x1).strip("u123456789")
type2 = ivy.default_dtype(item=x2).strip("u123456789")
# Ignore type of 0-dim arrays or scalars to mimic numpy
if x1.shape != () and x2.shape == () and type1 == type2:
x2 = ivy.asarray(
x2, dtype=x1.dtype, device=ivy.default_device(item=x1, as_native=False)
)
elif x1.shape == () and x2.shape != () and type1 == type2:
x1 = ivy.asarray(
x1, dtype=x2.dtype, device=ivy.default_device(item=x2, as_native=False)
)
else:
promoted = promote_numpy_dtypes(x1.dtype, x2.dtype)
x1 = ivy.asarray(x1, dtype=promoted)
x2 = ivy.asarray(x2, dtype=promoted)
return x1, x2
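# A rough usage sketch (assuming the promotion table above, which mirrors
# NumPy's own rules): mixing an int8 array with a float32 array promotes
# both operands to float32, while a 0-dim operand of the same kind simply
# adopts the other operand's dtype instead of widening it.
#
#   a = ivy.array([1, 2], dtype="int8")
#   b = ivy.array([1.5], dtype="float32")
#   a2, b2 = promote_types_of_numpy_inputs(a, b)  # a2.dtype == b2.dtype == float32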
from . import creation_routines
from .creation_routines import *
from . import data_type_routines
from .data_type_routines import *
from . import logic
from .logic import *
from . import manipulation_routines
from .manipulation_routines import *
from . import mathematical_functions
from .mathematical_functions import *
from . import sorting_searching_counting
from .sorting_searching_counting import *
from . import statistics
from .statistics import *
from . import matrix
from .matrix import *
from . import random
from .random import *
from . import indexing_routines
from .indexing_routines import *
from . import broadcast
from .broadcast import *
from . import ma
from . import fft
from .ufunc import ufunc
from . import linalg
from .linalg.matrix_and_vector_products import (
# dot,
# vdot,
inner,
outer,
matrix_power,
tensordot,
# einsum,
# einsum_path,
kron,
cross,
)
from .linalg.decompositions import cholesky, qr, svd
from .linalg.norms_and_other_numbers import det, slogdet, matrix_rank, norm, trace
from .linalg.solving_equations_and_inverting_matrices import pinv, inv, solve
# importing private functions for ufunc initialization #
# -----------------------------------------------------#
from ivy.functional.frontends.numpy.mathematical_functions.miscellaneous import (
_absolute,
_cbrt,
_copysign,
_fabs,
_heaviside,
_sign,
_sqrt,
_square,
_lcm,
_gcd,
_clip,
)
from ivy.functional.frontends.numpy.mathematical_functions.arithmetic_operations import ( # noqa
_add,
_divide,
_float_power,
_floor_divide,
_fmod,
_mod,
_modf,
_multiply,
_remainder,
_negative,
_positive,
_power,
_reciprocal,
_subtract,
_divmod,
)
from ivy.functional.frontends.numpy.mathematical_functions.trigonometric_functions import ( # noqa
_arccos,
_arcsin,
_arctan,
_cos,
_deg2rad,
_rad2deg,
_sin,
_tan,
_degrees,
_arctan2,
)
from ivy.functional.frontends.numpy.mathematical_functions.handling_complex_numbers import ( # noqa
_conj,
)
from ivy.functional.frontends.numpy.mathematical_functions.hyperbolic_functions import (
_arccosh,
_arcsinh,
_arctanh,
_cosh,
_sinh,
_tanh,
)
from ivy.functional.frontends.numpy.mathematical_functions.rounding import (
_ceil,
_trunc,
_floor,
_rint,
)
from ivy.functional.frontends.numpy.logic.comparison import (
_equal,
_greater,
_greater_equal,
_less,
_less_equal,
_not_equal,
)
from ivy.functional.frontends.numpy.mathematical_functions.exponents_and_logarithms import ( # noqa
_exp,
_exp2,
_expm1,
_log,
_log10,
_log1p,
_log2,
_logaddexp,
_logaddexp2,
_ldexp,
_frexp,
)
from ivy.functional.frontends.numpy.logic.array_type_testing import (
_isfinite,
_isinf,
_isnan,
)
from ivy.functional.frontends.numpy.logic.logical_operations import (
_logical_and,
_logical_not,
_logical_or,
_logical_xor,
)
from ivy.functional.frontends.numpy.linalg.matrix_and_vector_products import (
_matmul,
dot,
einsum,
)
from ivy.functional.frontends.numpy.mathematical_functions.extrema_finding import (
_maximum,
_minimum,
_fmax,
_fmin,
)
from ivy.functional.frontends.numpy.mathematical_functions.floating_point_routines import ( # noqa
_nextafter,
_signbit,
_spacing,
)
_frontend_array = array
# initializing ufuncs #
# ---------------------#
absolute = ufunc("_absolute")
cbrt = ufunc("_cbrt")
copysign = ufunc("_copysign")
fabs = ufunc("_fabs")
heaviside = ufunc("_heaviside")
sign = ufunc("_sign")
sqrt = ufunc("_sqrt")
square = ufunc("_square")
add = ufunc("_add")
divide = ufunc("_divide")
float_power = ufunc("_float_power")
floor_divide = ufunc("_floor_divide")
fmod = ufunc("_fmod")
mod = ufunc("_mod")
modf = ufunc("_modf")
multiply = ufunc("_multiply")
remainder = ufunc("_remainder")
negative = ufunc("_negative")
positive = ufunc("_positive")
power = ufunc("_power")
reciprocal = ufunc("_reciprocal")
subtract = ufunc("_subtract")
true_divide = ufunc("_divide")
arccos = ufunc("_arccos")
arcsin = ufunc("_arcsin")
arctan = ufunc("_arctan")
arctan2 = ufunc("_arctan2")
cos = ufunc("_cos")
deg2rad = ufunc("_deg2rad")
rad2deg = ufunc("_rad2deg")
sin = ufunc("_sin")
tan = ufunc("_tan")
degrees = ufunc("_degrees")
arccosh = ufunc("_arccosh")
arcsinh = ufunc("_arcsinh")
arctanh = ufunc("_arctanh")
cosh = ufunc("_cosh")
sinh = ufunc("_sinh")
tanh = ufunc("_tanh")
ceil = ufunc("_ceil")
trunc = ufunc("_trunc")
equal = ufunc("_equal")
greater = ufunc("_greater")
greater_equal = ufunc("_greater_equal")
less = ufunc("_less")
less_equal = ufunc("_less_equal")
not_equal = ufunc("_not_equal")
exp = ufunc("_exp")
exp2 = ufunc("_exp2")
expm1 = ufunc("_expm1")
log = ufunc("_log")
log10 = ufunc("_log10")
log1p = ufunc("_log1p")
log2 = ufunc("_log2")
logaddexp = ufunc("_logaddexp")
logaddexp2 = ufunc("_logaddexp2")
isfinite = ufunc("_isfinite")
isinf = ufunc("_isinf")
isnan = ufunc("_isnan")
logical_and = ufunc("_logical_and")
logical_not = ufunc("_logical_not")
logical_or = ufunc("_logical_or")
logical_xor = ufunc("_logical_xor")
matmul = ufunc("_matmul")
maximum = ufunc("_maximum")
minimum = ufunc("_minimum")
divmod = ufunc("_divmod")
fmax = ufunc("_fmax")
fmin = ufunc("_fmin")
ldexp = ufunc("_ldexp")
floor = ufunc("_floor")
frexp = ufunc("_frexp")
conj = ufunc("_conj")
rint = ufunc("_rint")
nextafter = ufunc("_nextafter")
signbit = ufunc("_signbit")
conjugate = ufunc("_conj")
lcm = ufunc("_lcm")
gcd = ufunc("_gcd")
spacing = ufunc("_spacing")
clip = ufunc("_clip")
remainder = ufunc("_remainder")
# setting to specific version #
# --------------------------- #
if ivy.is_local():
module = ivy.utils._importlib.import_cache[__name__]
else:
module = sys.modules[__name__]
__version__ = set_frontend_to_specific_version(module)
| ivy/ivy/functional/frontends/numpy/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/__init__.py",
"repo_id": "ivy",
"token_count": 9067
} | 31 |
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_unsupported_dtypes
_SWAP_DIRECTION_MAP = {
None: "forward",
"backward": "forward",
"ortho": "ortho",
"forward": "backward",
}
# --- Helpers --- #
# --------------- #
def _swap_direction(norm):
try:
return _SWAP_DIRECTION_MAP[norm]
except KeyError:
raise ValueError(
f'Invalid norm value {norm}; should be "backward", "ortho" or "forward".'
) from None
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
def fft(a, n=None, axis=-1, norm=None):
return ivy.fft(ivy.astype(a, ivy.complex128), axis, norm=norm, n=n)
@with_unsupported_dtypes({"1.26.3 and below": ("int",)}, "numpy")
@to_ivy_arrays_and_back
def fftfreq(n, d=1.0):
if not isinstance(
n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))
):
raise TypeError("n should be an integer")
N = (n - 1) // 2 + 1
val = 1.0 / (n * d)
results = ivy.empty((n,), dtype=int)
p1 = ivy.arange(0, N, dtype=int)
results[:N] = p1
p2 = ivy.arange(-(n // 2), 0, dtype=int)
results[N:] = p2
return results * val
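# For illustration, this should match numpy.fft.fftfreq's ordering of
# non-negative frequencies first, then the negative ones, e.g.:
#   fftfreq(4, d=1.0)  ->  [0.0, 0.25, -0.5, -0.25]
#   fftfreq(5, d=1.0)  ->  [0.0, 0.2, 0.4, -0.4, -0.2]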
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, "numpy")
def fftshift(x, axes=None):
x = ivy.asarray(x)
if axes is None:
axes = tuple(range(x.ndim))
shift = [(dim // 2) for dim in x.shape]
elif isinstance(
axes,
(int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),
):
shift = x.shape[axes] // 2
else:
shift = [(x.shape[ax] // 2) for ax in axes]
roll = ivy.roll(x, shift, axis=axes)
return roll
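# Sketch of the expected behaviour (mirroring numpy.fft.fftshift): the
# zero-frequency term is moved to the centre by rolling each selected axis
# by half its length, e.g.:
#   fftshift([0, 1, 2, -3, -2, -1])  ->  [-3, -2, -1, 0, 1, 2]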
@to_ivy_arrays_and_back
def ifft(a, n=None, axis=-1, norm=None):
a = ivy.array(a, dtype=ivy.complex128)
if norm is None:
norm = "backward"
return ivy.ifft(a, axis, norm=norm, n=n)
@with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
def ifft2(a, s=None, axes=(-2, -1), norm=None):
a = ivy.asarray(a, dtype=ivy.complex128)
a = ivy.ifftn(a, s=s, axes=axes, norm=norm)
return a
@with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
def ifftn(a, s=None, axes=None, norm=None):
a = ivy.asarray(a, dtype=ivy.complex128)
a = ivy.ifftn(a, s=s, axes=axes, norm=norm)
return a
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, "numpy")
def ifftshift(x, axes=None):
x = ivy.asarray(x)
if axes is None:
axes = tuple(range(x.ndim))
shift = [-(dim // 2) for dim in x.shape]
elif isinstance(
axes,
(int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),
):
shift = -(x.shape[axes] // 2)
else:
shift = [-(x.shape[ax] // 2) for ax in axes]
roll = ivy.roll(x, shift, axis=axes)
return roll
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
def ihfft(a, n=None, axis=-1, norm=None):
if n is None:
n = a.shape[axis]
norm = _swap_direction(norm)
output = ivy.conj(rfft(a, n, axis, norm=norm).ivy_array)
return output
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
def rfft(a, n=None, axis=-1, norm=None):
if norm is None:
norm = "backward"
a = ivy.array(a, dtype=ivy.float64)
return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)
@to_ivy_arrays_and_back
def rfftfreq(n, d=1.0):
if not isinstance(
n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))
):
raise TypeError("n should be an integer")
val = 1.0 / (n * d)
N = n // 2 + 1
results = ivy.arange(0, N, dtype=int)
return results * val
@with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
def rfftn(a, s=None, axes=None, norm=None):
a = ivy.asarray(a, dtype=ivy.complex128)
return ivy.rfftn(a, s=s, axes=axes, norm=norm)
| ivy/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py",
"repo_id": "ivy",
"token_count": 1998
} | 32 |
# global
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
inputs_to_ivy_arrays,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
from ivy.functional.frontends.numpy import promote_types_of_numpy_inputs
from ivy.func_wrapper import with_supported_dtypes
@inputs_to_ivy_arrays
@from_zero_dim_arrays_to_scalar
def allclose(a, b, /, *, rtol=1e-05, atol=1e-08, equal_nan=False):
a, b = promote_types_of_numpy_inputs(a, b)
return ivy.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def isclose(a, b, /, *, rtol=1e-05, atol=1e-08, equal_nan=False):
a, b = promote_types_of_numpy_inputs(a, b)
return ivy.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
@with_supported_dtypes(
{"2.6.0 and below": ("int64", "float64", "float32", "int32", "bfloat16")}, "paddle"
)
@to_ivy_arrays_and_back
def isin(element, test_elements, assume_unique=False, invert=False):
return ivy.isin(element, test_elements, assume_unique=assume_unique, invert=invert)
@handle_numpy_out
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def isneginf(x, out=None):
return ivy.isinf(x, detect_positive=False)
@handle_numpy_out
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def isposinf(x, out=None):
return ivy.isinf(x, detect_negative=False)
| ivy/ivy/functional/frontends/numpy/logic/array_contents.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/logic/array_contents.py",
"repo_id": "ivy",
"token_count": 635
} | 33 |
# local
import ivy.functional.frontends.numpy as np_frontend
import ivy
def asmatrix(data, dtype=None):
return np_frontend.matrix(ivy.array(data), dtype=dtype, copy=False)
def asscalar(a):
return a.item()
| ivy/ivy/functional/frontends/numpy/manipulation_routines/changing_kind_of_array.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/manipulation_routines/changing_kind_of_array.py",
"repo_id": "ivy",
"token_count": 86
} | 34 |
# global
import ivy
# local
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
from_zero_dim_arrays_to_scalar,
)
@to_ivy_arrays_and_back
def sinc(x):
if ivy.get_num_dims(x) == 0:
x = ivy.astype(x, ivy.float64)
return ivy.sinc(x)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def unwrap(p, discont=None, axis=-1, *, period=2 * ivy.pi):
p = ivy.asarray(p)
nd = p.ndim
dd = ivy.diff(p, axis=axis)
if discont is None:
discont = period / 2
slice1 = [slice(None, None)] * nd # full slices
    slice1[axis] = slice(1, None)
    slice1 = tuple(slice1)
dtype = ivy.result_type(dd, period)
if ivy.issubdtype(dtype, ivy.integer):
interval_high, rem = ivy.divmod(period, 2)
boundary_ambiguous = rem == 0
else:
interval_high = period / 2
boundary_ambiguous = True
interval_low = -interval_high
ddmod = ivy.mod(dd - interval_low, period) + interval_low
if boundary_ambiguous:
ivy.copyto(ddmod, interval_high, where=(ddmod == interval_low) & (dd > 0))
ph_correct = ddmod - dd
ivy.copyto(ph_correct, 0, where=ivy.abs(dd) < discont)
up = ivy.array(p, copy=True, dtype=dtype)
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
| ivy/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py",
"repo_id": "ivy",
"token_count": 612
} | 35 |
# global
# local
import ivy
from ivy.functional.frontends.numpy.creation_routines.from_existing_data import array
from ivy.functional.frontends.numpy.ndarray.ndarray import ndarray
class generic(ndarray):
_name = "generic"
def __init__(self):
raise ivy.utils.exceptions.IvyException(
f"cannot create 'numpy.{self._name}' instances"
)
class bool_(generic):
def __new__(cls, value=0):
ret = array(value, dtype="bool")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "bool")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="bool")
)
def __repr__(self):
return "True" if self.ivy_array else "False"
class number(generic):
_name = "number"
class bfloat16(generic):
def __new__(cls, value=0):
ret = array(value, dtype="bfloat16")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "bfloat16")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="bfloat16")
)
class integer(number):
_name = "integer"
def __repr__(self):
return self.ivy_array.__repr__()[10:-1]
class inexact(number):
_name = "inexact"
class signedinteger(integer):
_name = "signedinteger"
class unsignedinteger(integer):
_name = "unsignedinteger"
class floating(inexact):
_name = "floating"
def __repr__(self):
return self.ivy_array.__repr__()[10:-1]
class complexfloating(inexact):
_name = "complexfloating"
def __repr__(self):
return self.ivy_array.__repr__()[10:-1]
class byte(signedinteger):
def __new__(cls, value=0):
ret = array(value, dtype="int8")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "int8")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="int8")
)
class short(signedinteger):
def __new__(cls, value=0):
ret = array(value, dtype="int16")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "int16")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="int16")
)
class intc(signedinteger):
def __new__(cls, value=0):
ret = array(value, dtype="int32")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "int32")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="int32")
)
class int_(signedinteger):
def __new__(cls, value=0):
ret = array(value, dtype="int64")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "int64")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="int64")
)
class longlong(signedinteger):
def __new__(cls, value=0):
ret = array(value, dtype="int64")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "int64")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="int64")
)
class uint(signedinteger):
def __new__(cls, value=0):
ret = array(value, dtype="uint64")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "uint64")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="uint64")
)
class ulonglong(signedinteger):
def __new__(cls, value=0):
ret = array(value, dtype="uint64")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "uint64")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="uint64")
)
class ubyte(unsignedinteger):
def __new__(cls, value=0):
ret = array(value, dtype="uint8")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "uint8")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="uint8")
)
class ushort(unsignedinteger):
def __new__(cls, value=0):
ret = array(value, dtype="uint16")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "uint16")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="uint16")
)
class uintc(unsignedinteger):
def __new__(cls, value=0):
ret = array(value, dtype="uint32")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "uint32")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="uint32")
)
class half(floating):
def __new__(cls, value=0):
ret = array(value, dtype="float16")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "float16")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="float16")
)
class single(floating):
def __new__(cls, value=0):
ret = array(value, dtype="float32")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "float32")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="float32")
)
class double(floating, float):
def __new__(cls, value=0):
ret = array(value, dtype="float64")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "float64")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="float64")
)
class csingle(complexfloating):
def __new__(cls, value=0):
ret = array(value, dtype="complex64")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "complex64")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="complex64")
)
class cdouble(complexfloating, complex):
def __new__(cls, value=0):
ret = array(value, dtype="complex128")
if ret.shape != ():
return ret
obj = super().__new__(cls)
return obj
def __init__(self, value=0):
ndarray.__init__(self, 0)
self.ivy_array = (
ivy.astype(value.ivy_array, "complex128")
if hasattr(value, "ivy_array")
else ivy.array(value, dtype="complex128")
)
bool8 = bool_
complex128 = cfloat = complex_ = cdouble
complex64 = singlecomplex = csingle
float16 = half
float32 = single
float64 = float_ = double
int16 = short
int32 = intc
int64 = intp = int_
int8 = byte
uint16 = ushort
uint32 = uintc
uint64 = uintp = uint
uint8 = ubyte
| ivy/ivy/functional/frontends/numpy/scalars/scalars.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/scalars/scalars.py",
"repo_id": "ivy",
"token_count": 4642
} | 36 |
import ivy
from ivy.functional.frontends.onnx.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def MatMul(x1, x2):
return ivy.matmul(x1, x2)
| ivy/ivy/functional/frontends/onnx/linalg.py/0 | {
"file_path": "ivy/ivy/functional/frontends/onnx/linalg.py",
"repo_id": "ivy",
"token_count": 78
} | 37 |
# local
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
from ivy.functional.frontends.torch.nn.functional import convolution_functions
# --- Helpers --- #
# --------------- #
def _channel_first_input(x, data_format):
ndims = len(x.shape)
dims = ndims - 2
    if dims < 1 or dims > 3:
raise ivy.utils.exceptions.IvyError(f"invalid for input with {dims} dims")
# channel first input
if data_format not in ["NCL", "NCHW", "NCDHW"]:
if data_format in ["NLC", "NHWC", "NDHWC"]:
x = ivy.permute_dims(x, axes=(0, ndims - 1, *range(1, ndims - 1)))
else:
            expected = (
                "'NCL' or 'NLC'"
                if dims == 1
                else "'NCHW' or 'NHWC'" if dims == 2 else "'NCDHW' or 'NDHWC'"
            )
            raise ivy.utils.exceptions.IvyError(
                f"data_format should be {expected}, "
                f"but got data_format '{data_format}'"
            )
return x
def _conv(
x, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, data_format="NLC"
):
x = _channel_first_input(x, data_format)
if padding == "same":
dilation = 1
return convolution_functions._conv(
x,
weight,
bias=bias,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
def _conv_transpose(
x,
weight,
bias=None,
stride=1,
padding=0,
output_padding=0,
dilation=1,
groups=1,
data_format="NLC",
):
x = _channel_first_input(x, data_format)
if padding == "same":
dilation = 1
return convolution_functions._conv_transpose(
x,
weight,
bias=bias,
stride=stride,
padding=padding,
output_padding=output_padding,
groups=groups,
dilation=dilation,
)
# --- Main --- #
# ------------ #
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def conv1d(
x,
weight,
bias=None,
stride=1,
padding=0,
dilation=1,
groups=1,
data_format="NCL",
name=None,
):
return _conv(x, weight, bias, stride, padding, dilation, groups, data_format)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def conv1d_transpose(
x,
weight,
bias=None,
stride=1,
padding=0,
output_padding=0,
groups=1,
dilation=1,
output_size=None,
data_format="NCL",
name=None,
):
return _conv_transpose(
x, weight, bias, stride, padding, output_padding, dilation, groups, data_format
)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def conv2d(
x,
weight,
bias=None,
stride=1,
padding=0,
dilation=1,
groups=1,
data_format="NCHW",
name=None,
):
return _conv(x, weight, bias, stride, padding, dilation, groups, data_format)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def conv2d_transpose(
x,
weight,
bias=None,
stride=1,
padding=0,
output_padding=0,
dilation=1,
groups=1,
output_size=None,
data_format="NCHW",
name=None,
):
return _conv_transpose(
x, weight, bias, stride, padding, output_padding, dilation, groups, data_format
)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def conv3d(
x,
weight,
bias=None,
stride=1,
padding=0,
dilation=1,
groups=1,
data_format="NCDHW",
name=None,
):
return _conv(x, weight, bias, stride, padding, dilation, groups, data_format)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def conv3d_transpose(
x,
weight,
bias=None,
stride=1,
padding=0,
output_padding=0,
groups=1,
dilation=1,
output_size=None,
data_format="NCDHW",
name=None,
):
return _conv_transpose(
x, weight, bias, stride, padding, output_padding, dilation, groups, data_format
)
| ivy/ivy/functional/frontends/paddle/nn/functional/conv.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/nn/functional/conv.py",
"repo_id": "ivy",
"token_count": 2016
} | 38 |
# local
from ..manipulation import * # noqa: F401
import ivy
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
from ivy.func_wrapper import with_supported_dtypes, with_unsupported_dtypes
@with_supported_dtypes(
{"2.5.1 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def index_add_(x, index, axis, value, *, name=None):
x = ivy.swapaxes(x, axis, 0)
value = ivy.swapaxes(value, axis, 0)
_to_adds = []
index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda i: i[0]))
while index:
_curr_idx = index[0][0]
while len(_to_adds) < _curr_idx:
_to_adds.append(ivy.zeros_like(value[0]))
_to_add_cum = ivy.get_item(value, index[0][1])
while (len(index)) > 1 and (index[0][0] == index[1][0]):
_to_add_cum = _to_add_cum + ivy.get_item(value, index.pop(1)[1])
index.pop(0)
_to_adds.append(_to_add_cum)
while len(_to_adds) < x.shape[0]:
_to_adds.append(ivy.zeros_like(value[0]))
_to_adds = ivy.stack(_to_adds)
if len(x.shape) < 2:
# Added this line due to the paddle backend treating scalars as 1-d arrays
_to_adds = ivy.flatten(_to_adds)
ret = ivy.add(x, _to_adds)
ret = ivy.swapaxes(ret, axis, 0)
x = ret
return x
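# A small sketch of the accumulation above (hypothetical values): with
#   x = ivy.ones((3, 3)), index = [0, 2], axis = 0, value = ivy.ones((2, 3))
# rows 0 and 2 of the result become 2.0 while row 1 stays 1.0; repeated
# indices have their corresponding `value` rows summed before being added.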
# NOTE:
# Only inplace functions are to be added in this file.
# Please add non-inplace counterparts to `/frontends/paddle/manipulation.py`.
@with_unsupported_dtypes(
{"2.6.0 and below": ("int8", "uint8", "int16", "uint16", "float16", "bfloat16")},
"paddle",
)
@to_ivy_arrays_and_back
def reshape_(x, shape):
ivy.reshape(x, shape)
return x
| ivy/ivy/functional/frontends/paddle/tensor/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/tensor/manipulation.py",
"repo_id": "ivy",
"token_count": 806
} | 39 |
from .ndimage import *
| ivy/ivy/functional/frontends/scipy/ndimage/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/scipy/ndimage/__init__.py",
"repo_id": "ivy",
"token_count": 7
} | 40 |
from . import _split
from ._split import *
| ivy/ivy/functional/frontends/sklearn/model_selection/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/sklearn/model_selection/__init__.py",
"repo_id": "ivy",
"token_count": 12
} | 41 |
# local
from ivy.functional.frontends.tensorflow.general_functions import pad as tf_pad
pad = tf_pad
| ivy/ivy/functional/frontends/tensorflow/compat/v1/general_functions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/compat/v1/general_functions.py",
"repo_id": "ivy",
"token_count": 34
} | 42 |
# global
import ivy
from ivy.functional.frontends.tensorflow.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.functional.frontends.tensorflow import check_tensorflow_casting
# --- Helpers --- #
# --------------- #
def _convolution_broadcast_helper(
arg, num_spatial_dims, channel_index, name="dilations"
):
# Helper to broadcast dilations and strides to correct dims
if arg is None:
return [1] * num_spatial_dims
else:
if isinstance(arg, int):
arg = [arg]
else:
arg = list(arg)
len_arg = len(arg)
if len_arg == num_spatial_dims + 2:
return arg
        # Broadcast to correct dimensions
if len_arg == 1:
arg = arg * num_spatial_dims
elif len_arg != num_spatial_dims:
raise ValueError(
f"{name} should be of length 1, "
f"{num_spatial_dims} or {num_spatial_dims + 2}. "
f"Received: {name}={arg} of length {len_arg}."
)
# Add dimensions for batch and channel
if channel_index == 1:
return [1, 1] + arg
else:
return [1] + arg + [1]
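# For example (values are illustrative only): a scalar dilation of 2 with two
# spatial dims broadcasts to a full NHWC-style spec,
#   _convolution_broadcast_helper(2, num_spatial_dims=2, channel_index=3)
#   -> [1, 2, 2, 1]
# and with channel_index=1 the batch/channel ones are prepended instead:
#   -> [1, 1, 2, 2]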
def _reduce_padding(padding, data_format):
if not isinstance(padding, str):
if data_format[1] == "C":
padding = padding[2:]
else:
padding = padding[1:-1]
return padding
def _reduce_strides_dilations(dim, stride, dilations):
if not isinstance(stride, int):
if len(stride) > dim:
stride = stride[1:-1]
if len(stride) == 1 and dim != 1:
stride = stride[0]
if not isinstance(dilations, int):
if len(dilations) > dim:
dilations = dilations[1:-1]
if len(dilations) == 1 and dim != 1:
dilations = dilations[0]
return stride, dilations
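# Roughly: TF-style length-4 specs are trimmed to their spatial part and
# singleton lists collapse to ints, e.g.
#   _reduce_strides_dilations(2, [1, 2, 2, 1], [1])  ->  ([2, 2], 1)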
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
def atrous_conv2d(value, filters, rate, padding):
return ivy.conv2d(value, filters, 1, padding, dilations=[rate] * 2)
@to_ivy_arrays_and_back
def atrous_conv2d_transpose(value, filters, output_shape, rate, padding):
filters = filters.swapaxes(-2, -1)
return ivy.conv2d_transpose(
value, filters, 1, padding, output_shape=output_shape, dilations=[rate] * 2
)
@to_ivy_arrays_and_back
def avg_pool(input, ksize, strides, padding, data_format="NWC", name=None):
if len(ivy.shape(input)) == 3:
return ivy.avg_pool1d(input, ksize, strides, padding, data_format=data_format)
elif len(ivy.shape(input)) == 4:
return ivy.avg_pool2d(input, ksize, strides, padding, data_format=data_format)
return ivy.avg_pool3d(input, ksize, strides, padding, data_format=data_format)
# avg_pool1d
@to_ivy_arrays_and_back
def avg_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):
return ivy.avg_pool1d(input, ksize, strides, padding, data_format=data_format)
# avg_pool2d
@to_ivy_arrays_and_back
def avg_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):
return ivy.avg_pool2d(input, ksize, strides, padding, data_format=data_format)
# avg_pool3d
@to_ivy_arrays_and_back
def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
return ivy.avg_pool3d(input, ksize, strides, padding, data_format=data_format)
@to_ivy_arrays_and_back
def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None):
xnormalized, _, _ = ivy.batch_norm(
x,
mean,
variance,
offset=offset,
scale=scale,
eps=variance_epsilon,
)
return xnormalized
@to_ivy_arrays_and_back
def bias_add(value, bias, data_format=None, name=None):
if data_format is None:
data_format = "N...C"
channel_index = data_format.find("C")
if channel_index != 1:
return ivy.add(value, bias)
else:
value = ivy.swapaxes(value, 1, -1)
res = ivy.add(value, bias)
return ivy.swapaxes(res, 1, -1)
@to_ivy_arrays_and_back
def conv1d(
input, filters, stride, padding, data_format="NWC", dilations=None, name=None
):
dilations = 1 if dilations is None else dilations
stride, dilations = _reduce_strides_dilations(1, stride, dilations)
return ivy.conv1d(
input, filters, stride, padding, data_format=data_format, dilations=dilations
)
@to_ivy_arrays_and_back
def conv1d_transpose(
input,
filters,
output_shape,
strides,
padding="SAME",
data_format="NWC",
dilations=None,
name=None,
):
dilations = 1 if dilations is None else dilations
strides, dilations = _reduce_strides_dilations(1, strides, dilations)
return ivy.conv1d_transpose(
input,
filters,
strides,
padding,
output_shape=output_shape,
data_format=data_format,
dilations=dilations,
)
@to_ivy_arrays_and_back
def conv2d(
input, filters, strides, padding, data_format="NHWC", dilations=None, name=None
):
dilations = 1 if dilations is None else dilations
strides, dilations = _reduce_strides_dilations(2, strides, dilations)
padding = _reduce_padding(padding, data_format)
return ivy.conv2d(
input, filters, strides, padding, data_format=data_format, dilations=dilations
)
@to_ivy_arrays_and_back
def conv2d_transpose(
input,
filters,
output_shape,
strides,
padding="SAME",
data_format="NHWC",
dilations=None,
name=None,
):
dilations = 1 if dilations is None else dilations
strides, dilations = _reduce_strides_dilations(2, strides, dilations)
padding = _reduce_padding(padding, data_format)
return ivy.conv2d_transpose(
input,
filters,
strides,
padding,
output_shape=output_shape,
data_format=data_format,
dilations=dilations,
)
@to_ivy_arrays_and_back
def conv3d(
input, filters, strides, padding, data_format="NDHWC", dilations=None, name=None
):
dilations = 1 if dilations is None else dilations
strides, dilations = _reduce_strides_dilations(3, strides, dilations)
return ivy.conv3d(
input, filters, strides, padding, data_format=data_format, dilations=dilations
)
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16",)}, "tensorflow")
@to_ivy_arrays_and_back
def conv3d_transpose(
input,
filters,
output_shape,
strides,
padding="SAME",
data_format="NDHWC",
dilations=None,
name=None,
):
dilations = 1 if dilations is None else dilations
strides, dilations = _reduce_strides_dilations(3, strides, dilations)
return ivy.conv3d_transpose(
input,
filters,
strides,
padding,
output_shape=output_shape,
data_format=data_format,
dilations=dilations,
)
@to_ivy_arrays_and_back
def convolution(
input,
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None,
):
num_spatial_dims = input.ndim - 2
if data_format is None or not data_format.startswith("NC"):
data_format = "channel_last"
else:
data_format = "channel_first"
channel_index = -1 if data_format == "channel_last" else 1
input_depth = ivy.shape(input)[channel_index]
filters_depth = ivy.shape(filters)[-2]
feature_group_count = 1
if input_depth != filters_depth:
if input_depth % filters_depth != 0:
raise ValueError(
"input depth must be evenly divisible by filter depth: "
f"{input_depth} vs {filters_depth}"
)
feature_group_count = input_depth // filters_depth
return ivy.conv_general_dilated(
input,
filters,
strides,
padding,
dims=num_spatial_dims,
data_format=data_format,
dilations=dilations,
feature_group_count=feature_group_count,
)
@to_ivy_arrays_and_back
def crelu(features, axis=-1, name=None):
c = ivy.concat([features, -features], axis=axis)
return ivy.relu(c)
# ctc_unique_labels
@to_ivy_arrays_and_back
def ctc_unique_labels(labels, name=None):
ctc_labels = ivy.unique_all(labels, by_value=False)
unique_pad = ivy.pad(
ctc_labels[0], (0, labels.size - ctc_labels[0].size), mode="constant"
)
return unique_pad, ctc_labels[2]
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16",)}, "tensorflow")
@to_ivy_arrays_and_back
def depthwise_conv2d(
input,
filter,
strides,
padding="SAME",
data_format="NHWC",
dilations=None,
name=None,
):
dilations = 1 if dilations is None else dilations
strides, dilations = _reduce_strides_dilations(2, strides, dilations)
fc = filter.shape[-2]
filter = filter.reshape(
[*filter.shape[0:2], 1, filter.shape[-2] * filter.shape[-1]]
)
return ivy.conv_general_dilated(
input,
filter,
strides,
padding,
data_format="channel_last" if data_format[-1] == "C" else "channel_first",
dilations=dilations,
feature_group_count=fc,
)
@to_ivy_arrays_and_back
def dropout(x, rate, noise_shape=None, seed=None, name=None):
return ivy.dropout(x, rate, noise_shape=noise_shape, training=True, seed=seed)
@with_unsupported_dtypes({"2.11.1 and below": ("complex",)}, "tensorflow")
@to_ivy_arrays_and_back
def embedding_lookup(params, ids, max_norm=None, name=None):
return ivy.embedding(params, ids, max_norm=max_norm)
@to_ivy_arrays_and_back
def gelu(features, approximate=False, name=None):
return ivy.gelu(features, approximate=approximate)
@with_unsupported_dtypes({"2.15.0 and below": "float16"}, "tensorflow")
@to_ivy_arrays_and_back
def leaky_relu(features, alpha=0.2, name=None):
return ivy.leaky_relu(features, alpha=alpha)
@with_supported_dtypes({"2.15.0 and below": ("float32", "float16")}, "tensorflow")
@to_ivy_arrays_and_back
def local_response_normalization(
input, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5, name=None
):
return ivy.local_response_norm(
input, 2 * depth_radius + 1, bias=bias, alpha=alpha, beta=beta
)
@to_ivy_arrays_and_back
def log_poisson_loss(targets, log_input, compute_full_loss=False, name=None):
return ivy.log_poisson_loss(targets, log_input, compute_full_loss=compute_full_loss)
@to_ivy_arrays_and_back
def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):
return ivy.max_pool1d(input, ksize, strides, padding, data_format=data_format)
@to_ivy_arrays_and_back
def max_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):
return ivy.max_pool2d(input, ksize, strides, padding, data_format=data_format)
@with_supported_dtypes({"2.15.0 and below": ("float32",)}, "tensorflow")
@to_ivy_arrays_and_back
def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
return ivy.max_pool3d(input, ksize, strides, padding, data_format=data_format)
@to_ivy_arrays_and_back
def moments(x, axes, shift=None, keepdims=False, name=None):
return ivy.mean(x, axis=ivy.to_list(axes), keepdims=keepdims), ivy.var(
x, axis=ivy.to_list(axes), keepdims=keepdims
)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"int8",
"int16",
"int32",
"int64",
"bool",
)
},
"tensorflow",
)
@to_ivy_arrays_and_back
def normalize_moments(counts, mean_ss, variance_ss, shift=None, name=None):
divisor = ivy.reciprocal(counts)
if shift is not None:
shifted_mean = ivy.multiply(mean_ss, divisor)
mean = ivy.add(shifted_mean, shift)
else:
shifted_mean = ivy.multiply(mean_ss, divisor)
mean = shifted_mean
variance = ivy.subtract(
ivy.multiply(variance_ss, divisor), ivy.square(shifted_mean)
)
return mean, variance
# pool
@to_ivy_arrays_and_back
def pool(
input,
window_shape,
pooling_type,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None,
):
return ivy.pool(
input,
window_shape,
pooling_type,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, "tensorflow")
@to_ivy_arrays_and_back
def relu(features, name=None):
return ivy.relu(features)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, "tensorflow")
@to_ivy_arrays_and_back
def relu6(features, name=None):
return ivy.relu6(features)
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16",)}, "tensorflow")
@to_ivy_arrays_and_back
def separable_conv2d(
input,
depthwise_filter,
pointwise_filter,
strides,
padding,
data_format=None,
dilations=None,
name=None,
):
dilations = 1 if dilations is None else dilations
strides, dilations = _reduce_strides_dilations(2, strides, dilations)
ret = depthwise_conv2d(
input,
depthwise_filter,
strides=strides,
padding=padding,
dilations=dilations,
data_format=data_format,
)
return conv2d(ret, pointwise_filter, 1, "SAME", data_format=data_format)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"int8",
"int16",
"int32",
"int64",
"bool",
)
},
"tensorflow",
)
@to_ivy_arrays_and_back
def sigmoid_cross_entropy_with_logits(labels=None, logits=None, name=None):
ivy.utils.assertions.check_shape(labels, logits)
zeros = ivy.zeros_like(logits)
max_logits = ivy.where(logits >= zeros, logits, zeros)
neg_abs_logits = ivy.negative(ivy.abs(logits))
neg_multiple = ivy.negative(ivy.multiply(logits, labels))
ret_val = ivy.add(max_logits, neg_multiple)
return ivy.add(ret_val, ivy.log1p(ivy.exp(neg_abs_logits)))
@to_ivy_arrays_and_back
def silu(features, beta: float = 1.0):
beta = ivy.astype(ivy.array(beta), ivy.dtype(features))
return ivy.multiply(features, ivy.sigmoid(ivy.multiply(beta, features)))
@with_unsupported_dtypes({"2.15.0 and below": ("float16",)}, "tensorflow")
@to_ivy_arrays_and_back
def softmax(logits, axis=None, name=None):
return ivy.softmax(logits, axis=axis)
# Softsign
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"int8",
"int16",
"int32",
"int64",
)
},
"tensorflow",
)
@to_ivy_arrays_and_back
def softsign(x, name=None):
return ivy.softsign(x)
# sufficient_statistics
@to_ivy_arrays_and_back
def sufficient_statistics(x, axes, shift=None, keepdims=False, name=None):
count = 1
shape = ivy.shape(x)
axes = list(set(axes))
for a in axes:
if ivy.to_scalar(a) < 0:
index = x.ndim + ivy.to_scalar(a)
else:
index = ivy.to_scalar(a)
count *= shape[index]
count = ivy.array(count, dtype=ivy.dtype(x))
if shift is None:
sum_of_elements = ivy.sum(x, axis=axes, keepdims=keepdims)
sum_of_squares = ivy.sum(ivy.square(x), axis=axes, keepdims=keepdims)
else:
sum_of_elements = ivy.sum(
(ivy.subtract(x, shift)), axis=axes, keepdims=keepdims
)
sum_of_squares = ivy.sum(
(ivy.square(ivy.subtract(x, shift))), axis=axes, keepdims=keepdims
)
if shift.ndim == 0:
ivy.reshape(shift, ())
if count.ndim == 0:
ivy.reshape(count, ())
if sum_of_elements.ndim == 0:
ivy.reshape(sum_of_elements, ())
if sum_of_squares.ndim == 0:
ivy.reshape(sum_of_squares, ())
return count, sum_of_elements, sum_of_squares, shift
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"int8",
"int16",
"int32",
"int64",
"bool",
)
},
"tensorflow",
)
@to_ivy_arrays_and_back
def weighted_cross_entropy_with_logits(
labels=None, logits=None, pos_weight=1.0, name=None
):
ivy.utils.assertions.check_shape(labels, logits)
ones = ivy.ones_like(labels)
zeros = ivy.zeros_like(logits)
log_weight = ivy.add(ones, ivy.multiply(pos_weight - 1, labels))
ones_minus_labels = ivy.subtract(ones, labels)
first_term = ivy.multiply(ones_minus_labels, logits)
max_neg_logits = ivy.where(
ivy.negative(logits) >= zeros, ivy.negative(logits), zeros
)
neg_abs_logits = ivy.negative(ivy.abs(logits))
log_neg_abs_logits = ivy.log1p(ivy.exp(neg_abs_logits))
second_term = ivy.multiply(log_weight, ivy.add(log_neg_abs_logits, max_neg_logits))
return ivy.add(first_term, second_term)
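# The expression built above is the numerically stable form of TF's weighted
# sigmoid cross-entropy (stated here for reference):
#   loss = (1 - z) * x
#          + (1 + (pos_weight - 1) * z) * (log1p(exp(-|x|)) + max(-x, 0))
# where x = logits and z = labels.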
# weighted_moments
@to_ivy_arrays_and_back
def weighted_moments(x, axes, frequency_weights, keepdims=False, name=None):
fw_x_prod = frequency_weights * x
fw_x_prod = ivy.array(fw_x_prod)
weighted_input_sum = ivy.sum(fw_x_prod, axis=axes, keepdims=True).astype(
fw_x_prod.dtype
)
broadcasted_weights = frequency_weights + ivy.zeros_like(x)
broadcasted_weights = ivy.array(broadcasted_weights)
sum_of_weights = ivy.sum(broadcasted_weights, axis=axes, keepdims=True).astype(
broadcasted_weights.dtype
)
divisor = ivy.reciprocal(sum_of_weights)
weighted_input_sum, divisor = check_tensorflow_casting(weighted_input_sum, divisor)
weighted_mean = ivy.multiply(weighted_input_sum, divisor)
x, weighted_mean = check_tensorflow_casting(x, weighted_mean)
squared_difference = ivy.square(ivy.subtract(x, weighted_mean))
if isinstance(squared_difference, complex):
squared_difference = squared_difference.real - squared_difference.imag * 1j
fw_sq_diff_prod = frequency_weights * squared_difference
fw_sq_diff_prod = ivy.array(fw_sq_diff_prod)
weighted_distsq = ivy.sum(fw_sq_diff_prod, axis=axes, keepdims=True).astype(
fw_sq_diff_prod.dtype
)
weighted_distsq, divisor = check_tensorflow_casting(weighted_distsq, divisor)
weighted_variance = ivy.multiply(weighted_distsq, divisor)
if not keepdims:
weighted_mean = ivy.squeeze(weighted_mean, axis=axes)
weighted_variance = ivy.squeeze(weighted_variance, axis=axes)
return weighted_mean, weighted_variance
swish = silu
| ivy/ivy/functional/frontends/tensorflow/nn.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/nn.py",
"repo_id": "ivy",
"token_count": 8359
} | 43 |
# local
import ivy
from ivy.functional.frontends.torch.func_wrapper import (
to_ivy_arrays_and_back,
to_ivy_shape,
)
from ivy.func_wrapper import (
with_unsupported_dtypes,
with_supported_dtypes,
)
import ivy.functional.frontends.torch as torch_frontend
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def arange(
start=0,
end=None,
step=1,
*,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False,
):
return ivy.arange(start, end, step, dtype=dtype, device=device, out=out)
@to_ivy_arrays_and_back
def as_strided(input, size, stride, storage_offset=None):
ind = ivy.array([0], dtype=ivy.int64)
for i, (size_i, stride_i) in enumerate(zip(size, stride)):
r_size = [1] * len(stride)
r_size[i] = -1
ind = ind + ivy.reshape(ivy.arange(size_i), r_size) * stride_i
if storage_offset:
ind = ind + storage_offset
# in case the input is a non-contiguous native array,
# the return will differ from torch.as_strided
if ivy.is_ivy_array(input) and input.base is not None:
return ivy.gather(ivy.flatten(input.base), ind)
return ivy.gather(ivy.flatten(input), ind)
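# A quick worked example (illustrative only): viewing a flat range with
# shape (2, 2) and strides (1, 2) gathers elements 0, 2, 1, 3, i.e.
#   as_strided(arange(4), (2, 2), (1, 2))  ->  [[0, 2], [1, 3]]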
@to_ivy_arrays_and_back
def as_tensor(
data,
*,
dtype=None,
device=None,
):
if dtype is None:
if isinstance(data, int):
dtype = ivy.int64
elif isinstance(data, float):
dtype = torch_frontend.get_default_dtype()
elif isinstance(data, (list, tuple)):
if all(isinstance(d, int) for d in data):
dtype = ivy.int64
else:
dtype = torch_frontend.get_default_dtype()
return ivy.asarray(data, dtype=dtype, device=device)
@to_ivy_arrays_and_back
def asarray(
obj,
*,
dtype=None,
device=None,
copy=None,
):
return ivy.asarray(obj, copy=copy, dtype=dtype, device=device)
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, "torch")
@to_ivy_arrays_and_back
def complex(
real,
imag,
*,
out=None,
):
assert real.dtype == imag.dtype, TypeError(
"Expected real and imag to have the same dtype, "
f" but got real.dtype = {real.dtype} and imag.dtype = {imag.dtype}."
)
complex_dtype = ivy.complex64 if real.dtype != ivy.float64 else ivy.complex128
complex_array = real + imag * 1j
return complex_array.astype(complex_dtype, out=out)
@to_ivy_arrays_and_back
def empty(
*args,
size=None,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False,
pin_memory=False,
memory_format=None,
):
if args and size:
raise TypeError("empty() got multiple values for argument 'shape'")
if size is None:
size = (
args[0]
if isinstance(args[0], (tuple, list, ivy.Shape, ivy.NativeShape))
else args
)
if isinstance(size, (tuple, list)):
size = tuple(s.to_scalar() if ivy.is_array(s) else s for s in size)
return ivy.empty(shape=size, dtype=dtype, device=device, out=out)
@to_ivy_arrays_and_back
def empty_like(
input,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False,
memory_format=None,
):
ret = ivy.empty_like(input, dtype=dtype, device=device)
return ret
@to_ivy_arrays_and_back
def empty_strided(
size,
stride,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False,
pin_memory=False,
):
max_offsets = [(s - 1) * st for s, st in zip(size, stride)]
items = sum(max_offsets) + 1
empty_array = empty(items, dtype=dtype, device=device)
strided_array = as_strided(empty_array, size, stride)
return strided_array
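# Sizing sketch: the backing buffer only needs sum((size_i - 1) * stride_i) + 1
# elements, e.g. size=(2, 3) with stride=(3, 1) allocates 6 elements and views
# them as an ordinary contiguous 2x3 block.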
@to_ivy_arrays_and_back
def eye(
n, m=None, *, out=None, dtype=None, layout=None, device=None, requires_grad=False
):
return ivy.eye(n, m, dtype=dtype, device=device, out=out)
@to_ivy_arrays_and_back
def from_dlpack(ext_tensor):
return ivy.from_dlpack(ext_tensor)
@to_ivy_arrays_and_back
def from_numpy(data, /):
return ivy.asarray(data, dtype=ivy.dtype(data))
@to_ivy_arrays_and_back
def frombuffer(
buffer,
*,
dtype,
count=-1,
offset=0,
requires_grad=False,
):
return ivy.frombuffer(buffer, dtype=dtype, count=count, offset=offset)
@to_ivy_arrays_and_back
def full(
size,
fill_value,
*,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=None,
):
ret = ivy.full(size, fill_value, dtype=dtype, device=device, out=out)
return ret
@to_ivy_arrays_and_back
def full_like(
input,
fill_value,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False,
memory_format=None,
):
fill_value = ivy.to_scalar(fill_value)
return ivy.full_like(input, fill_value, dtype=dtype, device=device)
@to_ivy_arrays_and_back
def heaviside(input, values, *, out=None):
return ivy.heaviside(input, values, out=out)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def linspace(
start,
end,
steps,
*,
out=None,
dtype=None,
device=None,
layout=None,
requires_grad=False,
):
dtype = torch_frontend.get_default_dtype() if dtype is None else dtype
return ivy.linspace(start, end, num=steps, dtype=dtype, device=device, out=out)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def logspace(
start,
end,
steps,
*,
base=10.0,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False,
):
ret = ivy.logspace(
start, end, num=steps, base=base, dtype=dtype, device=device, out=out
)
return ret
@to_ivy_shape
@to_ivy_arrays_and_back
def ones(*args, size=None, out=None, dtype=None, device=None, requires_grad=False):
if args and size:
raise TypeError("ones() got multiple values for argument 'shape'")
if size is None:
size = (
args[0]
if isinstance(args[0], (tuple, list, ivy.Shape, ivy.NativeShape))
else args
)
return ivy.ones(shape=size, dtype=dtype, device=device, out=out)
@to_ivy_arrays_and_back
def ones_like_v_0p3p0_to_0p3p1(input, out=None):
    return ivy.ones_like(input, out=out)
@to_ivy_arrays_and_back
def ones_like_v_0p4p0_and_above(
input,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False,
memory_format=None,
):
ret = ivy.ones_like(input, dtype=dtype, device=device)
return ret
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, "torch")
@to_ivy_arrays_and_back
def polar(
abs,
angle,
*,
out=None,
):
return complex(abs * angle.cos(), abs * angle.sin(), out=out)
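# polar composes abs * exp(1j * angle); e.g. polar(1.0, pi / 2) is roughly
# 0 + 1j (up to floating-point error in cos(pi / 2)).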
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def range(
*args,
dtype=None,
layout=None,
device=None,
requires_grad=False,
):
if len(args) == 1:
end = args[0]
start = 0
step = 1
elif len(args) == 2:
end = args[1]
start = args[0]
step = 1
elif len(args) == 3:
start, end, step = args
else:
ivy.utils.assertions.check_true(
len(args) == 1 or len(args) == 3,
"only 1 or 3 positional arguments are supported",
)
range_vec = []
elem = start
while 1:
range_vec = range_vec + [elem]
elem += step
if start == end:
break
if start < end:
if elem > end:
break
else:
if elem < end:
break
return ivy.array(range_vec, dtype=dtype, device=device)
@to_ivy_arrays_and_back
def tensor(
data,
*,
dtype=None,
device=None,
requires_grad=False,
pin_memory=False,
):
return ivy.array(data, dtype=dtype, device=device)
@to_ivy_shape
@to_ivy_arrays_and_back
def zeros(*args, size=None, out=None, dtype=None, device=None, requires_grad=False):
if args and size:
raise TypeError("zeros() got multiple values for argument 'shape'")
if size is None:
size = (
args[0]
if isinstance(args[0], (tuple, list, ivy.Shape, ivy.NativeShape))
else args
)
return ivy.zeros(shape=size, dtype=dtype, device=device, out=out)
@to_ivy_arrays_and_back
def zeros_like(
input,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False,
memory_format=None,
):
ret = ivy.zeros_like(input, dtype=dtype, device=device)
return ret
| ivy/ivy/functional/frontends/torch/creation_ops.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/creation_ops.py",
"repo_id": "ivy",
"token_count": 4064
} | 44 |
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"2.2 and below": (
"complex",
"float16",
)
},
"torch",
)
def celu(input, alpha=1.0, inplace=False):
return ivy.celu(input, alpha=alpha)
def celu_(input, alpha=1.0):
return celu(input, alpha=alpha, inplace=True)
@to_ivy_arrays_and_back
def elu(input, alpha=1.0, inplace=False):
prod = ivy.multiply(
alpha,
ivy.subtract(ivy.exp(input), 1),
)
return ivy.where(ivy.greater(input, 0), input, prod)
def elu_(input, alpha=1.0):
return elu(input, alpha=alpha, inplace=True)
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
"torch",
)
def gelu(input, *, approximate="none"):
if approximate == "none":
return ivy.gelu(input, approximate=False)
elif approximate == "tanh":
return ivy.gelu(input, approximate=True)
else:
raise ivy.utils.exceptions.IvyException(
"`approximate` argument must be either 'none' or 'tanh'."
)
@to_ivy_arrays_and_back
def glu(input, dim=-1):
a, b = ivy.split(input, num_or_size_splits=2, axis=dim)
return ivy.multiply(a, ivy.sigmoid(b))
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10, dim=-1):
gumbels = -ivy.empty_like(logits).exponential().log()
gumbels = (logits + gumbels) / tau
y_soft = ivy.softmax(gumbels, axis=dim)
if hard:
indices = y_soft.max(axis=dim, keepdims=True)[1]
y_hard = ivy.zeros_like(logits)
updates = ivy.ones_like(indices)
y_hard = ivy.scatter_nd(indices, updates, reduction="replace", out=y_hard)
ret = y_hard - y_soft.stop_gradient(preserve_type=True) + y_soft
else:
ret = y_soft
return ret
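# Note on the hard branch above: returning y_hard - stop_grad(y_soft) + y_soft is
# the usual straight-through trick: the forward value is the one-hot sample while
# gradients flow through the differentiable softmax relaxation.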
@to_ivy_arrays_and_back
def hardshrink(input, lambd=0.5):
mask = ivy.logical_or(ivy.greater(input, lambd), ivy.less(input, -lambd))
return ivy.where(mask, input, 0.0)
@to_ivy_arrays_and_back
def hardsigmoid(input, inplace=False):
return ivy.divide(ivy.minimum(ivy.maximum(ivy.add(input, 3), 0), 6), 6)
@to_ivy_arrays_and_back
def hardswish(input, inplace=False):
relu6_val = ivy.relu6(ivy.add(input, 3))
return ivy.multiply(input, ivy.divide(relu6_val, 6))
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def hardtanh(input, min_val=-1.0, max_val=1.0, inplace=False):
less = ivy.where(ivy.less(input, min_val), min_val, input)
return ivy.where(ivy.greater(input, max_val), max_val, less).astype(input.dtype)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def hardtanh_(input, min_val=-1.0, max_val=1.0):
return hardtanh(input, min_val=min_val, max_val=max_val, inplace=True)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def leaky_relu(input, negative_slope=0.01, inplace=False):
return ivy.leaky_relu(input, alpha=negative_slope)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def leaky_relu_(input, negative_slope=0.01):
return leaky_relu(input, negative_slope=negative_slope, inplace=True)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.2 and below": ("float",)}, "torch")
def local_response_norm(input, size, alpha=0.0001, beta=0.75, k=1.0):
non_batched = input.ndim == 3
if non_batched:
input = ivy.expand_dims(input, axis=2)
ret = ivy.local_response_norm(
input, size, bias=k, alpha=alpha, beta=beta, average=True, data_format="NCHW"
)
if non_batched:
ret = ivy.squeeze(ret, axis=2)
return ret
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def log_softmax(input, dim=None, _stacklevel=3, dtype=None):
if dtype:
input = ivy.astype(ivy.array(input), ivy.as_ivy_dtype(dtype))
if dim is None:
dim = -1
return ivy.log_softmax(input, axis=dim)
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
"torch",
)
def logsigmoid(input):
return ivy.logsigmoid(input)
@to_ivy_arrays_and_back
def mish(input, inplace=False):
return ivy.multiply(
input,
ivy.tanh(ivy.softplus(input)),
)
@to_ivy_arrays_and_back
def normalize(input, p=2.0, dim=1, eps=1e-12, out=None):
abs_square = ivy.pow(ivy.abs(input), p)
sum_ = ivy.sum(abs_square, axis=dim, keepdims=True)
pnorm_res = ivy.pow(sum_, 1.0 / p)
max_ = ivy.maximum(pnorm_res, eps)
return ivy.divide(input, max_, out=out)
@to_ivy_arrays_and_back
def prelu(input, weight):
return ivy.add(ivy.maximum(0, input), ivy.multiply(weight, ivy.minimum(0, input)))
@to_ivy_arrays_and_back
def relu(input, inplace=False):
return ivy.relu(input)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
def relu6(input, inplace=False):
return ivy.relu6(input)
@to_ivy_arrays_and_back
def relu_(input):
return relu(input, inplace=True)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def rrelu(input, lower=1.0 / 8, upper=1.0 / 3, training=False, inplace=False):
if training:
# alpha = ivy.random_uniform(low=lower, high=upper)
# ToDo implement alpha correctly after fixing ivy.random_uniform
pass
else:
alpha = (lower + upper) / 2
return ivy.subtract(
ivy.relu(input), ivy.multiply(alpha, ivy.relu(ivy.negative(input)))
)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def rrelu_(input, lower=1.0 / 8, upper=1.0 / 3, training=False):
return rrelu(input, lower=lower, upper=upper, training=training, inplace=True)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, "torch")
def scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None
):
return ivy.scaled_dot_product_attention(
query,
key,
value,
scale=scale,
mask=attn_mask,
dropout_p=dropout_p,
is_causal=is_causal,
)
@to_ivy_arrays_and_back
def selu(input, inplace=False):
return ivy.selu(input)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def sigmoid(input):
return ivy.sigmoid(input)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def silu(input, inplace=False):
return ivy.multiply(input, ivy.sigmoid(input))
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def softmax(input, dim=None, _stacklevel=3, dtype=None):
if dtype:
input = ivy.astype(ivy.array(input), ivy.as_ivy_dtype(dtype))
return ivy.softmax(input, axis=dim)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def softmin(input, dim=None, dtype=None):
if dtype:
input = ivy.astype(ivy.array(input), ivy.as_ivy_dtype(dtype))
return ivy.softmax(-input, axis=dim)
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
"torch",
)
def softplus(input, beta=1, threshold=20):
return ivy.softplus(input, beta=beta, threshold=threshold)
@to_ivy_arrays_and_back
def softshrink(input, lambd=0.5):
low = ivy.where(ivy.less(input, -lambd), ivy.add(input, lambd), 0)
up = ivy.where(ivy.greater(input, lambd), ivy.subtract(input, lambd), 0)
return ivy.add(low, up)
@to_ivy_arrays_and_back
def softsign(input):
return ivy.divide(input, ivy.add(1, ivy.abs(input)))
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def tanh(input):
return ivy.tanh(input)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def tanhshrink(input):
return ivy.subtract(input, ivy.tanh(input))
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def threshold(input, threshold, value, inplace=False):
return ivy.where(ivy.greater(input, threshold), input, value)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def threshold_(input, threshold, value):
return threshold(input, threshold, value, inplace=True)
| ivy/ivy/functional/frontends/torch/nn/functional/non_linear_activation_functions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/nn/functional/non_linear_activation_functions.py",
"repo_id": "ivy",
"token_count": 4032
} | 45 |
import ivy
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_supported_dtypes
import inspect
# --- Helpers --- #
# --------------- #
@to_ivy_arrays_and_back
def _assert(condition, message):
if not condition:
raise Exception(message)
else:
return True
# --- Main --- #
# ------------ #
@with_supported_dtypes({"2.2 and above": ("int64",)}, "torch")
@to_ivy_arrays_and_back
def bincount(x, weights=None, minlength=0):
return ivy.bincount(x, weights=weights, minlength=minlength)
def if_else(cond_fn, body_fn, orelse_fn, vars):
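    # Pair `cond_fn`'s positional parameter names with the supplied `vars` so
    # they can be forwarded to `ivy.if_else` as a single dict of variables.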
cond_keys = inspect.getfullargspec(cond_fn).args
cond_vars = dict(zip(cond_keys, vars))
return ivy.if_else(cond_fn, body_fn, orelse_fn, cond_vars)
@to_ivy_arrays_and_back
def result_type(tensor, other):
return ivy.result_type(tensor, other)
| ivy/ivy/functional/frontends/torch/utilities.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/utilities.py",
"repo_id": "ivy",
"token_count": 353
} | 46 |
# global
import math
# Array API Standard #
# -------------------#
e = math.e
"""IEEE 754 floating-point representation of Euler's constant."""
pi = math.pi
"""IEEE 754 floating-point representation of the mathematical constant π."""
nan = math.nan
"""IEEE 754 floating-point representation of Not a Number (NaN)."""
inf = math.inf
"""IEEE 754 floating-point representation of (positive) infinity."""
newaxis = None
"""An alias for None which is useful for indexing arrays."""
# Mathematical constants #
# ------#
golden = golden_ratio = (1 + math.sqrt(5)) / 2
quetta = 1e30
ronna = 1e27
yotta = 1e24
zetta = 1e21
exa = 1e18
peta = 1e15
tera = 1e12
giga = 1e9
mega = 1e6
kilo = 1e3
hecto = 1e2
deka = 1e1
deci = 1e-1
centi = 1e-2
milli = 1e-3
micro = 1e-6
nano = 1e-9
pico = 1e-12
femto = 1e-15
atto = 1e-18
zepto = 1e-21
yocto = 1e-24
ronto = 1e-27
quecto = 1e-30
# Binary prefixes #
# ------#
kibi = 2**10
mebi = 2**20
gibi = 2**30
tebi = 2**40
pebi = 2**50
exbi = 2**60
zebi = 2**70
yobi = 2**80
| ivy/ivy/functional/ivy/constants.py/0 | {
"file_path": "ivy/ivy/functional/ivy/constants.py",
"repo_id": "ivy",
"token_count": 420
} | 47 |
# global
import logging
from typing import Union, Optional, Tuple, List, Sequence, Literal
# local
import ivy
from ivy.utils.backend import current_backend
from ivy.func_wrapper import (
to_native_arrays_and_back,
handle_out_argument,
handle_nestable,
handle_array_like_without_promotion,
handle_array_function,
handle_device,
inputs_to_ivy_arrays,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
# Helpers #
# ------- #
def _check_valid_dimension_size(std):
ivy.utils.assertions.check_dimensions(std)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@handle_array_function
def eigh_tridiagonal(
alpha: Union[ivy.Array, ivy.NativeArray],
beta: Union[ivy.Array, ivy.NativeArray],
/,
*,
eigvals_only: bool = True,
select: str = "a",
select_range: Optional[
Union[Tuple[int, int], List[int], ivy.Array, ivy.NativeArray]
] = None,
tol: Optional[float] = None,
) -> Union[ivy.Array, Tuple[ivy.Array, ivy.Array]]:
"""Compute the eigenvalues and eigenvectors of a Hermitian tridiagonal
matrix.
Parameters
----------
alpha
A real or complex array of shape (n), the diagonal elements of the
matrix. If alpha is complex, the imaginary part is ignored
(assumed zero) to satisfy the requirement that the matrix be Hermitian.
beta
A real or complex array of shape (n-1), containing the elements of
the first super-diagonal of the matrix. If beta is complex, the first
sub-diagonal of the matrix is assumed to be the conjugate of beta to
satisfy the requirement that the matrix be Hermitian.
eigvals_only
If False, both eigenvalues and corresponding eigenvectors are
computed. If True, only eigenvalues are computed. Default is True.
select
Optional string with values in {'a', 'v', 'i'} (default is 'a') that
determines which eigenvalues to calculate: 'a': all eigenvalues.
'v': eigenvalues in the interval (min, max] given by select_range.
'i': eigenvalues with indices min <= i <= max.
select_range
Size 2 tuple or list or array specifying the range of eigenvalues to
compute together with select. If select is 'a', select_range is ignored.
tol
Optional scalar. Ignored when backend is not Tensorflow. The absolute
tolerance to which each eigenvalue is required. An eigenvalue
(or cluster) is considered to have converged if it lies in an interval
of this width. If tol is None (default), the value eps*|T|_2 is used
where eps is the machine precision, and |T|_2 is the 2-norm of the matrix T.
Returns
-------
eig_vals
The eigenvalues of the matrix in non-decreasing order.
eig_vectors
If eigvals_only is False the eigenvectors are returned in the second output
argument.
    Both the description and the type hints above assume an array input for
simplicity, but this function is *nestable*, and therefore also accepts
:class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> alpha = ivy.array([0., 1., 2.])
>>> beta = ivy.array([0., 1.])
>>> y = ivy.eigh_tridiagonal(alpha, beta)
>>> print(y)
ivy.array([0., 0.38196602, 2.61803389])
>>> alpha = ivy.array([0., 1., 2.])
>>> beta = ivy.array([0., 1.])
>>> y = ivy.eigh_tridiagonal(alpha,
... beta, select='v',
... select_range=[0.2,3.0])
>>> print(y)
ivy.array([0.38196602, 2.61803389])
>>> alpha = ivy.array([0., 1., 2., 3.])
>>> beta = ivy.array([2., 1., 2.])
>>> y = ivy.eigh_tridiagonal(alpha,
... beta,
... eigvals_only=False,
... select='i',
... select_range=[1,2],
... tol=1.)
>>> print(y)
(ivy.array([0.38196602, 2.61803389]), ivy.array([[ 0.35048741, -0.56710052],
[ 0.06693714, -0.74234426],
[-0.74234426, -0.06693714],
[ 0.56710052, 0.35048741]]))
With :class:`ivy.Container` input:
>>> alpha = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([2., 2., 2.]))
>>> beta = ivy.array([0.,2.])
>>> y = ivy.eigh_tridiagonal(alpha, beta)
>>> print(y)
{
a: ivy.array([-0.56155282, 0., 3.56155276]),
b: ivy.array([0., 2., 4.])
}
>>> alpha = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([2., 2., 2.]))
>>> beta = ivy.Container(a=ivy.array([0.,2.]), b=ivy.array([2.,2.]))
>>> y = ivy.eigh_tridiagonal(alpha, beta)
>>> print(y)
{
a: ivy.array([-0.56155282, 0., 3.56155276]),
b: ivy.array([-0.82842714, 2., 4.82842731])
}
"""
x = ivy.diag(alpha)
y = ivy.diag(beta, k=1)
z = ivy.diag(beta, k=-1)
w = x + y + z
eigh_out = ivy.linalg.eigh(w)
eigenvalues = eigh_out.eigenvalues
eigenvectors = eigh_out.eigenvectors
if select == "i":
eigenvalues = eigenvalues[select_range[0] : select_range[1] + 1]
eigenvectors = eigenvectors[:, select_range[0] : select_range[1] + 1]
elif select == "v":
condition = ivy.logical_and(
eigenvalues.greater(select_range[0]),
eigenvalues.less_equal(select_range[1]),
)
eigenvalues = eigenvalues[condition]
eigenvectors = eigenvectors[:, condition]
if eigvals_only:
return eigenvalues
return eigenvalues, eigenvectors
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def diagflat(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
offset: int = 0,
padding_value: float = 0,
align: str = "RIGHT_LEFT",
num_rows: int = -1,
num_cols: int = -1,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> ivy.Array:
"""Return a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
x
Input data, which is flattened and set as the k-th diagonal of the output.
    offset
        Diagonal to set.
        A positive value means a superdiagonal,
        0 refers to the main diagonal,
        and a negative value means a subdiagonal.
    padding_value
        value to fill the off-diagonal entries with. Default is 0.
    num_rows
        number of rows of the output array. Default is -1, in which case it is
        inferred from the input.
    num_cols
        number of columns of the output array. Default is -1, in which case it
        is inferred from the input.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The 2-D output array.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([[1,2], [3,4]])
>>> ivy.diagflat(x)
ivy.array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> x = ivy.array([1,2])
    >>> ivy.diagflat(x, offset=1)
ivy.array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
return current_backend(x).diagflat(
x,
offset=offset,
padding_value=padding_value,
align=align,
num_rows=num_rows,
num_cols=num_cols,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def kron(
a: Union[ivy.Array, ivy.NativeArray],
b: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a
First input array.
b
Second input array
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Array representing the Kronecker product of the input arrays.
Examples
--------
>>> a = ivy.array([1,2])
>>> b = ivy.array([3,4])
>>> ivy.kron(a, b)
ivy.array([3, 4, 6, 8])
"""
return current_backend(a, b).kron(a, b, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def matrix_exp(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the matrix exponential of a square matrix.
Parameters
----------
    x
Square matrix.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
the matrix exponential of the input.
Examples
--------
>>> x = ivy.array([[[1., 0.],
[0., 1.]],
[[2., 0.],
[0., 2.]]])
>>> ivy.matrix_exp(x)
ivy.array([[[2.7183, 1.0000],
[1.0000, 2.7183]],
[[7.3891, 1.0000],
[1.0000, 7.3891]]])
"""
return current_backend(x).matrix_exp(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_device
def eig(
x: Union[ivy.Array, ivy.NativeArray],
/,
) -> Tuple[ivy.Array]:
"""Compute eigenvalies and eigenvectors of x. Returns a tuple with two
elements: first is the set of eigenvalues, second is the set of
eigenvectors.
Parameters
----------
x
An array of shape (..., N, N).
Returns
-------
w
Not necessarily ordered array(..., N) of eigenvalues in complex type.
v
An array(..., N, N) of normalized (unit “length”) eigenvectors,
the column v[:,i] is the eigenvector corresponding to the eigenvalue w[i].
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([[1,2], [3,4]])
>>> w, v = ivy.eig(x)
>>> w; v
ivy.array([-0.37228132+0.j, 5.37228132+0.j])
ivy.array([[-0.82456484+0.j, -0.41597356+0.j],
[ 0.56576746+0.j, -0.90937671+0.j]])
>>> x = ivy.array([[[1,2], [3,4]], [[5,6], [5,6]]])
>>> w, v = ivy.eig(x)
>>> w; v
ivy.array(
[
[-3.72281323e-01+0.j, 5.37228132e+00+0.j],
[3.88578059e-16+0.j, 1.10000000e+01+0.j]
]
)
ivy.array([
[
[-0.82456484+0.j, -0.41597356+0.j], [0.56576746+0.j, -0.90937671+0.j]
],
[
[-0.76822128+0.j, -0.70710678+0.j], [0.6401844 +0.j, -0.70710678+0.j]
]
])
"""
return current_backend(x).eig(x)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_device
def eigvals(
x: Union[ivy.Array, ivy.NativeArray],
/,
) -> ivy.Array:
"""Compute eigenvalues of x. Returns a set of eigenvalues.
Parameters
----------
x
An array of shape (..., N, N).
Returns
-------
w
Not necessarily ordered array(..., N) of eigenvalues in complex type.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([[1,2], [3,4]])
>>> w = ivy.eigvals(x)
>>> w
ivy.array([-0.37228132+0.j, 5.37228132+0.j])
>>> x = ivy.array([[[1,2], [3,4]], [[5,6], [5,6]]])
>>> w = ivy.eigvals(x)
>>> w
ivy.array(
[
[-0.37228132+0.j, 5.37228132+0.j],
[ 0. +0.j, 11. +0.j]
]
)
"""
return current_backend(x).eigvals(x)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def adjoint(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the complex conjugate transpose of x.
Parameters
----------
x
An array with more than one dimension.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
the complex conjugate transpose of the input.
Examples
--------
    >>> x = ivy.array([[1.-1.j, 2.+2.j],
    ...                [3.+3.j, 4.-4.j]])
>>> ivy.adjoint(x)
ivy.array([[1.+1.j, 3.-3.j],
[2.-2.j, 4.+4.j]])
"""
return current_backend(x).adjoint(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def lu_factor(
A: Union[ivy.Array, ivy.NativeArray],
/,
*,
pivot: bool = True,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Tuple[Union[ivy.Array, ivy.NativeArray], Union[ivy.Array, ivy.NativeArray]]:
"""
Parameters
----------
A
tensor of shape (*, m, n) where * is zero or more batch dimensions.
pivot
        Whether to compute the LU decomposition with partial pivoting, or the regular
        LU decomposition. ``pivot=False`` is not supported on CPU. Default: ``True``.
out
tuple of two tensors to write the output to. Ignored if None. Default: None.
Returns
-------
ret
A named tuple (LU, pivots).
"""
return current_backend(A).lu_factor(A, pivot=pivot, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def lu_solve(
lu: Union[ivy.Array, ivy.NativeArray],
p: Union[ivy.Array, ivy.NativeArray],
b: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
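    """Solve a linear system ``A x = b`` given the LU factorization of ``A``
    (as returned by :func:`lu_factor`).
    Parameters
    ----------
    lu
        the LU factorization of the coefficient matrix.
    p
        the pivots computed alongside the factorization.
    b
        the right-hand side of the system.
    out
        optional output array, for writing the result to.
    Returns
    -------
    ret
        the solution ``x`` of the linear system.
    """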
return current_backend(lu, p, b).lu_solve(lu, p, b, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def solve_triangular(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
upper: bool = True,
adjoint: bool = False,
unit_diagonal: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the unique solution to the triangular system of linear equations
AX = B.
Parameters
----------
x1
Triangular coefficient array A of shape (..., N, N), with no zeros on diagonal.
x2
Right-hand side array B of shape (..., N, K).
upper
Whether the input `x1` is upper triangular.
adjoint
Whether to take the adjoint (conjugate transpose) of `x1` as the matrix A.
unit_diagonal
Whether to ignore the diagonal entries of A and assume them all equal to 1.
out
Optional output array. If provided, the output array to store the result.
Returns
-------
ret
The solution X, which has the same shape as B.
Examples
--------
With :class:`ivy.Array` inputs:
>>> a = ivy.array([[3, 0, 0, 0],
... [2, 1, 0, 0],
... [1, 0, 1, 0],
... [1, 1, 1, 1]], dtype=ivy.float32)
>>> b = ivy.array([[4],
... [2],
... [4],
... [2]], dtype=ivy.float32)
>>> x = ivy.solve_triangular(a, b, upper=False)
>>> ivy.matmul(a, x)
ivy.array([[4.],
[2.],
[4.],
[2.]])
"""
return current_backend(x1, x2).solve_triangular(
x1, x2, upper=upper, adjoint=adjoint, unit_diagonal=unit_diagonal, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
def multi_dot(
x: Sequence[Union[ivy.Array, ivy.NativeArray]],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the dot product of two or more matrices in a single function
call, while selecting the fastest evaluation order.
Parameters
----------
x
sequence of matrices to multiply.
out
optional output array, for writing the result to. It must have a valid
shape, i.e. the resulting shape after applying regular matrix multiplication
to the inputs.
Returns
-------
ret
dot product of the arrays.
Examples
--------
With :class:`ivy.Array` input:
>>> A = ivy.arange(2 * 3).reshape((2, 3))
>>> B = ivy.arange(3 * 2).reshape((3, 2))
>>> C = ivy.arange(2 * 2).reshape((2, 2))
>>> ivy.multi_dot((A, B, C))
ivy.array([[ 26, 49],
[ 80, 148]])
>>> A = ivy.arange(2 * 3).reshape((2, 3))
>>> B = ivy.arange(3 * 2).reshape((3, 2))
>>> C = ivy.arange(2 * 2).reshape((2, 2))
>>> D = ivy.zeros((2, 2))
>>> ivy.multi_dot((A, B, C), out=D)
>>> print(D)
ivy.array([[ 26, 49],
[ 80, 148]])
"""
return current_backend(x).multi_dot(x, out=out)
multi_dot.mixed_backend_wrappers = {
"to_add": ("handle_device",),
"to_skip": (),
}
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def cond(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
p: Optional[Union[int, float, str]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the condition number of x.
Parameters
----------
x
An array with more than one dimension.
p
The order of the norm of the matrix (see :func:`ivy.norm` for details).
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
the condition number of the input.
Examples
--------
>>> x = ivy.array([[1., 2.],
... [3., 4.]])
>>> ivy.cond(x)
ivy.array(14.933034)
>>> x = ivy.array([[1., 2.],
... [3., 4.]])
>>> ivy.cond(x, p=ivy.inf)
ivy.array(21.0)
"""
return current_backend(x).cond(x, p=p, out=out)
# This code has been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/tenalg/core_tenalg/_kronecker.py
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def kronecker(
x: Sequence[Union[ivy.Array, ivy.NativeArray]],
skip_matrix: Optional[int] = None,
reverse: Optional[bool] = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Kronecker product of a list of matrices.
Parameters
----------
x
Sequence of matrices
skip_matrix
if not None, index of a matrix to skip
reverse
if True, the order of the matrices is reversed
Returns
-------
    kronecker_product: matrix of shape ``(prod(n_rows), prod(n_columns))``
where ``prod(n_rows) = prod([m.shape[0] for m in matrices])``
and ``prod(n_columns) = prod([m.shape[1] for m in matrices])``
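    Examples
    --------
    A small illustrative sketch (the values follow directly from the
    definition of the Kronecker product):
    >>> a = ivy.array([[1, 2], [3, 4]])
    >>> b = ivy.array([[0, 1], [1, 0]])
    >>> ivy.kronecker([a, b])
    ivy.array([[0, 1, 0, 2],
               [1, 0, 2, 0],
               [0, 3, 0, 4],
               [3, 0, 4, 0]])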
"""
if skip_matrix is not None:
x = [x[i] for i in range(len(x)) if i != skip_matrix]
if reverse:
order = -1
else:
order = 1
for i, matrix in enumerate(x[::order]):
if not i:
res = matrix
else:
res = ivy.kron(res, matrix, out=out)
return res
# The code has been adapted from tensorly.khatri_rao
# https://github.com/tensorly/tensorly/blob/main/tensorly/tenalg/core_tenalg/_khatri_rao.py#L9
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def khatri_rao(
x: Sequence[Union[ivy.Array, ivy.NativeArray]],
weights: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
skip_matrix: Optional[Sequence[int]] = None,
mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Khatri-Rao product of a sequence of matrices.
This can be seen as a column-wise kronecker product.
If one matrix only is given, that matrix is directly returned.
Parameters
----------
x
Sequence of tensors with the same number of columns, i.e.::
            for i in range(len(x)):
x[i].shape = (n_i, m)
weights
        array of weights for each rank, of length m, the number of columns of the factors
(i.e. m == factor[i].shape[1] for any factor)
skip_matrix
if not None, index of a matrix to skip
mask
array of 1s and 0s of length m
out
optional output array, for writing the result to. It must have a shape that the
result can broadcast to.
Returns
-------
khatri_rao_product: ivy.Array of shape ``(prod(n_i), m)``
where ``prod(n_i) = prod([m.shape[0] for m in input])``
i.e. the product of the number of rows of all the input in the product.
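    Examples
    --------
    A small illustrative sketch (the result is the column-wise Kronecker
    product of the inputs):
    >>> a = ivy.array([[1, 2], [3, 4]])
    >>> b = ivy.array([[0, 1], [1, 0]])
    >>> ivy.khatri_rao([a, b])
    ivy.array([[0, 2],
               [1, 0],
               [0, 4],
               [3, 0]])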
"""
if skip_matrix is not None:
x = [x[i] for i in range(len(x)) if i != skip_matrix]
# Khatri-rao of only one matrix: just return that matrix
if len(x) == 1:
if ivy.exists(out):
return ivy.inplace_update(out, x[0])
return x[0]
if len(x[0].shape) == 2:
n_columns = x[0].shape[1]
else:
n_columns = 1
x = [ivy.reshape(m, (-1, 1)) for m in x]
logging.warning(
"Khatri-rao of a series of vectors instead of input. "
"Considering each as a matrix with 1 column."
)
# Testing whether the input have the proper size
for i, matrix in enumerate(x):
if len(matrix.shape) != 2:
raise ValueError(
"All the input must have exactly 2 dimensions!"
f"Matrix {i} has dimension {len(matrix.shape)} != 2."
)
if matrix.shape[1] != n_columns:
raise ValueError(
"All input must have same number of columns!"
f"Matrix {i} has {matrix.shape[1]} columns != {n_columns}."
)
for i, e in enumerate(x[1:]):
if not i:
if weights is None:
res = x[0]
else:
res = x[0] * ivy.reshape(weights, (1, -1))
s1, s2 = ivy.shape(res)
s3, s4 = ivy.shape(e)
a = ivy.reshape(res, (s1, 1, s2))
b = ivy.reshape(e, (1, s3, s4))
res = ivy.reshape(a * b, (-1, n_columns))
m = ivy.reshape(mask, (1, -1)) if mask is not None else 1
res = res * m
if ivy.exists(out):
return ivy.inplace_update(out, res)
return res
# The following code has been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/tenalg/core_tenalg/n_mode_product.py#L5
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def mode_dot(
x: Union[ivy.Array, ivy.NativeArray],
/,
matrix_or_vector: Union[ivy.Array, ivy.NativeArray],
mode: int,
transpose: Optional[bool] = False,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""N-mode product of a tensor and a matrix or vector at the specified mode.
Parameters
----------
x
tensor of shape ``(i_1, ..., i_k, ..., i_N)``
matrix_or_vector
1D or 2D array of shape ``(J, i_k)`` or ``(i_k, )``
matrix or vectors to which to n-mode multiply the tensor
mode
        int in the range ``[0, len(x.shape))``; the mode along which the product is taken
transpose
If True, the matrix is transposed.
For complex tensors, the conjugate transpose is used.
out
optional output array, for writing the result to. It must have a shape that the
result can broadcast to.
Returns
-------
ivy.Array
`mode`-mode product of `tensor` by `matrix_or_vector`
* of shape :math:`(i_1, ..., i_{k-1}, J, i_{k+1}, ..., i_N)`
if matrix_or_vector is a matrix
* of shape :math:`(i_1, ..., i_{k-1}, i_{k+1}, ..., i_N)`
if matrix_or_vector is a vector
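    Examples
    --------
    A shape-level sketch (values omitted; the mode is 0-indexed):
    >>> x = ivy.zeros((2, 3, 4))
    >>> m = ivy.zeros((5, 3))
    >>> y = ivy.mode_dot(x, m, 1)  # y has shape (2, 5, 4)
    >>> v = ivy.zeros((4,))
    >>> z = ivy.mode_dot(x, v, 2)  # z has shape (2, 3)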
"""
# the mode along which to fold might decrease if we take product with a vector
fold_mode = mode
new_shape = list(x.shape)
ndims = len(matrix_or_vector.shape)
if ndims == 2: # Tensor times matrix
# Test for the validity of the operation
dim = 0 if transpose else 1
if matrix_or_vector.shape[dim] != x.shape[mode]:
raise ValueError(
f"shapes {x.shape} and {matrix_or_vector.shape} not aligned in"
f" mode-{mode} multiplication: {x.shape[mode]} (mode {mode}) !="
f" {matrix_or_vector.shape[dim]} (dim 1 of matrix)"
)
if transpose:
matrix_or_vector = ivy.conj(ivy.permute_dims(matrix_or_vector, (1, 0)))
new_shape[mode] = matrix_or_vector.shape[0]
vec = False
elif ndims == 1: # Tensor times vector
if matrix_or_vector.shape[0] != x.shape[mode]:
raise ValueError(
f"shapes {x.shape} and {matrix_or_vector.shape} not aligned for"
f" mode-{mode} multiplication: {x.shape[mode]} (mode {mode}) !="
f" {matrix_or_vector.shape[0]} (vector size)"
)
if len(new_shape) > 1:
new_shape.pop(mode)
else:
new_shape = []
vec = True
else:
raise ValueError(
"Can only take n_mode_product with a vector or a matrix."
f"Provided array of dimension {ndims} not in [1, 2]."
)
res = ivy.matmul(matrix_or_vector, ivy.unfold(x, mode))
if vec: # We contracted with a vector, leading to a vector
return ivy.reshape(res, new_shape, out=out)
    else:  # tensor times matrix: refold the unfolding back into a tensor
return ivy.fold(res, fold_mode, new_shape, out=out)
# The following code has been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/tenalg/core_tenalg/n_mode_product.py#L81
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def multi_mode_dot(
x: Union[ivy.Array, ivy.NativeArray],
mat_or_vec_list: Sequence[Union[ivy.Array, ivy.NativeArray]],
/,
modes: Optional[Sequence[int]] = None,
skip: Optional[Sequence[int]] = None,
transpose: Optional[bool] = False,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
r"""N-mode product of a tensor and several matrices or vectors over several
modes.
Parameters
----------
x
the input tensor
mat_or_vec_list
sequence of matrices or vectors of length ``tensor.ndim``
skip
None or int, optional, default is None
If not None, index of a matrix to skip.
modes
None or int list, optional, default is None
transpose
        If True, the matrices or vectors in the list are transposed.
For complex tensors, the conjugate transpose is used.
out
optional output array, for writing the result to. It must have a shape that the
result can broadcast to.
Returns
-------
ivy.Array
tensor times each matrix or vector in the list at mode `mode`
Notes
-----
If no modes are specified, just assumes there is one matrix or vector per mode and returns:
:math:`\\text{x }\\times_0 \\text{ matrix or vec list[0] }\\times_1 \\cdots \\times_n \\text{ matrix or vec list[n] }`
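    Examples
    --------
    A shape-level sketch (values omitted; one matrix per specified mode):
    >>> x = ivy.zeros((2, 3, 4))
    >>> mats = [ivy.zeros((5, 3)), ivy.zeros((6, 4))]
    >>> y = ivy.multi_mode_dot(x, mats, modes=[1, 2])  # y has shape (2, 5, 6)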
""" # noqa: E501
if modes is None:
modes = range(len(mat_or_vec_list))
decrement = 0 # If we multiply by a vector, we diminish the dimension of the tensor
res = x
# Order of mode dots doesn't matter for different modes
# Sorting by mode shouldn't change order for equal modes
factors_modes = sorted(zip(mat_or_vec_list, modes), key=lambda x: x[1])
    for i, (mat_or_vec, mode) in enumerate(factors_modes):
        ndims = len(mat_or_vec.shape)
        if (skip is not None) and (i == skip):
            continue
        if transpose and ndims == 2:
            res = mode_dot(
                res,
                ivy.conj(ivy.permute_dims(mat_or_vec, (1, 0))),
                mode - decrement,
            )
        else:
            res = mode_dot(res, mat_or_vec, mode - decrement)
if ndims == 1:
decrement += 1
if ivy.exists(out):
return ivy.inplace_update(out, res)
return res
def _svd_checks(x, n_eigenvecs=None):
"""Run common checks to all of the SVD methods.
Parameters
----------
matrix : 2D-array
n_eigenvecs : int, optional, default is None
if specified, number of eigen[vectors-values] to return
Returns
-------
n_eigenvecs : int
the number of eigenvectors to solve for
min_dim : int
the minimum dimension of matrix
max_dim : int
the maximum dimension of matrix
"""
# ndims = len(x.shape)
# if ndims != 2:
# raise ValueError(f"matrix be a matrix. matrix.ndim is {ndims} != 2")
dim_1, dim_2 = ivy.shape(x)[-2:]
min_dim, max_dim = min(dim_1, dim_2), max(dim_1, dim_2)
if n_eigenvecs is None:
n_eigenvecs = max_dim
if n_eigenvecs > max_dim:
logging.warning(
f"Trying to compute SVD with n_eigenvecs={n_eigenvecs}, which is larger "
f"than max(matrix.shape)={max_dim}. Setting n_eigenvecs to {max_dim}."
)
n_eigenvecs = max_dim
return n_eigenvecs, min_dim, max_dim
# This function has been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/tenalg/svd.py#L12
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def svd_flip(
U: Union[ivy.Array, ivy.NativeArray],
V: Union[ivy.Array, ivy.NativeArray],
/,
u_based_decision: Optional[bool] = True,
) -> Tuple[ivy.Array, ivy.Array]:
"""Sign correction to ensure deterministic output from SVD. Adjusts the
columns of u and the rows of v such that the loadings in the columns in u
that are largest in absolute value are always positive. This function is
    borrowed from scikit-learn/utils/extmath.py.
Parameters
----------
U
left singular matrix output of SVD
V
right singular matrix output of SVD
u_based_decision
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of U, rows of V
max_abs_cols = ivy.argmax(ivy.abs(U), axis=0)
signs = ivy.sign(
ivy.array(
[U[i, j] for (i, j) in zip(max_abs_cols, range(ivy.shape(U)[1]))],
)
)
U = U * signs
if ivy.shape(V)[0] > ivy.shape(U)[1]:
signs = ivy.concat((signs, ivy.ones(ivy.shape(V)[0] - ivy.shape(U)[1])))
V = V * signs[: ivy.shape(V)[0]][:, None]
else:
# rows of V, columns of U
max_abs_rows = ivy.argmax(ivy.abs(V), axis=1)
signs = ivy.sign(
ivy.array(
[V[i, j] for (i, j) in zip(range(ivy.shape(V)[0]), max_abs_rows)],
)
)
V = V * signs[:, None]
if ivy.shape(U)[1] > ivy.shape(V)[0]:
signs = ivy.concat(
(
signs,
ivy.ones(
ivy.shape(U)[1] - ivy.shape(V)[0],
),
)
)
U = U * signs[: ivy.shape(U)[1]]
return U, V
# This function has been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/tenalg/svd.py#L65
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def make_svd_non_negative(
x: Union[ivy.Array, ivy.NativeArray],
U: Union[ivy.Array, ivy.NativeArray],
S: Union[ivy.Array, ivy.NativeArray],
V: Union[ivy.Array, ivy.NativeArray],
/,
*,
nntype: Optional[Literal["nndsvd", "nndsvda"]] = "nndsvd",
) -> Tuple[ivy.Array, ivy.Array]:
"""Use NNDSVD method to transform SVD results into a non-negative form.
This method leads to more efficient solving with NNMF [1].
Parameters
----------
x
tensor being decomposed.
U
left singular matrix from SVD.
S
diagonal matrix from SVD.
V
right singular matrix from SVD.
nntype
        whether to fill small values with 0.0 (``"nndsvd"``, the default),
        or with the tensor mean (``"nndsvda"``).
[1]: Boutsidis & Gallopoulos. Pattern Recognition, 41(4): 1350-1362, 2008.
"""
W = ivy.zeros_like(U)
H = ivy.zeros_like(V)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = ivy.sqrt(S[0]) * ivy.abs(U[:, 0])
H[0, :] = ivy.sqrt(S[0]) * ivy.abs(V[0, :])
for j in range(1, len(S)):
a, b = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
a_p, b_p = ivy.where(a < 0.0, 0, a), ivy.where(b < 0.0, 0.0, b)
# a_p, b_p = ivy.clip(a, 0.0), ivy.clip(b, 0.0)
# a_n, b_n = ivy.abs(ivy.clip(a, 0.0)), ivy.abs(ivy.clip(b, 0.0))
a_n, b_n = ivy.abs(ivy.where(a > 0.0, 0.0, a)), ivy.abs(
ivy.where(b > 0.0, 0.0, b)
)
# and their norms
a_p_nrm, b_p_nrm = float(ivy.vector_norm(a_p)), float(ivy.vector_norm(b_p))
a_n_nrm, b_n_nrm = float(ivy.vector_norm(a_n)), float(ivy.vector_norm(b_n))
m_p, m_n = a_p_nrm * b_p_nrm, a_n_nrm * b_n_nrm
# choose update
if m_p > m_n:
u = a_p / a_p_nrm
v = b_p / b_p_nrm
sigma = m_p
else:
u = a_n / a_n_nrm
v = b_n / b_n_nrm
sigma = m_n
lbd = float(ivy.sqrt(S[j] * sigma))
W[:, j] = lbd * u
H[j, :] = lbd * v
    # Replace small or negative entries of W and H, either with zero
    # ("nndsvd") or with the mean of the input tensor ("nndsvda").
    eps = ivy.finfo(x.dtype).eps
if nntype == "nndsvd":
W = ivy.soft_thresholding(W, eps)
H = ivy.soft_thresholding(H, eps)
elif nntype == "nndsvda":
avg = ivy.mean(x)
W = ivy.where(eps > W, ivy.ones(ivy.shape(W)) * avg, W)
H = ivy.where(eps > H, ivy.ones(ivy.shape(H)) * avg, H)
else:
raise ValueError(
f'Invalid nntype parameter: got {nntype} instead of one of ("nndsvd",'
' "nndsvda")'
)
return W, H
# The following function has been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/tenalg/svd.py#L206
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def truncated_svd(
x: Union[ivy.Array, ivy.NativeArray],
/,
compute_uv: bool = True,
n_eigenvecs: Optional[int] = None,
) -> Union[ivy.Array, Tuple[ivy.Array, ivy.Array, ivy.Array]]:
"""Compute a truncated SVD on `x` using the standard SVD.
Parameters
----------
x
2D-array
compute_uv
If ``True`` then left and right singular vectors will be computed and returned
in ``U`` and ``Vh``, respectively. Otherwise, only the singular values will be
computed, which can be significantly faster.
n_eigenvecs
if specified, number of eigen[vectors-values] to return
else full matrices will be returned
Returns
-------
ret
a namedtuple ``(U, S, Vh)``
Each returned array must have the same floating-point data type as ``x``.
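    Examples
    --------
    A shape-level sketch (values omitted; the factors are truncated to
    ``n_eigenvecs`` components):
    >>> x = ivy.random_uniform(shape=(4, 3))
    >>> U, S, Vh = ivy.truncated_svd(x, n_eigenvecs=2)
    >>> # U has shape (4, 2), S has shape (2,) and Vh has shape (2, 3)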
"""
n_eigenvecs, min_dim, _ = _svd_checks(x, n_eigenvecs=n_eigenvecs)
    full_matrices = n_eigenvecs > min_dim
if compute_uv:
U, S, Vh = ivy.svd(x, full_matrices=full_matrices, compute_uv=True)
return U[:, :n_eigenvecs], S[:n_eigenvecs], Vh[:n_eigenvecs, :]
else:
S = ivy.svd(x, full_matrices=full_matrices, compute_uv=False)
return S[:n_eigenvecs]
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def tensor_train(
input_tensor: Union[ivy.Array, ivy.NativeArray],
rank: Union[int, Sequence[int]],
/,
*,
svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
verbose: Optional[bool] = False,
) -> ivy.TTTensor:
"""TT decomposition via recursive SVD.
Decomposes the input into a sequence of order-3 tensors (factors)
Also known as Tensor-Train decomposition [1]_
Parameters
----------
input_tensor
tensor to decompose
rank
maximum allowable TT rank of the factors
if int, then this is the same for all the factors
if int list, then rank[k] is the rank of the kth factor
svd
function to use to compute the SVD
verbose
level of verbosity
Returns
-------
factors
order-3 tensors of the TT decomposition
[1]: Ivan V. Oseledets. "Tensor-train decomposition",
SIAM J. Scientific Computing, 33(5):2295–2317, 2011.
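    Examples
    --------
    A shape-level sketch (values omitted; the ``factors`` attribute of the
    returned :class:`ivy.TTTensor` is assumed here for illustration):
    >>> x = ivy.random_uniform(shape=(3, 4, 5))
    >>> tt = ivy.tensor_train(x, [1, 2, 2, 1])
    >>> # tt.factors holds order-3 cores of shapes (1, 3, 2), (2, 4, 2), (2, 5, 1)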
"""
rank = ivy.TTTensor.validate_tt_rank(ivy.shape(input_tensor), rank=rank)
tensor_size = input_tensor.shape
n_dim = len(tensor_size)
unfolding = input_tensor
factors = [None] * n_dim
for k in range(n_dim - 1):
n_row = int(rank[k] * tensor_size[k])
unfolding = ivy.reshape(unfolding, (n_row, -1))
(n_row, n_column) = unfolding.shape
current_rank = min(n_row, n_column, rank[k + 1])
U, S, V = _svd_interface(unfolding, n_eigenvecs=current_rank, method=svd)
rank[k + 1] = current_rank
factors[k] = ivy.reshape(U, (rank[k], tensor_size[k], rank[k + 1]))
if verbose is True:
print(
"TT factor " + str(k) + " computed with shape " + str(factors[k].shape)
)
unfolding = ivy.reshape(S, (-1, 1)) * V
(prev_rank, last_dim) = unfolding.shape
factors[-1] = ivy.reshape(unfolding, (prev_rank, last_dim, 1))
if verbose is True:
print(
"TT factor "
+ str(n_dim - 1)
+ " computed with shape "
+ str(factors[n_dim - 1].shape)
)
return ivy.TTTensor(factors)
# TODO uncomment the code below when these svd
# methods have been added
def _svd_interface(
matrix,
method="truncated_svd",
n_eigenvecs=None,
flip_sign=True,
u_based_flip_sign=True,
non_negative=None,
mask=None,
n_iter_mask_imputation=5,
**kwargs,
):
if method == "truncated_svd":
svd_fun = truncated_svd
# elif method == "symeig_svd":
# svd_fun = symeig_svd
# elif method == "randomized_svd":
# svd_fun = randomized_svd
elif callable(method):
svd_fun = method
else:
raise ValueError("Invalid Choice")
U, S, V = svd_fun(matrix, n_eigenvecs=n_eigenvecs, **kwargs)
if mask is not None and n_eigenvecs is not None:
for _ in range(n_iter_mask_imputation):
S = S * ivy.eye(U.shape[-1], V.shape[-2])
matrix = matrix * mask + (U @ S @ V) * (1 - mask)
U, S, V = svd_fun(matrix, n_eigenvecs=n_eigenvecs, **kwargs)
if flip_sign:
U, V = svd_flip(U, V, u_based_decision=u_based_flip_sign)
if non_negative is not False and non_negative is not None:
U, V = make_svd_non_negative(matrix, U, S, V)
return U, S, V
# This function has been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/decomposition/_tucker.py#L22
# TODO update svd type hints when other svd methods have been added
# also update the test
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def initialize_tucker(
x: Union[ivy.Array, ivy.NativeArray],
rank: Sequence[int],
modes: Sequence[int],
/,
*,
init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
seed: Optional[int] = None,
svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
non_negative: Optional[bool] = False,
mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
svd_mask_repeats: Optional[int] = 5,
) -> Tuple[ivy.Array, Sequence[ivy.Array]]:
"""Initialize core and factors used in `tucker`. The type of initialization
is set using `init`. If `init == 'random'` then initialize factor matrices
using `random_state`. If `init == 'svd'` then initialize the `m`th factor
matrix using the `rank` left singular vectors of the `m`th unfolding of the
input tensor.
Parameters
----------
x
input tensor
rank
number of components
modes
modes to consider in the input tensor
seed
Used to create a random seed distribution
when init == 'random'
init
initialization scheme for tucker decomposition.
svd
function to use to compute the SVD
non_negative
if True, non-negative factors are returned
mask
array of booleans with the same shape as ``tensor`` should be 0 where
the values are missing and 1 everywhere else. Note: if tensor is
sparse, then mask should also be sparse with a fill value of 1 (or
True).
svd_mask_repeats
number of iterations for imputing the values in the SVD matrix when
mask is not None
Returns
-------
core
initialized core tensor
factors
list of factors
"""
try:
assert len(x.shape) >= 2
    except AssertionError as e:
raise ValueError(
"expected x to have at least 2 dimensions but it has only"
f" {len(x.shape)} dimension(s)"
) from e
# Initialisation
if init == "svd":
factors = []
for index, mode in enumerate(modes):
mask_unfold = None if mask is None else ivy.unfold(mask, mode)
U, _, _ = _svd_interface(
ivy.unfold(x, mode),
n_eigenvecs=rank[index],
method=svd,
non_negative=non_negative,
mask=mask_unfold,
n_iter_mask_imputation=svd_mask_repeats,
# random_state=random_state,
)
factors.append(U)
# The initial core approximation is needed here for the masking step
core = multi_mode_dot(x, factors, modes=modes, transpose=True)
elif init == "random":
core = (
ivy.random_uniform(
shape=[rank[index] for index in range(len(modes))],
dtype=x.dtype,
seed=seed,
)
+ 0.01
)
factors = [
ivy.random_uniform(
shape=(x.shape[mode], rank[index]), dtype=x.dtype, seed=seed
)
for index, mode in enumerate(modes)
]
else:
(core, factors) = init
if non_negative is True:
factors = [ivy.abs(f) for f in factors]
core = ivy.abs(core)
return (core, factors)
# This function has been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/decomposition/_tucker.py#L98
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def partial_tucker(
x: Union[ivy.Array, ivy.NativeArray],
rank: Optional[Sequence[int]] = None,
modes: Optional[Sequence[int]] = None,
/,
*,
n_iter_max: Optional[int] = 100,
init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
seed: Optional[int] = None,
mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
svd_mask_repeats: Optional[int] = 5,
tol: Optional[float] = 10e-5,
verbose: Optional[bool] = False,
return_errors: Optional[bool] = False,
) -> Tuple[ivy.Array, Sequence[ivy.Array]]:
"""Partial tucker decomposition via Higher Order Orthogonal Iteration (HOI)
Decomposes `tensor` into a Tucker decomposition
exclusively along the provided modes.
Parameters
----------
x
the input tensor
rank
size of the core tensor, ``(len(ranks) == tensor.ndim)``
if int, the same rank is used for all modes
if None, original tensors size will be preserved.
modes
list of the modes on which to perform the decomposition
n_iter_max
maximum number of iteration
init
{'svd', 'random'}, or TuckerTensor optional
if a TuckerTensor is provided, this is used for initialization
svd
str, default is 'truncated_svd'
function to use to compute the SVD,
seed
Used to create a random seed distribution
when init == 'random'
mask
array of booleans with the same shape as ``tensor`` should be 0 where
the values are missing and 1 everywhere else. Note: if tensor is
sparse, then mask should also be sparse with a fill value of 1 (or
True).
svd_mask_repeats
number of iterations for imputing the values in the SVD matrix when
mask is not None
tol
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance.
verbose
        if True, the difference in reconstruction error is printed at each
        iteration.
    return_errors
        if True, the list of reconstruction errors is also returned.
Returns
-------
core : ndarray
core tensor of the Tucker decomposition
factors : ndarray list
list of factors of the Tucker decomposition.
with ``core.shape[i] == (tensor.shape[i], ranks[i]) for i in modes``
"""
if modes is None:
modes = list(range(len(x.shape)))
if rank is None:
logging.warning(
"No value given for 'rank'. The decomposition will preserve the original"
" size."
)
rank = [ivy.shape(x)[mode] for mode in modes]
elif isinstance(rank, int):
logging.warning(
f"Given only one int for 'rank' instead of a list of {len(modes)} modes."
" Using this rank for all modes."
)
rank = tuple(rank for _ in modes)
else:
rank = ivy.TuckerTensor.validate_tucker_rank(x.shape, rank=rank)
# SVD init
core, factors = initialize_tucker(
x,
rank,
modes,
init=init,
svd=svd,
seed=seed,
mask=mask,
svd_mask_repeats=svd_mask_repeats,
)
rec_errors = []
norm_tensor = ivy.sqrt(ivy.sum(x**2))
for iteration in range(n_iter_max):
if mask is not None:
x = x * mask + multi_mode_dot(
core, factors, modes=modes, transpose=False
) * (1 - mask)
for index, mode in enumerate(modes):
core_approximation = multi_mode_dot(
x, factors, modes=modes, skip=index, transpose=True
)
eigenvecs, _, _ = _svd_interface(
ivy.unfold(core_approximation, mode),
n_eigenvecs=rank[index],
# random_state=random_state,
)
factors[index] = eigenvecs
core = multi_mode_dot(x, factors, modes=modes, transpose=True)
# The factors are orthonormal and
# therefore do not affect the reconstructed tensor's norm
norm_core = ivy.sqrt(ivy.sum(core**2))
rec_error = ivy.sqrt(abs(norm_tensor**2 - norm_core**2)) / norm_tensor
rec_errors.append(rec_error)
if iteration > 1:
if verbose:
print(
f"reconstruction error={rec_errors[-1]},"
f" variation={rec_errors[-2] - rec_errors[-1]}."
)
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print(f"converged in {iteration} iterations.")
break
if return_errors:
return (core, factors), rec_errors
return (core, factors)
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def tucker(
x: Union[ivy.Array, ivy.NativeArray],
rank: Optional[Sequence[int]] = None,
/,
*,
fixed_factors: Optional[Sequence[int]] = None,
n_iter_max: Optional[int] = 100,
init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
seed: Optional[int] = None,
mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
svd_mask_repeats: Optional[int] = 5,
tol: Optional[float] = 10e-5,
verbose: Optional[bool] = False,
return_errors: Optional[bool] = False,
):
"""Tucker decomposition via Higher Order Orthogonal Iteration (HOI)
Decomposes `tensor` into a Tucker decomposition:
``tensor = [| core; factors[0], ...factors[-1] |]`` [1]_
Parameters
----------
x
input tensor
rank
size of the core tensor, ``(len(ranks) == tensor.ndim)``
if int, the same rank is used for all modes
fixed_factors
if not None, list of modes for which to keep the factors fixed.
Only valid if a Tucker tensor is provided as init.
n_iter_max
maximum number of iteration
init
{'svd', 'random'}, or TuckerTensor optional
if a TuckerTensor is provided, this is used for initialization
svd
str, default is 'truncated_svd'
function to use to compute the SVD,
seed
Used to create a random seed distribution
when init == 'random'
mask
array of booleans with the same shape as ``tensor`` should be 0 where
the values are missing and 1 everywhere else. Note: if tensor is
sparse, then mask should also be sparse with a fill value of 1 (or
True).
svd_mask_repeats
number of iterations for imputing the values in the SVD matrix when
mask is not None
tol
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
verbose
        if True, the difference in reconstruction error is printed at each
        iteration.
return_errors
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
Returns
-------
ivy.TuckerTensor or ivy.TuckerTensor and
    list of reconstruction errors if return_errors is True.
References
----------
.. [1] tl.G.Kolda and B.W.Bader, "Tensor Decompositions and Applications",
SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
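    Examples
    --------
    A shape-level sketch (values omitted; the ``(core, factors)`` layout of the
    returned :class:`ivy.TuckerTensor` is assumed here for illustration):
    >>> x = ivy.random_uniform(shape=(5, 6, 7))
    >>> core, factors = ivy.tucker(x, [2, 3, 4])
    >>> # core has shape (2, 3, 4); factors have shapes (5, 2), (6, 3), (7, 4)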
"""
if fixed_factors:
try:
(core, factors) = init
except ValueError as e:
raise ValueError(
f"Got fixed_factor={fixed_factors} but no appropriate Tucker tensor was"
' passed for "init".'
) from e
if len(fixed_factors) == len(factors):
return ivy.TuckerTensor((core, factors))
fixed_factors = sorted(fixed_factors)
modes_fixed, factors_fixed = zip(
*[(i, f) for (i, f) in enumerate(factors) if i in fixed_factors]
)
core = multi_mode_dot(core, factors_fixed, modes=modes_fixed)
modes, factors = zip(
*[(i, f) for (i, f) in enumerate(factors) if i not in fixed_factors]
)
init = (core, list(factors))
rank = ivy.TuckerTensor.validate_tucker_rank(x.shape, rank=rank)
(core, new_factors), rec_errors = partial_tucker(
x,
rank,
modes,
n_iter_max=n_iter_max,
init=init,
svd=svd,
tol=tol,
seed=seed,
mask=mask,
verbose=verbose,
svd_mask_repeats=svd_mask_repeats,
return_errors=True,
)
factors = list(new_factors)
for i, e in enumerate(fixed_factors):
factors.insert(e, factors_fixed[i])
core = multi_mode_dot(core, factors_fixed, modes=modes_fixed, transpose=True)
if return_errors:
return ivy.TuckerTensor((core, factors)), rec_errors
return ivy.TuckerTensor((core, factors))
else:
modes = list(range(len(x.shape)))
rank = ivy.TuckerTensor.validate_tucker_rank(x.shape, rank=rank)
(core, factors), rec_errors = partial_tucker(
x,
rank,
modes,
n_iter_max=n_iter_max,
init=init,
svd=svd,
tol=tol,
seed=seed,
mask=mask,
verbose=verbose,
return_errors=True,
)
if return_errors:
return ivy.TuckerTensor((core, factors)), rec_errors
else:
return ivy.TuckerTensor((core, factors))
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def tt_matrix_to_tensor(
tt_matrix: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the full tensor whose TT-Matrix decomposition is given by
'factors' Re- assembles 'factors', which represent a tensor in TT-Matrix
format into the corresponding full tensor.
Parameters
----------
tt_matrix
array of 4D-arrays
TT-Matrix factors (known as core) of shape
(rank_k, left_dim_k, right_dim_k, rank_{k+1})
out
Optional output array. If provided, the output array to store the result.
Returns
-------
output_tensor: array
tensor whose TT-Matrix decomposition was given by 'factors'
Examples
--------
>>> x = ivy.array([[[[[0.49671414],
... [-0.1382643]],
...
... [[0.64768857],
... [1.5230298]]]],
... [[[[-0.23415337],
... [-0.23413695]],
...
... [[1.57921278],
... [0.76743472]]]]])
>>> y = ivy.tt_matrix_to_tensor(x)
>>> print(y)
ivy.array([[[[-0.1163073 , -0.11629914],
[ 0.03237505, 0.03237278]],
[[ 0.78441733, 0.38119566],
[-0.21834874, -0.10610882]]],
[[[-0.15165846, -0.15164782],
[-0.35662258, -0.35659757]],
[[ 1.02283812, 0.49705869],
[ 2.40518808, 1.16882598]]]])
"""
_, in_shape, out_shape, _ = zip(*(f.shape for f in tt_matrix))
ndim = len(in_shape)
full_shape = sum(zip(*(in_shape, out_shape)), ())
order = list(range(0, ndim * 2, 2)) + list(range(1, ndim * 2, 2))
for i, factor in enumerate(tt_matrix):
if not i:
res = factor
else:
res = ivy.tensordot(res, factor, axes=([len(res.shape) - 1], [0]))
return ivy.permute_dims(ivy.reshape(res, full_shape), axes=order, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
def dot(
a: Union[ivy.Array, ivy.NativeArray],
b: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the dot product between two arrays `a` and `b` using the current
backend's implementation. The dot product is defined as the sum of the
element-wise product of the input arrays.
Parameters
----------
a
First input array.
b
Second input array.
out
Optional output array. If provided, the output array to store the result.
Returns
-------
ret
The dot product of the input arrays.
Examples
--------
With :class:`ivy.Array` inputs:
>>> a = ivy.array([1, 2, 3])
>>> b = ivy.array([4, 5, 6])
>>> result = ivy.dot(a, b)
>>> print(result)
ivy.array(32)
>>> a = ivy.array([[1, 2], [3, 4]])
>>> b = ivy.array([[5, 6], [7, 8]])
>>> c = ivy.empty_like(a)
>>> ivy.dot(a, b, out=c)
>>> print(c)
ivy.array([[19, 22],
[43, 50]])
>>> a = ivy.array([[1.1, 2.3, -3.6]])
>>> b = ivy.array([[-4.8], [5.2], [6.1]])
>>> c = ivy.zeros((1, 1))
>>> ivy.dot(a, b, out=c)
>>> print(c)
ivy.array([[-15.28]])
"""
return current_backend(a, b).dot(a, b, out=out)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def general_inner_product(
a: Union[ivy.Array, ivy.NativeArray],
b: Union[ivy.Array, ivy.NativeArray],
n_modes: Optional[int] = None,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Generalised inner products between tensors.
Takes the inner product between the last (respectively first)
`n_modes` of `a` (respectively `b`)
Parameters
----------
a
first input tensor.
b
second input tensor.
n_modes
int, default is None. If None, the traditional inner product is returned
        (i.e. a float); otherwise, the product between the `n_modes` last modes of
        `a` and the `n_modes` first modes of `b` is returned. The resulting tensor's
        order is ``a.ndim + b.ndim - 2 * n_modes``.
out
Optional output array. If provided, the output array to store the result.
Returns
-------
The inner product of the input arrays.
Examples
--------
With :class:`ivy.Array` inputs:
>>> a = ivy.array([1, 2, 3])
>>> b = ivy.array([4, 5, 6])
>>> result = ivy.general_inner_product(a, b, 1)
>>> print(result)
ivy.array(32)
>>> a = ivy.array([1, 2])
>>> b = ivy.array([4, 5])
>>> result = ivy.general_inner_product(a, b)
>>> print(result)
ivy.array(14)
>>> a = ivy.array([[1, 1], [1, 1]])
>>> b = ivy.array([[1, 2, 3, 4],[1, 1, 1, 1]])
>>> result = ivy.general_inner_product(a, b, 1)
>>> print(result)
ivy.array([[2, 3, 4, 5],
[2, 3, 4, 5]])
"""
shape_a = a.shape
shape_b = b.shape
if n_modes is None:
if shape_a != shape_b:
raise ValueError(
"Taking a generalised product between two tensors without specifying"
" common modes is equivalent to taking inner product.This requires"
f" a.shape == b.shape.However, got shapes {a.shape} and {b.shape}"
)
return ivy.sum(ivy.multiply(a, b), out=out)
common_modes = shape_a[len(shape_a) - n_modes :]
if common_modes != shape_b[:n_modes]:
raise ValueError(
f"Incorrect shapes for inner product along {n_modes} common modes."
f"Shapes {shape_a.shape} and {shape_b.shape}"
)
common_size = int(ivy.prod(common_modes)) if len(common_modes) != 0 else 0
output_shape = shape_a[:-n_modes] + shape_b[n_modes:]
inner_product = ivy.dot(
ivy.reshape(a, (-1, common_size)), ivy.reshape(b, (common_size, -1))
)
return ivy.reshape(inner_product, output_shape, out=out)
# This function has been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/tenalg/core_tenalg/moments.py#L5
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def higher_order_moment(
x: Union[ivy.Array, ivy.NativeArray],
order: int,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the Higher-Order Moment.
Parameters
----------
x
matrix of size (n_samples, n_features)
or tensor of size(n_samples, D1, ..., DN)
order
number of the higher-order moment to compute
Returns
-------
tensor
if tensor is a matrix of size (n_samples, n_features),
tensor of size (n_features, )*order
Examples
--------
>>> a = ivy.array([[1, 2], [3, 4]])
>>> result = ivy.higher_order_moment(a, 3)
>>> print(result)
ivy.array([[
[14, 19],
[19, 26]],
[[19, 26],
[26, 36]
]])
"""
moment = ivy.copy_array(x)
for _ in range(order - 1):
moment = ivy.batched_outer([moment, x])
return ivy.mean(moment, axis=0, out=out)
@handle_nestable
@handle_exceptions
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def batched_outer(
tensors: Sequence[Union[ivy.Array, ivy.NativeArray]],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a generalized outer product of the tensors.
Parameters
----------
tensors
list of tensors of shape (n_samples, J1, ..., JN) ,
(n_samples, K1, ..., KM) ...
Returns
-------
outer product of tensors
of shape (n_samples, J1, ..., JN, K1, ..., KM, ...)
Examples
--------
>>> a = ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
>>> b = ivy.array([[[.1, .2], [.3, .4]], [[.5, .6], [.7, .8]]])
>>> result = ivy.batched_outer([a, b])
>>> print(result)
ivy.array([[[[[0.1, 0.2],
[0.30000001, 0.40000001]],
[[0.2 , 0.40000001],
[0.60000002, 0.80000001]]],
[[[0.3 , 0.60000001],
[0.90000004, 1.20000002]],
[[0.40000001, 0.80000001],
[1.20000005, 1.60000002]]]],
[[[[2.5 , 3.00000012],
[3.49999994, 4.00000006]],
[[3. , 3.60000014],
[4.19999993, 4.80000007]]],
[[[3.5 , 4.20000017],
[4.89999992, 5.60000008]],
[[4. , 4.80000019],
[5.5999999 , 6.4000001 ]]]]])
"""
result = None
result_size = None
result_shape = None
for i, tensor in enumerate(tensors):
if i:
current_shape = ivy.shape(tensor)
current_size = len(current_shape) - 1
n_samples = current_shape[0]
_check_same_batch_size(i, n_samples, result_shape)
shape_1 = result_shape + (1,) * current_size
shape_2 = (n_samples,) + (1,) * result_size + tuple(current_shape[1:])
result = ivy.reshape(result, shape_1) * ivy.reshape(tensor, shape_2)
else:
result = tensor
result_shape = ivy.shape(result)
result_size = len(result_shape) - 1
if ivy.exists(out):
result = ivy.inplace_update(out, result)
return result
def _check_same_batch_size(i, n_samples, result_shape):
if n_samples != result_shape[0]:
raise ValueError(
f"Tensor {i} has a batch-size of {n_samples} but those before had a"
f" batch-size of {result_shape[0]}, all tensors should have the"
" same batch-size."
)
| ivy/ivy/functional/ivy/experimental/linear_algebra.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/linear_algebra.py",
"repo_id": "ivy",
"token_count": 29192
} | 48 |
# global
from typing import Union, Optional, Tuple, Literal, List, Sequence
# local
import ivy
from ivy.utils.backend import current_backend
from ivy.func_wrapper import (
handle_array_function,
to_native_arrays_and_back,
handle_out_argument,
handle_nestable,
handle_array_like_without_promotion,
handle_device,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
inf = float("inf")
# Array API Standard #
# -------------------#
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def cholesky(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
upper: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the cholesky decomposition of the x matrix.
Parameters
----------
x
input array having shape (..., M, M) and whose innermost two dimensions form
square symmetric positive-definite matrices. Should have a floating-point data
type.
upper
If True, the result must be the upper-triangular Cholesky factor U. If False,
the result must be the lower-triangular Cholesky factor L. Default: ``False``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the Cholesky factors for each square matrix. If upper is
False, the returned array must contain lower-triangular matrices; otherwise, the
returned array must contain upper-triangular matrices. The returned array must
have a floating-point data type determined by Type Promotion Rules and must have
the same shape as x.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.cholesky.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[4.0, 1.0, 2.0, 0.5, 2.0],
... [1.0, 0.5, 0.0, 0.0, 0.0],
... [2.0, 0.0, 3.0, 0.0, 0.0],
... [0.5, 0.0, 0.0, 0.625, 0.0],
... [2.0, 0.0, 0.0, 0.0, 16.0]])
    >>> u = ivy.cholesky(x, upper=True)
    >>> print(u)
ivy.array([[ 2. , 0.5 , 1. , 0.25, 1. ],
[ 0. , 0.5 , -1. , -0.25, -1. ],
[ 0. , 0. , 1. , -0.5 , -2. ],
[ 0. , 0. , 0. , 0.5 , -3. ],
[ 0. , 0. , 0. , 0. , 1. ]])
>>> x = ivy.array([[4.0, 1.0, 2.0, 0.5, 2.0],
... [1.0, 0.5, 0.0, 0.0, 0.0],
... [2.0, 0.0, 3.0, 0.0, 0.0],
... [0.5, 0.0, 0.0, 0.625, 0.0],
... [2.0, 0.0, 0.0, 0.0, 16.0]])
>>> y = ivy.zeros([5,5])
    >>> ivy.cholesky(x, upper=True, out=y)
>>> print(y)
ivy.array([[ 2. , 0.5 , 1. , 0.25, 1. ],
[ 0. , 0.5 , -1. , -0.25, -1. ],
[ 0. , 0. , 1. , -0.5 , -2. ],
[ 0. , 0. , 0. , 0.5 , -3. ],
[ 0. , 0. , 0. , 0. , 1. ]])
>>> x = ivy.array([[4.0, 1.0, 2.0, 0.5, 2.0],
... [1.0, 0.5, 0.0, 0.0, 0.0],
... [2.0, 0.0, 3.0, 0.0, 0.0],
... [0.5, 0.0, 0.0, 0.625, 0.0],
... [2.0, 0.0, 0.0, 0.0, 16.0]])
    >>> ivy.cholesky(x, upper=True, out=x)
>>> print(x)
ivy.array([[ 2. , 0.5 , 1. , 0.25, 1. ],
[ 0. , 0.5 , -1. , -0.25, -1. ],
[ 0. , 0. , 1. , -0.5 , -2. ],
[ 0. , 0. , 0. , 0.5 , -3. ],
[ 0. , 0. , 0. , 0. , 1. ]])
>>> x = ivy.array([[1., -2.], [2., 5.]])
    >>> u = ivy.cholesky(x, upper=True)
>>> print(u)
ivy.array([[ 1., -2.],
[ 0., 1.]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[3., -1],[-1., 3.]]),
... b=ivy.array([[2., 1.],[1., 1.]]))
    >>> y = ivy.cholesky(x, upper=True)
>>> print(y)
{
a: ivy.array([[1.73, -0.577],
[0., 1.63]]),
b: ivy.array([[1.41, 0.707],
[0., 0.707]])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([[3., -1],[-1., 3.]]),
... b=ivy.array([[2., 1.],[1., 1.]]))
>>> upper = ivy.Container(a=1, b=-1)
    >>> y = ivy.cholesky(x, upper=True)
>>> print(y)
{
a: ivy.array([[1.73, -0.577],
[0., 1.63]]),
b: ivy.array([[1.41, 0.707],
[0., 0.707]])
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.array([[1., -2.], [2., 5.]])
>>> upper = ivy.Container(a=1, b=-1)
    >>> y = ivy.cholesky(x, upper=True)
>>> print(y)
ivy.array([[ 1., -2.],
[ 0., 1.]])
"""
return current_backend(x).cholesky(x, upper=upper, out=out)
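# A minimal illustrative sketch (not part of the public API): a Cholesky
# factor can be checked by reconstructing the input. With ``upper=False`` the
# lower-triangular factor L satisfies L @ L^T == x, while with ``upper=True``
# the upper-triangular factor U satisfies U^T @ U == x. The helper name is
# hypothetical and only the module-level ``ivy`` import of this file is
# assumed.
def _cholesky_reconstruction_sketch(x, upper=False):
    factor = ivy.cholesky(x, upper=upper)
    if upper:
        return ivy.matmul(ivy.matrix_transpose(factor), factor)
    return ivy.matmul(factor, ivy.matrix_transpose(factor))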
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def cross(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
axisa: int = -1,
axisb: int = -1,
axisc: int = -1,
axis: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return cross product of 3-element vectors.
    If x1 and x2 are multi-dimensional arrays (i.e., both have a rank greater than 1),
    then the cross-product of each pair of corresponding 3-element vectors is
    independently computed.
Parameters
----------
x1
first input array. Should have a numeric data type.
x2
second input array. Must be compatible with ``x1`` for all
non-compute axes. The size of the axis over which to compute
the cross product must be the same size as the respective axis
        in ``x1``. Should have a numeric data type.
.. note::
The compute axis (dimension) must not be broadcasted.
axis
the axis (dimension) of x1 and x2 containing the vectors for which to compute
        the cross product. Must be an integer on the interval ``[-N, N)``, where ``N``
is the rank (number of dimensions) of the shape. If specified as a
negative integer, the function must determine the axis along which to
compute the cross product by counting backward from the last dimension
(where ``-1`` refers to the last dimension). By default, the function must
compute the cross product over the last axis. Default: ``-1``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the cross products. The returned array must have a data
type determined by Type Promotion Rules.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.cross.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([1., 0., 0.])
>>> y = ivy.array([0., 1., 0.])
>>> z = ivy.cross(x, y)
>>> print(z)
ivy.array([0., 0., 1.])
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([5., 0., 0.]),
... b=ivy.array([0., 0., 2.]))
>>> y = ivy.Container(a=ivy.array([0., 7., 0.]),
... b=ivy.array([3., 0., 0.]))
>>> z = ivy.cross(x,y)
>>> print(z)
{
a: ivy.array([0., 0., 35.]),
b: ivy.array([0., 6., 0.])
}
With a combination of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.array([9., 0., 3.])
>>> y = ivy.Container(a=ivy.array([1., 1., 0.]),
... b=ivy.array([1., 0., 1.]))
>>> z = ivy.cross(x,y)
>>> print(z)
{
a: ivy.array([-3., 3., 9.]),
b: ivy.array([0., -6., 0.])
}
"""
return current_backend(x1).cross(
x1, x2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def det(
x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""Return the determinant of a square matrix (or a stack of square
matrices)``x``.
Parameters
----------
x
input array having shape ``(..., M, M)`` and whose innermost two dimensions
form square matrices. Should have a floating-point data type.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
if ``x`` is a two-dimensional array, a zero-dimensional array containing the
        determinant; otherwise, a non-zero dimensional array containing the determinant
for each square matrix. The returned array must have the same data type as
``x``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.det.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[2.,4.],[6.,7.]])
>>> y = ivy.det(x)
>>> print(y)
ivy.array(-10.)
>>> x = ivy.array([[3.4,-0.7,0.9],[6.,-7.4,0.],[-8.5,92,7.]])
>>> y = ivy.det(x)
>>> print(y)
ivy.array(293.46997)
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([[3.4,-0.7,0.9],[6.,-7.4,0.],[-8.5,92,7.]])
>>> y = ivy.det(x)
>>> print(y)
ivy.array(293.46997)
With :class:`ivy.Container` input:
>>> x = ivy.Container(a = ivy.array([[3., -1.], [-1., 3.]]) ,
... b = ivy.array([[2., 1.], [1., 1.]]))
>>> y = ivy.det(x)
>>> print(y)
    {
        a: ivy.array(8.),
        b: ivy.array(1.)
    }
"""
return current_backend(x).det(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def diagonal(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
offset: int = 0,
axis1: int = -2,
axis2: int = -1,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the specified diagonals of a matrix (or a stack of matrices)
``x``.
Parameters
----------
x
input array having shape ``(..., M, N)`` and whose innermost two dimensions form
``MxN`` matrices.
offset
offset specifying the off-diagonal relative to the main diagonal.
- ``offset = 0``: the main diagonal.
- ``offset > 0``: off-diagonal above the main diagonal.
- ``offset < 0``: off-diagonal below the main diagonal.
Default: `0`.
axis1
axis to be used as the first axis of the 2-D sub-arrays from which the diagonals
should be taken.
Defaults to first axis (-2).
axis2
axis to be used as the second axis of the 2-D sub-arrays from which the
diagonals should be taken. Defaults to second axis (-1).
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the diagonals and whose shape is determined by removing the
last two dimensions and appending a dimension equal to the size of the resulting
diagonals. The returned array must have the same data type as ``x``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.diagonal.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([[1., 2.],
... [3., 4.]])
>>> d = ivy.diagonal(x)
>>> print(d)
ivy.array([1., 4.])
>>> x = ivy.array([[[1., 2.],
... [3., 4.]],
... [[5., 6.],
... [7., 8.]]])
>>> d = ivy.diagonal(x)
>>> print(d)
ivy.array([[1., 4.],
[5., 8.]])
>>> x = ivy.array([[1., 2.],
... [3., 4.]])
>>> d = ivy.diagonal(x, offset=1)
>>> print(d)
ivy.array([2.])
>>> x = ivy.array([[0, 1, 2],
... [3, 4, 5],
... [6, 7, 8]])
>>> d = ivy.diagonal(x, offset=-1, axis1=0)
>>> print(d)
ivy.array([3, 7])
>>> x = ivy.array([[[ 0, 1, 2],
... [ 3, 4, 5],
... [ 6, 7, 8]],
... [[ 9, 10, 11],
... [12, 13, 14],
... [15, 16, 17]],
... [[18, 19, 20],
... [21, 22, 23],
... [24, 25, 26]]])
>>> d = ivy.diagonal(x, offset=1, axis1=-3)
>>> print(d)
ivy.array([[1, 11],
[4, 14],
[7, 17]])
>>> x = ivy.array([[[0, 1],
... [2, 3]],
... [[4, 5],
... [6, 7]]])
>>> d = ivy.diagonal(x, offset=0, axis1=0, axis2=1)
>>> print(d)
ivy.array([[0, 6],
[1, 7]])
>>> x = ivy.array([[[1., 2.],
... [3., 4.]],
... [[5., 6.],
... [7., 8.]]])
>>> d = ivy.diagonal(x, offset=1, axis1=0, axis2=1)
>>> print(d)
ivy.array([[3.],
[4.]])
>>> x = ivy.array([[1., 2.],
... [3., 4.]])
>>> d = ivy.diagonal(x)
>>> print(d)
ivy.array([1., 4.])
>>> x = ivy.array([[[ 0, 1, 2],
... [ 3, 4, 5],
... [ 6, 7, 8]],
... [[ 9, 10, 11],
... [12, 13, 14],
... [15, 16, 17]],
... [[18, 19, 20],
... [21, 22, 23],
... [24, 25, 26]]])
>>> d = ivy.diagonal(x, offset=1, axis1=1, axis2=-1)
>>> print(d)
ivy.array([[ 1, 5],
[10, 14],
[19, 23]])
>>> x = ivy.array([[0, 1, 2],
... [3, 4, 5],
... [6, 7, 8]])
>>> d = ivy.diagonal(x)
>>> print(d)
ivy.array([0, 4, 8])
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(
... a = ivy.array([[7, 1, 2],
... [1, 3, 5],
... [0, 7, 4]]),
... b = ivy.array([[4, 3, 2],
... [1, 9, 5],
... [7, 0, 6]])
... )
>>> d = ivy.diagonal(x)
>>> print(d)
{
a: ivy.array([7, 3, 4]),
b: ivy.array([4, 9, 6])
}
"""
return current_backend(x).diagonal(
x, offset=offset, axis1=axis1, axis2=axis2, out=out
)
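# A minimal illustrative sketch (not part of the public API): for a single 2-D
# matrix the ``offset`` argument simply shifts which (row, column) pairs are
# gathered, as the pure-indexing reference below shows. The helper name is
# hypothetical and only the module-level ``ivy`` import of this file is
# assumed.
def _diagonal_2d_reference_sketch(x, offset=0):
    rows, cols = int(x.shape[0]), int(x.shape[1])
    if offset >= 0:
        length = max(min(rows, cols - offset), 0)
        values = [x[i, i + offset] for i in range(length)]
    else:
        length = max(min(rows + offset, cols), 0)
        values = [x[i - offset, i] for i in range(length)]
    return ivy.stack(values) if values else ivy.asarray([])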
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def eig(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> Tuple[Union[ivy.Array, ivy.NativeArray]]:
"""Return an eigendecomposition x = QLQᵀ of a symmetric matrix (or a stack
of symmetric matrices) ``x``, where ``Q`` is an orthogonal matrix (or a
stack of matrices) and ``L`` is a vector (or a stack of vectors).
.. note::
        The function ``eig`` currently behaves like ``eigh``, as full ``eig``
        support requires complex numbers. Once complex numbers are supported,
        ``x`` will no longer need to be a complex Hermitian or real symmetric matrix.
Parameters
----------
x
input array having shape ``(..., M, M)`` and whose innermost two dimensions form
square matrices. Must have a floating-point data type.
Returns
-------
ret
a namedtuple (``eigenvalues``, ``eigenvectors``) whose
- first element must have the field name ``eigenvalues`` (corresponding to
``L`` above) and must be an array consisting of computed eigenvalues. The
array containing the eigenvalues must have shape ``(..., M)``.
        - second element must have the field name ``eigenvectors`` (corresponding to
``Q`` above) and must be an array where the columns of the inner most
matrices contain the computed eigenvectors. These matrices must be
orthogonal. The array containing the eigenvectors must have shape
``(..., M, M)``.
- Each returned array must have the same floating-point data type as ``x``.
.. note::
Eigenvalue sort order is left unspecified and is thus implementation-dependent.
"""
return current_backend(x).eig(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def eigh(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
UPLO: str = "L",
out: Optional[ivy.Array] = None,
) -> Tuple[Union[ivy.Array, ivy.NativeArray]]:
r"""Return an eigendecomposition x = QLQᵀ of a symmetric matrix (or a stack
of symmetric matrices) ``x``, where ``Q`` is an orthogonal matrix (or a
stack of matrices) and ``L`` is a vector (or a stack of vectors).
.. note::
        The function ``eig`` will be added in a future version of the specification,
        as it requires complex number support. Once complex numbers are supported,
        each square matrix must be Hermitian.
.. note::
Whether an array library explicitly checks whether an input array is a symmetric
matrix (or a stack of symmetric matrices) is implementation-defined.
Parameters
----------
x
input array having shape ``(..., M, M)`` and whose innermost two dimensions form
square matrices. Must have a floating-point data type.
Returns
-------
ret
a namedtuple (``eigenvalues``, ``eigenvectors``) whose
- first element must have the field name ``eigenvalues`` (corresponding to
:math:`\operatorname{diag}\Lambda` above) and must be an array consisting
of computed eigenvalues. The array containing the eigenvalues must
have shape ``(..., M)`` and must have a real-valued floating-point
data type whose precision matches the precision of ``x`` (e.g., if ``x``
is ``complex128``, then the ``eigenvalues`` must be ``float64``).
        - second element must have the field name ``eigenvectors`` (corresponding to
``Q`` above) and must be an array where the columns of the inner most
matrices contain the computed eigenvectors. These matrices must be
orthogonal. The array containing the eigenvectors must have shape
``(..., M, M)``.
- Each returned array must have the same floating-point data type as ``x``.
.. note::
Eigenvalue sort order is left unspecified and is thus implementation-dependent.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.eigh.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[1., 2.],[2., 5.]])
>>> eigenvalues, eigenvectors = ivy.eigh(x)
>>> print(eigenvalues)
ivy.array([0.17157288, 5.82842731])
>>> print(eigenvectors)
ivy.array([[-0.9238795 , 0.38268343],
[ 0.38268343, 0.9238795 ]])
>>> x = ivy.array([[1., 2.], [2., 5.]])
>>> eigenvalues, eigenvectors = ivy.zeros(len(x)), ivy.zeros(x.shape)
>>> ivy.eigh(x, out=(eigenvalues, eigenvectors))
>>> print(eigenvalues)
ivy.array([0.17157288, 5.82842731])
>>> print(eigenvectors)
ivy.array([[-0.9238795 , 0.38268343],
[ 0.38268343, 0.9238795 ]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(
... a = ivy.native_array([[1., 2., 0.], [3., 4., 5.], [1., 5., 9]]),
... b = ivy.array([[2., 4., 6.], [3., 5., 7.], [0., 0.8, 2.9]]))
>>> eigenvalues, eigenvectors = ivy.eigh(x, UPLO = 'U')
>>> print(eigenvalues)
{
a: ivy.array([-0.78930789, 2.59803128, 12.19127655]),
b: ivy.array([-4.31213903, -0.63418275, 14.84632206])
}
>>> print(eigenvectors)
{
a: ivy.array([[0.70548367, -0.70223427, 0.09570674],
[-0.63116378, -0.56109613, 0.53554028],
[0.32237405, 0.43822157, 0.83906901]]),
b: ivy.array([[0.50766778, 0.71475857, 0.48103389],
[0.3676433, -0.68466955, 0.62933773],
[-0.77917379, 0.14264561, 0.61036086]])
}
"""
return current_backend(x).eigh(x, UPLO=UPLO, out=out)
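# A minimal illustrative sketch (not part of the public API): the
# decomposition returned by ``eigh`` can be sanity-checked by rebuilding the
# input as Q @ diag(eigenvalues) @ Q^T. The helper name is hypothetical and
# only the module-level ``ivy`` import of this file is assumed.
def _eigh_reconstruction_sketch(x):
    eigenvalues, eigenvectors = ivy.eigh(x)
    # scaling the columns of Q by the eigenvalues is Q @ diag(eigenvalues)
    return ivy.matmul(eigenvectors * eigenvalues, ivy.matrix_transpose(eigenvectors))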
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def eigvalsh(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
UPLO: str = "L",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the eigenvalues of a symmetric matrix (or a stack of symmetric
matrices) x.
.. note::
        The function ``eig`` will be added in a future version of the specification,
        as it requires complex number support. Once complex numbers are supported,
        each square matrix must be Hermitian.
.. note::
Whether an array library explicitly checks whether an input array is a symmetric
matrix (or a stack of symmetric matrices) is implementation-defined.
Parameters
----------
x
input array having shape (..., M, M) and whose innermost two dimensions form
square matrices. Must have floating-point data type.
UPLO
optional string being 'L' or 'U', specifying whether the calculation is done
with the lower triangular part of `x` ('L', default) or the
upper triangular part ('U').
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the computed eigenvalues. The returned array must have shape
        (..., M) and must have a real-valued floating-point
data type whose precision matches the precision of ``x`` (e.g., if ``x``
is ``complex128``, then the ``eigenvalues`` must be ``float64``).
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.eigvalsh.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([[[1.0,2.0],[2.0,1.0]]])
>>> y = ivy.eigvalsh(x)
>>> print(y)
ivy.array([[-1., 3.]])
>>> x = ivy.array([[[3.0,2.0],[2.0,3.0]]])
>>> y = ivy.zeros([1,2])
>>> ivy.eigvalsh(x, out=y)
>>> print(y)
ivy.array([[1., 5.]])
>>> x = ivy.array([[[3.0,2.0],[2.0,3.0]]])
>>> ivy.eigvalsh(x, out=x)
>>> print(x)
ivy.array([[1., 5.]])
>>> x = ivy.array([[[2.0,3.0,6.0],[3.0,4.0,5.0],[6.0,5.0,9.0]],
... [[1.0,1.0,1.0],[1.0,2.0,2.0],[1.0,2.0,2.0]]])
>>> y = ivy.eigvalsh(x, UPLO="U")
>>> print(y)
ivy.array([[-1.45033181e+00, 1.02829754e+00, 1.54220343e+01],
[-1.12647155e-15, 4.38447177e-01, 4.56155300e+00]])
With :class:`ivy.NativeArray` inputs:
>>> x = ivy.native_array([[[1., 1., 2.], [1., 2., 1.], [1., 1., 2]]])
>>> y = ivy.eigvalsh(x)
>>> print(y)
ivy.array([[0.26794919, 1. , 3.7320509 ]])
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([[[1.,2.,3.],[2.,4.,5.],[3.,5.,6.]]]),
... b=ivy.array([[[1.,1.,2.],[1.,2.,1.],[2.,1.,1.]]]),
... c=ivy.array([[[2.,2.,2.],[2.,3.,3.],[2.,3.,3.]]]))
>>> y = ivy.eigvalsh(x)
>>> print(y)
{
a: ivy.array([[-0.51572949, 0.17091519, 11.3448143]]),
b: ivy.array([[-1., 1., 4.]]),
c: ivy.array([[-8.88178420e-16, 5.35898387e-01, 7.46410179e+00]])
}
"""
return current_backend(x).eigvalsh(x, UPLO=UPLO, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def inner(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the inner product of two vectors ``x1`` and ``x2``.
Parameters
----------
x1
first one-dimensional input array of size N.
Should have a numeric data type.
        Input is flattened if not already 1-dimensional.
x2
second one-dimensional input array of size M.
Should have a numeric data type.
        Input is flattened if not already 1-dimensional.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
a two-dimensional array containing the inner product and whose
shape is (N, M).
The returned array must have a data type determined by Type Promotion Rules.
    Both the description and the type hints above assume an array input for
simplicity, but this function is *nestable*, and therefore also accepts
:class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
Matrices of identical shapes
>>> x = ivy.array([[1., 2.], [3., 4.]])
>>> y = ivy.array([[5., 6.], [7., 8.]])
>>> d = ivy.inner(x, y)
>>> print(d)
ivy.array([[17., 23.], [39., 53.]])
# Matrices of different shapes
>>> x = ivy.array([[1., 2.], [3., 4.], [5., 6.]])
>>> y = ivy.array([[5., 6.], [7., 8.]])
>>> d = ivy.inner(x, y)
>>> print(d)
ivy.array([[17., 23.], [39., 53.], [61., 83.]])
# 3D matrices
>>> x = ivy.array([[[1., 2.], [3., 4.]],
... [[5., 6.], [7., 8.]]])
>>> y = ivy.array([[[9., 10.], [11., 12.]],
... [[13., 14.], [15., 16.]]])
>>> d = ivy.inner(x, y)
>>> print(d)
ivy.array([[[[ 29., 35.], [ 41., 47.]],
[[ 67., 81.], [ 95., 109.]]],
[[[105., 127.], [149., 171.]],
[[143., 173.], [203., 233.]]]])
"""
return current_backend(x1, x2).inner(x1, x2, out=out)
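# A minimal illustrative sketch (not part of the public API): for two 2-D
# inputs, the inner product above coincides with a matrix product against the
# transposed second operand, which is what the docstring examples show. The
# helper name is hypothetical and only the module-level ``ivy`` import of this
# file is assumed.
def _inner_2d_equivalence_sketch(x1, x2):
    # sum-product over the last axis of x1 and the last axis of x2
    return ivy.matmul(x1, ivy.matrix_transpose(x2))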
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def inv(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
adjoint: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the multiplicative inverse of a square matrix (or a stack of
square matrices) ``x``.
Parameters
----------
x
input array having shape ``(..., M, M)`` and whose innermost two dimensions form
square matrices. Should have a floating-point data type.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the multiplicative inverses. The returned array must have a
floating-point data type determined by :ref:`type-promotion` and must have the
same shape as ``x``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.inv.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([[1.0, 2.0], [3.0, 4.0]])
>>> y = ivy.zeros((2, 2))
>>> ivy.inv(x, out=y)
>>> print(y)
ivy.array([[-2., 1.],[1.5, -0.5]])
>>> x = ivy.array([[1.0, 2.0], [5.0, 5.0]])
>>> ivy.inv(x, out=x)
>>> print(x)
ivy.array([[-1., 0.4],[1., -0.2]])
>>> x = ivy.array([[[1.0, 2.0],[3.0, 4.0]],
... [[1.0, 3.0], [3.0, 5.0]]])
>>> y = ivy.inv(x)
>>> print(y)
ivy.array([[[-2., 1.],[1.5, -0.5]],
[[-1.25, 0.75],[0.75, -0.25]]])
With :class:`ivy.Container` inputs
>>> x = ivy.Container(a=ivy.array([[11., 100., 10.],
... [300., 40., 20.], [25., 30, 100.]]),
... b=ivy.array([[4., 400., 50.], [10., 10., 15.],
... [50., 5000., 40.]]),
... c=ivy.array([[25., 22., 100.], [55, 20., 20.],
... [55., 50., 100.]]))
>>> y = x.inv()
>>> print(y)
{
a: ivy.array([[-0.0012, 0.00342, -0.000565],
[0.0104, -0.0003, -0.000981],
[-0.00282, -0.000766, 0.0104]]),
b: ivy.array([[-0.0322, 0.101, 0.00237],
[0.000151, -0.00101, 0.00019],
[0.0214, 0., -0.00171]]),
c: ivy.array([[0.0107, 0.03, -0.0167],
[-0.0472, -0.0322, 0.0536],
[0.0177, -0.000429, -0.00762]])
}
"""
return current_backend(x).inv(x, adjoint=adjoint, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def matmul(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
transpose_a: bool = False,
transpose_b: bool = False,
adjoint_a: bool = False,
adjoint_b: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the matrix product.
Parameters
----------
x1
first input array. Should have a numeric data type. Must have at least one
dimension.
x2
second input array. Should have a numeric data type. Must have at least one
dimension.
transpose_a
if True, ``x1`` is transposed before multiplication.
transpose_b
if True, ``x2`` is transposed before multiplication.
    adjoint_a
        if True, ``x1`` is conjugate-transposed (conjugated and then transposed)
        before multiplication. ``adjoint_a`` and ``transpose_a`` cannot both be True.
    adjoint_b
        if True, ``x2`` is conjugate-transposed (conjugated and then transposed)
        before multiplication. ``adjoint_b`` and ``transpose_b`` cannot both be True.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
- if both x1 and x2 are one-dimensional arrays having shape (N,), a
zero-dimensional array containing the inner product as its only element.
- if x1 is a two-dimensional array having shape (M, K) and x2 is a
two-dimensional array having shape (K, N), a two-dimensional array
containing the conventional matrix product and having shape (M, N).
- if x1 is a one-dimensional array having shape (K,) and x2 is an array having
shape (..., K, N), an array having shape (..., N) (i.e., prepended
dimensions during vector-to-matrix promotion must be removed) and containing
the conventional matrix product.
- if x1 is an array having shape (..., M, K) and x2 is a one-dimensional array
having shape (K,), an array having shape (..., M) (i.e., appended dimensions
during vector-to-matrix promotion must be removed) and containing the
conventional matrix product.
- if x1 is a two-dimensional array having shape (M, K) and x2 is an array
having shape (..., K, N), an array having shape (..., M, N) and containing
the conventional matrix product for each stacked matrix.
- if x1 is an array having shape (..., M, K) and x2 is a two-dimensional array
having shape (K, N), an array having shape (..., M, N) and containing the
conventional matrix product for each stacked matrix.
- if either x1 or x2 has more than two dimensions, an array having a shape
determined by Broadcasting shape(x1)[:-2] against shape(x2)[:-2] and
containing the conventional matrix product for each stacked matrix.
**Raises**
- if either x1 or x2 is a zero-dimensional array.
- if x1 is a one-dimensional array having shape (K,), x2 is a one-dimensional
array having shape (L,), and K != L.
- if x1 is a one-dimensional array having shape (K,), x2 is an array
having shape (..., L, N), and K != L.
- if x1 is an array having shape (..., M, K), x2 is a one-dimensional array
having shape (L,), and K != L.
- if x1 is an array having shape (..., M, K), x2 is an array having shape
(..., L, N), and K != L.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.matmul.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([2., 0., 3.])
>>> y = ivy.array([4., 1., 8.])
>>> z = ivy.matmul(x, y)
>>> print(z)
ivy.array(32.)
>>> x = ivy.array([[1., 2.], [0., 1.]])
>>> y = ivy.array([[2., 0.], [0., 3.]])
>>> z = ivy.matmul(x, y, transpose_b=True)
>>> print(z)
ivy.array([[2., 6.],
[0., 3.]])
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([5., 1.]), b=ivy.array([1., 0.]))
>>> y = ivy.Container(a=ivy.array([4., 7.]), b=ivy.array([3., 0.]))
>>> z = ivy.matmul(x,y)
>>> print(z)
{
a: ivy.array(27.),
b: ivy.array(3.)
}
With a combination of :class:`ivy.Array`
and :class:`ivy.Container` inputs:
>>> x = ivy.array([9., 0.])
>>> y = ivy.Container(a=ivy.array([2., 1.]), b=ivy.array([1., 0.]))
>>> z = ivy.matmul(x, y)
>>> print(z)
{
a: ivy.array(18.),
b: ivy.array(9.)
}
>>> x = ivy.array([[1., 2.], [0., 3.]])
>>> y = ivy.array([[1.], [3.]])
>>> z = ivy.matmul(x, y, transpose_a=True)
>>> print(z)
ivy.array([[ 1.],
[11.]])
"""
return current_backend(x1).matmul(
x1,
x2,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b,
out=out,
)
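# A minimal illustrative sketch (not part of the public API): the
# ``transpose_a`` / ``transpose_b`` flags are shorthand for transposing the
# corresponding operand before the product, so the two results below should
# agree. The helper name is hypothetical and only the module-level ``ivy``
# import of this file is assumed.
def _matmul_transpose_flag_sketch(x1, x2):
    flagged = ivy.matmul(x1, x2, transpose_b=True)
    explicit = ivy.matmul(x1, ivy.matrix_transpose(x2))
    return flagged, explicit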
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def matrix_norm(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
ord: Union[int, float, Literal[inf, -inf, "fro", "nuc"]] = "fro",
axis: Tuple[int, int] = (-2, -1),
keepdims: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the matrix p-norm.
Parameters
----------
x
Input array having shape (..., M, N) and whose innermost two dimensions
form MxN matrices. Should have a floating-point data type.
ord
order of the norm. The following mathematical norms must be supported:
+------------------+---------------------------------+
| ord | description |
+==================+=================================+
| 'fro' | Frobenius norm |
+------------------+---------------------------------+
| 'nuc' | nuclear norm |
+------------------+---------------------------------+
| 1 | max(sum(abs(x), axis=0)) |
+------------------+---------------------------------+
| 2 | largest singular value |
+------------------+---------------------------------+
| inf | max(sum(abs(x), axis=1)) |
+------------------+---------------------------------+
The following non-mathematical "norms" must be supported:
+------------------+---------------------------------+
| ord | description |
+==================+=================================+
| -1 | min(sum(abs(x), axis=0)) |
+------------------+---------------------------------+
| -2 | smallest singular value |
+------------------+---------------------------------+
| -inf | min(sum(abs(x), axis=1)) |
+------------------+---------------------------------+
        If ``ord=1``, the norm corresponds to the induced matrix norm where
        ``p=1`` (i.e., the maximum absolute value column sum).
        If ``ord=2``, the norm corresponds to the induced matrix norm where
        ``p=2`` (i.e., the largest singular value).
        If ``ord=inf``, the norm corresponds to the induced matrix norm where
        ``p=inf`` (i.e., the maximum absolute value row sum).
        Default: "fro".
axis
specifies the axes that hold 2-D matrices. Default: (-2, -1).
keepdims
If this is set to True, the axes which are normed over are left in the result as
dimensions with size one. With this option the result will broadcast correctly
against the original x. Default is ``False``.
dtype
If specified, the input tensor is cast to dtype before performing the operation,
and the returned tensor's type will be dtype. Default: None
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Matrix norm of the array at specified axes. If ``keepdims`` is ``False``, the
        returned array must have a rank which is two less than the rank of ``x``.
If ``x`` has a real-valued data type, the returned array must have a real-valued
floating-point data type based on Type promotion. If ``x`` has a complex-valued
data type, the returned array must have a real-valued floating-point data type
whose precision matches the precision of ``x`` (e.g., if ``x`` is
        ``complex128``, then the returned array must have a ``float64`` data type).
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.matrix_norm.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([[1., 2.], [3., 4.]])
>>> y = ivy.matrix_norm(x)
>>> print(y)
ivy.array(5.47722558)
>>> x = ivy.arange(8, dtype=float).reshape((2, 2, 2))
>>> y = ivy.zeros(2)
>>> ivy.matrix_norm(x, ord=1, out=y)
>>> print(y)
ivy.array([ 4., 12.])
>>> x = ivy.arange(12, dtype=float).reshape((3, 2, 2))
>>> y = ivy.zeros((3,))
>>> ivy.matrix_norm(x, ord=ivy.inf, axis=(2, 1), out=y)
>>> print(y)
ivy.array([ 4., 12., 20.])
>>> x = ivy.array([[1.1, 2.2], [3.3, 4.4], [5.5, 6.6]])
>>> y = ivy.matrix_norm(x, ord='nuc', keepdims=True)
>>> print(y)
ivy.array([[11.]])
>>> x = ivy.array([[[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]],
... [[1., 0., 1.1], [1., 1., 0.]]])
>>> y = ivy.zeros((2,))
>>> ivy.matrix_norm(x, ord='fro', out=y)
>>> print(y)
ivy.array([10.5 , 2.05])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[0.666, 9.11],
... [42.69, 9.23]]),
... b=ivy.array([[1.1, 2.2, 3.3],
... [4.4, 5.5, 6.6]]))
>>> y = ivy.matrix_norm(x, ord=-ivy.inf)
>>> print(y)
{
a: ivy.array(9.776),
b: ivy.array(6.6000004)
}
With multiple :class:`ivy:Container` inputs:
>>> x = ivy.Container(a=ivy.arange(12, dtype=float).reshape((3, 2, 2)),
... b=ivy.arange(8, dtype=float).reshape((2, 2, 2)))
>>> ord = ivy.Container(a=1, b=float('inf'))
>>> axis = ivy.Container(a=(1, 2), b=(2, 1))
>>> k = ivy.Container(a=False, b=True)
>>> y = ivy.matrix_norm(x, ord=ord, axis=axis, keepdims=k)
>>> print(y)
{
a: ivy.array([4., 12., 20.]),
b: ivy.array([[[4.]],
[[12.]]])
}
"""
return current_backend(x).matrix_norm(
x, ord=ord, axis=axis, keepdims=keepdims, dtype=dtype, out=out
)
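# A minimal illustrative sketch (not part of the public API): the default
# Frobenius norm is simply the square root of the sum of squared entries over
# the two matrix axes, which is easy to re-derive with elementwise ops. The
# helper name is hypothetical and only the module-level ``ivy`` import of this
# file is assumed.
def _frobenius_norm_sketch(x, axis=(-2, -1), keepdims=False):
    # equivalent to matrix_norm(x, ord="fro", axis=axis, keepdims=keepdims)
    return ivy.sqrt(ivy.sum(x * x, axis=axis, keepdims=keepdims))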
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def matrix_power(
x: Union[ivy.Array, ivy.NativeArray], n: int, /, *, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""Raise a square matrix (or a stack of square matrices) x to an integer
power n.
Parameters
----------
x
input array having shape (..., M, M) and whose innermost two dimensions form
square matrices.
Should have a floating-point data type.
n
integer exponent.
Returns
-------
ret
if n is equal to zero, an array containing the identity matrix for each
square matrix.
If n is less than zero, an array containing the inverse of each
square matrix raised to the absolute value of n, provided that each
square matrix is invertible.
If n is greater than zero, an array containing the result of raising
each square matrix to the power n.
The returned array must have the same shape as x and a floating-point
data type determined by Type Promotion Rules.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.matrix_power.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
    With :class:`ivy.Array` inputs:
>>> x = ivy.array([[1., 2.], [3., 4.]])
>>> ivy.matrix_power(x,1)
ivy.array([[1., 2.],
[3., 4.]])
>>> x = ivy.array([[3., 2.], [-5., -3.]])
>>> ivy.matrix_power(x,-1)
ivy.array([[-3., -2.],
[ 5., 3.]])
>>> x = ivy.array([[4., -1.], [0., 2.]])
>>> ivy.matrix_power(x,0)
ivy.array([[1., 0.],
[0., 1.]])
>>> x = ivy.array([[1., 2.], [0., 1.]])
>>> ivy.matrix_power(x,5)
ivy.array([[ 1., 10.],
[ 0., 1.]])
>>> x = ivy.array([[1/2, 0.], [0., -1/3]])
>>> ivy.matrix_power(x,-2)
ivy.array([[4., 0.],
[0., 9.]])
    With :class:`ivy.NativeArray` inputs:
>>> x = ivy.native_array([[1., 2., 3.], [6., 5., 4.], [7., 8., 9.]])
>>> ivy.matrix_power(x,2)
ivy.array([[ 34., 36., 38.],
[ 64., 69., 74.],
[118., 126., 134.]])
    With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a = ivy.array([[1., 2.], [3., 4.]]),
    ...                   b = ivy.array([[1., 0.], [0., 0.]]))
>>> ivy.matrix_power(x,3)
{
a: ivy.array([[37., 54.],
[81., 118.]]),
b: ivy.array([[1., 0.],
[0., 0.]])
}
"""
return current_backend(x).matrix_power(x, n, out=out)
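# A minimal illustrative sketch (not part of the public API): for a single
# square matrix the three cases described above (n == 0, n > 0, n < 0) can be
# written directly with ``eye``, repeated ``matmul`` and ``inv``. This naive
# reference ignores the exponentiation-by-squaring a real backend would use;
# the helper name is hypothetical and only the module-level ``ivy`` import of
# this file is assumed.
def _matrix_power_reference_sketch(x, n):
    size = int(x.shape[-1])
    if n == 0:
        return ivy.eye(size, dtype=x.dtype)
    base = x if n > 0 else ivy.inv(x)
    result = base
    for _ in range(abs(n) - 1):
        result = ivy.matmul(result, base)
    return result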
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def matrix_rank(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
atol: Optional[Union[float, Tuple[float]]] = None,
rtol: Optional[Union[float, Tuple[float]]] = None,
hermitian: Optional[bool] = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the rank (i.e., number of non-zero singular values) of a matrix
(or a stack of matrices).
Parameters
----------
x
input array having shape ``(..., M, N)`` and whose innermost two dimensions form
``MxN`` matrices. Should have a floating-point data type.
atol
        absolute tolerance. When ``None``, it is considered to be zero.
rtol
relative tolerance for small singular values. Singular values approximately less
than or equal to ``rtol * largest_singular_value`` are set to zero. If a
``float``, the value is equivalent to a zero-dimensional array having a
floating-point data type determined by :ref:`type-promotion` (as applied to
``x``) and must be broadcast against each matrix. If an ``array``, must have a
floating-point data type and must be compatible with ``shape(x)[:-2]`` (see
:ref:`broadcasting`). If ``None``, the default value is ``max(M, N) * eps``,
where ``eps`` must be the machine epsilon associated with the floating-point
data type determined by :ref:`type-promotion` (as applied to ``x``).
Default: ``None``.
hermitian
indicates whether ``x`` is Hermitian. When ``hermitian=True``, ``x``
is assumed to be Hermitian, enabling a more efficient method for finding
        eigenvalues, but ``x`` is not checked inside the function.
        Instead, only the lower-triangular part of the matrix is used in the computation.
Default: ``False``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the ranks. The returned array must have a floating-point
data type determined by :ref:`type-promotion` and must have shape ``(...)``
(i.e., must have a shape equal to ``shape(x)[:-2]``).
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.matrix_rank.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
    With :class:`ivy.Array` inputs:
1. Full Matrix
>>> x = ivy.array([[1., 2.], [3., 4.]])
>>> ivy.matrix_rank(x)
ivy.array(2.)
2. Rank Deficient Matrix
>>> x = ivy.array([[1., 0.], [0., 0.]])
>>> ivy.matrix_rank(x)
ivy.array(1.)
3. 1 Dimension - rank 1 unless all 0
    >>> x = ivy.array([1., 1.])
>>> ivy.matrix_rank(x)
ivy.array(1.)
    >>> x = ivy.array([0., 0.])
>>> ivy.matrix_rank(x)
ivy.array(0)
    With :class:`ivy.NativeArray` inputs:
    >>> x = ivy.native_array([[[1., 2.], [3., 4.]], [[1., 0.], [0., 0.]]])
>>> ivy.matrix_rank(x)
ivy.array([2., 1.])
    With :class:`ivy.Container` inputs:
    >>> x = ivy.Container(a = ivy.array([[1., 2.], [3., 4.]]),
    ...                   b = ivy.array([[1., 0.], [0., 0.]]))
>>> ivy.matrix_rank(x)
{
a:ivy.array(2.),
b:ivy.array(1.)
}
"""
return current_backend(x).matrix_rank(
x, atol=atol, rtol=rtol, hermitian=hermitian, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def matrix_transpose(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
conjugate: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Transposes a matrix (or a stack of matrices) ``x``.
Parameters
----------
x
input array having shape ``(..., M, N)`` and whose innermost two
dimensions form ``MxN`` matrices.
conjugate
If True, takes the conjugate of the matrix.
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
an array containing the transpose for each matrix and having shape
``(..., N, M)``. The returned array must have the same data
type as ``x``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.matrix_transpose.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
    With :class:`ivy.Array` inputs:
>>> x = ivy.array([[0., 2.], [1., 3.]])
>>> y = ivy.matrix_transpose(x)
>>> print(y)
ivy.array([[0., 1.],
[2., 3.]])
>>> x = ivy.array([[1., 4.], [2., 5.], [3., 1.]])
>>> y = ivy.zeros((2, 3))
>>> ivy.matrix_transpose(x, out=y)
ivy.array([[1., 2., 3.],
[4., 5., 1.]])
>>> x = ivy.array([[2., 3.], [1., 2.]])
>>> ivy.matrix_transpose(x, out=x)
ivy.array([[2., 1.],
[3., 2.]])
>>> x = ivy.array([[0., 1., 2.], [1., 2., 3.]])
>>> y = ivy.matrix_transpose(x)
>>> print(y)
ivy.array([[0., 1.],
[1., 2.],
[2., 3.]])
    With :class:`ivy.Container` inputs:
    >>> x = ivy.Container(a=ivy.array([[0., 1.], [0., 2.]]),
    ...                   b=ivy.array([[3., 4.], [3., 5.]]))
>>> y = ivy.matrix_transpose(x)
>>> print(y)
{
a: ivy.array([[0., 0.],
[1., 2.]]),
b: ivy.array([[3., 3.],
[4., 5.]])
}
"""
return current_backend(x).matrix_transpose(x, conjugate=conjugate, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def outer(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the outer product of two vectors ``x1`` and ``x2``.
Parameters
----------
x1
first one-dimensional input array of size N. Should have a numeric data type.
        Input is flattened if not already 1-dimensional.
x2
second one-dimensional input array of size M. Should have a numeric data type.
        Input is flattened if not already 1-dimensional.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
a two-dimensional array containing the outer product and whose shape is (N, M).
The returned array must have a data type determined by Type Promotion Rules.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.outer.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
>>> x = ivy.array([[1., 2.],[3., 4.]])
>>> y = ivy.array([[5., 6.],[7., 8.]])
>>> d = ivy.outer(x,y)
>>> print(d)
ivy.array([[ 5., 6., 7., 8.],
[10., 12., 14., 16.],
[15., 18., 21., 24.],
[20., 24., 28., 32.]])
>>> d = ivy.outer(x, 1)
>>> print(d)
ivy.array([[1.],
[2.],
[3.],
[4.]])
>>> x = ivy.array([[[1., 2.],[3., 4.]],[[5., 6.],[7., 8.]]])
>>> y = ivy.array([[[9., 10.],[11., 12.]],[[13., 14.],[15., 16.]]])
>>> d = ivy.outer(x, y)
>>> print(d)
ivy.array([[ 9., 10., 11., 12., 13., 14., 15., 16.],
[ 18., 20., 22., 24., 26., 28., 30., 32.],
[ 27., 30., 33., 36., 39., 42., 45., 48.],
[ 36., 40., 44., 48., 52., 56., 60., 64.],
[ 45., 50., 55., 60., 65., 70., 75., 80.],
[ 54., 60., 66., 72., 78., 84., 90., 96.],
[ 63., 70., 77., 84., 91., 98., 105., 112.],
[ 72., 80., 88., 96., 104., 112., 120., 128.]])
"""
return current_backend(x1, x2).outer(x1, x2, out=out)
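# A minimal illustrative sketch (not part of the public API): the outer
# product of two flattened vectors is a broadcasted elementwise product of a
# column against a row, which reproduces the (N, M) result described above.
# The helper name is hypothetical and only the module-level ``ivy`` import of
# this file is assumed.
def _outer_broadcast_sketch(x1, x2):
    column = ivy.reshape(x1, (-1, 1))  # shape (N, 1)
    row = ivy.reshape(x2, (1, -1))  # shape (1, M)
    return column * row  # broadcasts to shape (N, M)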
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def pinv(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
rtol: Optional[Union[float, Tuple[float]]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the (Moore-Penrose) pseudo-inverse of a matrix (or a stack of
matrices) ``x``.
Parameters
----------
x
input array having shape ``(..., M, N)`` and whose innermost two dimensions form
``MxN`` matrices. Should have a floating-point data type.
rtol
relative tolerance for small singular values. Singular values approximately less
than or equal to ``rtol * largest_singular_value`` are set to zero. If a
``float``, the value is equivalent to a zero-dimensional array having a
floating-point data type determined by :ref:`type-promotion` (as applied to
``x``) and must be broadcast against each matrix. If an ``array``, must have a
floating-point data type and must be compatible with ``shape(x)[:-2]``
(see :ref:`broadcasting`). If ``None``, the default value is
``max(M, N) * eps``, where ``eps`` must be the machine epsilon associated with
the floating-point data type determined by :ref:`type-promotion` (as applied to
``x``). Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the pseudo-inverses. The returned array must have a
floating-point data type determined by :ref:`type-promotion` and must have shape
``(..., N, M)`` (i.e., must have the same shape as ``x``, except the innermost
two dimensions must be transposed).
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.pinv.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
>>> x = ivy.array([[1., 2.],[3., 4.]])
>>> y = ivy.pinv(x)
>>> print(y)
ivy.array([[-1.99999988, 1. ],
[ 1.5 , -0.5 ]])
>>> x = ivy.array([[1., 2.],[3., 4.]])
>>> out = ivy.zeros(x.shape)
>>> ivy.pinv(x, out=out)
>>> print(out)
ivy.array([[-1.99999988, 1. ],
[ 1.5 , -0.5 ]])
"""
return current_backend(x).pinv(x, rtol=rtol, out=out)
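# A minimal illustrative sketch (not part of the public API): one defining
# Moore-Penrose property is A @ pinv(A) @ A == A (up to floating-point error),
# which gives a cheap self-check for the function above. The helper name is
# hypothetical and only the module-level ``ivy`` import of this file is
# assumed.
def _pinv_property_residual_sketch(x):
    reconstructed = ivy.matmul(ivy.matmul(x, ivy.pinv(x)), x)
    # maximum absolute deviation from the original matrix
    return ivy.max(ivy.abs(reconstructed - x))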
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def qr(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
mode: str = "reduced",
out: Optional[Tuple[ivy.Array, ivy.Array]] = None,
) -> Tuple[ivy.Array, ivy.Array]:
"""Return the qr decomposition x = QR of a full column rank matrix (or a
stack of matrices), where Q is an orthonormal matrix (or a stack of
matrices) and R is an upper-triangular matrix (or a stack of matrices).
Parameters
----------
x
input array having shape (..., M, N) and whose innermost two dimensions form MxN
matrices of rank N. Should have a floating-point data type.
mode
decomposition mode. Should be one of the following modes:
- 'reduced': compute only the leading K columns of q, such that q and r have
dimensions (..., M, K) and (..., K, N), respectively, and where K = min(M, N).
- 'complete': compute q and r with dimensions (..., M, M) and (..., M, N),
respectively.
Default: 'reduced'.
out
optional output tuple of arrays, for writing the result to. The arrays must have
shapes that the inputs broadcast to.
Returns
-------
ret
a namedtuple (Q, R) whose
- first element must have the field name Q and must be an array whose shape
depends on the value of mode and contain matrices with orthonormal columns.
If mode is 'complete', the array must have shape (..., M, M). If mode is
'reduced', the array must have shape (..., M, K), where K = min(M, N). The
first x.ndim-2 dimensions must have the same size as those of the input array
x.
- second element must have the field name R and must be an array whose shape
depends on the value of mode and contain upper-triangular matrices. If mode is
'complete', the array must have shape (..., M, N). If mode is 'reduced', the
array must have shape (..., K, N), where K = min(M, N). The first x.ndim-2
dimensions must have the same size as those of the input x.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.qr.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[1.,2.,3.],[4.,5.,6.],[7.,8.,9.]])
>>> q, r = ivy.qr(x)
>>> print(q)
ivy.array([[-0.12309149, 0.90453403, 0.40824829],
[-0.49236596, 0.30151134, -0.81649658],
[-0.86164044, -0.30151134, 0.40824829]])
>>> print(r)
ivy.array([[-8.12403841e+00,-9.60113630e+00, -1.10782342e+01],
[ 0.00000000e+00, 9.04534034e-01, 1.80906807e+00],
[ 0.00000000e+00, 0.00000000e+00, -8.88178420e-16]])
# Note: if `int` values are used in `x` the output for q, r vary
>>> x = ivy.array([[1., 2.], [3., 4.]])
>>> q = ivy.zeros_like(x)
>>> r = ivy.zeros_like(x)
>>> ivy.qr(x, out=(q,r))
>>> print(q)
ivy.array([[-0.31622776, -0.94868332],
[-0.94868332, 0.31622776]])
>>> print(r)
ivy.array([[-3.1622777 , -4.42718887],
[ 0. , -0.63245553]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a = ivy.native_array([[1., 2.], [3., 4.]]),
... b = ivy.array([[2., 3.], [4. ,5.]]))
>>> q,r = ivy.qr(x, mode='complete')
>>> print(q)
{
a: ivy.array([[-0.31622777, -0.9486833],
[-0.9486833, 0.31622777]]),
b: ivy.array([[-0.4472136, -0.89442719],
[-0.89442719, 0.4472136]])
}
>>> print(r)
{
a: ivy.array([[-3.16227766, -4.42718872],
[0., -0.63245553]]),
b: ivy.array([[-4.47213595, -5.81377674],
[0., -0.4472136]])
}
"""
return current_backend(x).qr(x, mode=mode, out=out)
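# A minimal illustrative sketch (not part of the public API): whichever
# ``mode`` is used, the factors returned above should multiply back to the
# input, so the largest absolute reconstruction error is a convenient sanity
# check. The helper name is hypothetical and only the module-level ``ivy``
# import of this file is assumed.
def _qr_reconstruction_error_sketch(x, mode="reduced"):
    q, r = ivy.qr(x, mode=mode)
    return ivy.max(ivy.abs(ivy.matmul(q, r) - x))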
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_array_function
@handle_device
def slogdet(
x: Union[ivy.Array, ivy.NativeArray],
/,
) -> Tuple[Union[ivy.Array, ivy.NativeArray], Union[ivy.Array, ivy.NativeArray]]:
"""
Return the sign and the natural logarithm of the absolute value of the determinant
of a square matrix (or a stack of square matrices) ``x``.
.. note::
The purpose of this function is to calculate the determinant more accurately
when the determinant is either very small or very large, as calling ``det`` may
overflow or underflow.
**Special cases**
For real-valued floating-point operands,
    - If the determinant is zero, the ``sign`` should be ``0`` and ``logabsdet``
      should be ``-infinity``.
    For complex floating-point operands,
    - If the determinant is ``0 + 0j``, the ``sign`` should be ``0 + 0j``
      and ``logabsdet`` should be ``-infinity + 0j``.
Parameters
----------
x:
input array having shape ``(..., M, M)`` and whose innermost two dimensions
form square matrices. Should have a real-valued floating-point data type.
Returns
-------
ret:
a namedtuple (``sign``, ``logabsdet``) whose
- first element must have the field name ``sign`` and must be an array
containing a number representing the sign of the determinant for
each square matrix.
        - second element must have the field name ``logabsdet`` and must be an array
          containing the natural logarithm of the absolute value of the determinant
          for each square matrix.
For a real matrix, the sign of the determinant must be
either ``1``, ``0``, or ``-1``.
Each returned array must have shape ``shape(x)[:-2]`` and a real-valued
floating-point data type determined by :ref:`type-promotion`. If ``x``
is complex, the returned array must have a real-valued floating-point data
        type having the same precision as ``x`` (e.g., if ``x`` is ``complex64``,
``logabsdet`` must have a ``float32`` data type)
.. note::
If a determinant is zero, then the corresponding ``sign`` should be ``0``
and ``logabsdet`` should be ``-infinity``; however, depending on the
underlying algorithm, the returned result may differ. In all cases,
        the determinant should be equal to ``sign * exp(logabsdet)``
(although, again, the result may be subject to numerical precision errors).
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.slogdet.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[2.0, 1.0],
... [3.0, 4.0]])
>>> y = ivy.slogdet(x)
>>> print(y)
slogdet(sign=ivy.array(1.), logabsdet=ivy.array(1.60943794))
>>> ivy.set_backend('numpy') # As the precision of results depends on backend.
>>> x = ivy.array([[1.2, 2.0, 3.1],
... [6.0, 5.2, 4.0],
... [9.0, 8.0, 7.0]])
>>> y = ivy.slogdet(x)
>>> print(y)
slogdet(sign=ivy.array(-1.), logabsdet=ivy.array(1.098611))
With :class:`ivy.Container` input:
>>> ivy.unset_backend() # unset backend again.
>>> x = ivy.Container(a=ivy.array([[1.0, 2.0],
... [3.0, 4.0]]),
... b=ivy.array([[1.0, 2.0],
... [2.0, 1.0]]))
>>> y = ivy.slogdet(x)
>>> print(y)
[{
a: ivy.array(-1.),
b: ivy.array(-1.)
}, {
a: ivy.array(0.69314718),
b: ivy.array(1.09861231)
}]
"""
return current_backend(x).slogdet(x)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def solve(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
adjoint: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the solution x to the system of linear equations represented by
    the well-determined (i.e., full rank) linear matrix equation Ax = B.
Parameters
----------
x1
coefficient array A having shape (..., M, M) and whose innermost two dimensions
form square matrices. Must be of full rank (i.e., all rows or, equivalently,
columns must be linearly independent). Should have a floating-point data type.
x2
ordinate (or “dependent variable”) array B. If x2 has shape (M,1), x2 is
equivalent to an array having shape (..., M, 1). If x2 has shape (..., M, K),
each column k defines a set of ordinate values for which to compute a solution,
and shape(x2)[:-1] must be compatible with shape(x1)[:-1] (see Broadcasting).
Should have a floating-point data type.
adjoint
specifies whether the system should be solved for x1 or adjoint(x1)
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the solution to the system AX = B (or adjoint(A)X = B)
for each square matrix. The returned array must have the same shape as x2
(i.e., the array corresponding to B) and must have a floating-point data
type determined by Type Promotion Rules.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.solve.html>`_
in the standard.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
    With :class:`ivy.Array` input:
    >>> A = ivy.array([[1.1, 1.2, 1.3], [2.1, 2.2, 2.3], [3.1, 3.2, 3.3]])
    >>> B = ivy.array([[1.1], [2.1], [3.1]])
    >>> x = ivy.solve(A, B)
    >>> print(x)
    ivy.array([[1],
               [0],
               [0]])
    >>> print(x.shape)
    (3, 1)
    With shape(A) = (2,3,3) and shape(B) = (2,3,1):
    >>> A = ivy.array([[[11.1, 11.2, 11.3],
    ...                 [12.1, 12.2, 12.3],
    ...                 [13.1, 13.2, 13.3]],
    ...                [[21.1, 21.2, 21.3],
    ...                 [22.1, 22.2, 22.3],
    ...                 [23.1, 23.2, 23.3]]])
    >>> B = ivy.array([[[11.1],
    ...                 [12.1],
    ...                 [13.1]],
    ...                [[21.1],
    ...                 [22.1],
    ...                 [23.1]]])
    >>> x = ivy.solve(A, B)
    >>> print(x)
    ivy.array([[[1],
                [0],
                [0]],
               [[1],
                [0],
                [0]]])
    >>> print(x.shape)
    (2, 3, 1)
    With shape(A) = (3,3) and shape(B) = (3,2):
    >>> A = ivy.array([[1.1, 1.2, 1.3], [2.1, 2.2, 2.3], [3.1, 3.2, 3.3]])
    >>> B = ivy.array([[1.1, 2.2], [2.1, 4.2], [3.1, 6.2]])
    >>> x = ivy.solve(A, B)
    >>> print(x)
    ivy.array([[1., 2.],
               [0., 0.],
               [0., 0.]])
    >>> print(x.shape)
    (3, 2)
    With :class:`ivy.Container` input:
    >>> A = ivy.array([[1.1, 1.2, 1.3], [2.1, 2.2, 2.3], [3.1, 3.2, 3.3]])
    >>> B = ivy.Container(B1=ivy.array([[1.1], [2.1], [3.1]]),
    ...                   B2=ivy.array([[2.2], [4.2], [6.2]]))
    >>> x = ivy.solve(A, B)
    >>> print(x)
    {
        B1: ivy.array([[1], [0], [0]]),
        B2: ivy.array([[2], [0], [0]])
    }
""" # noqa: E501
return current_backend(x1, x2).solve(x1, x2, adjoint=adjoint, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_array_function
@handle_device
def svd(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
compute_uv: bool = True,
full_matrices: bool = True,
) -> Union[ivy.Array, Tuple[ivy.Array, ...]]:
"""Return a singular value decomposition A = USVh of a matrix (or a stack
of matrices) ``x``, where ``U`` is a matrix (or a stack of matrices) with
orthonormal columns, ``S`` is a vector of non-negative numbers (or stack of
vectors), and ``Vh`` is a matrix (or a stack of matrices) with orthonormal
rows.
Parameters
----------
x
input array having shape ``(..., M, N)`` and whose innermost two dimensions form
matrices on which to perform singular value decomposition. Should have a
floating-point data type.
full_matrices
If ``True``, compute full-sized ``U`` and ``Vh``, such that ``U`` has shape
``(..., M, M)`` and ``Vh`` has shape ``(..., N, N)``. If ``False``, compute on
the leading ``K`` singular vectors, such that ``U`` has shape ``(..., M, K)``
and ``Vh`` has shape ``(..., K, N)`` and where ``K = min(M, N)``.
Default: ``True``.
compute_uv
If ``True`` then left and right singular vectors will be computed and returned
in ``U`` and ``Vh``, respectively. Otherwise, only the singular values will be
computed, which can be significantly faster.
.. note::
            with the torch backend, svd will still compute the left and right singular
            vectors irrespective of the value of compute_uv; however, Ivy will still
            only return the singular values when ``compute_uv`` is ``False``.
Returns
-------
.. note::
once complex numbers are supported, each square matrix must be Hermitian.
ret
a namedtuple ``(U, S, Vh)`` whose
- first element must have the field name ``U`` and must be an array whose
shape depends on the value of ``full_matrices`` and contain matrices with
orthonormal columns (i.e., the columns are left singular vectors). If
``full_matrices`` is ``True``, the array must have shape ``(..., M, M)``.
If ``full_matrices`` is ``False``, the array must have shape
``(..., M, K)``, where ``K = min(M, N)``. The first ``x.ndim-2`` dimensions
must have the same shape as those of the input ``x``.
- second element must have the field name ``S`` and must be an array with
shape ``(..., K)`` that contains the vector(s) of singular values of length
``K``, where ``K = min(M, N)``. For each vector, the singular values must be
sorted in descending order by magnitude, such that ``s[..., 0]`` is the
largest value, ``s[..., 1]`` is the second largest value, et cetera. The
first ``x.ndim-2`` dimensions must have the same shape as those of the input
``x``. Must have a real-valued floating-point data type having the same
precision as ``x`` (e.g., if ``x`` is ``complex64``, ``S`` must have
a ``float32`` data type).
- third element must have the field name ``Vh`` and must be an array whose
shape depends on the value of ``full_matrices`` and contain orthonormal rows
(i.e., the rows are the right singular vectors and the array is the
adjoint). If ``full_matrices`` is ``True``, the array must have shape
``(..., N, N)``. If ``full_matrices`` is ``False``, the array must have
shape ``(..., K, N)`` where ``K = min(M, N)``. The first ``x.ndim-2``
dimensions must have the same shape as those of the input ``x``. Must
have the same data type as ``x``.
Each returned array must have the same floating-point data type as ``x``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.svd.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.random_normal(shape = (9, 6))
>>> U, S, Vh = ivy.svd(x)
>>> print(U.shape, S.shape, Vh.shape)
(9, 9) (6,) (6, 6)
With reconstruction from SVD, result is numerically close to x
>>> reconstructed_x = ivy.matmul(U[:,:6] * S, Vh)
    >>> print((ivy.abs(reconstructed_x - x) > 1e-3).sum())
ivy.array(0)
>>> U, S, Vh = ivy.svd(x, full_matrices = False)
>>> print(U.shape, S.shape, Vh.shape)
(9, 6) (6,) (6, 6)
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[2.0, 3.0, 6.0], [5.0, 3.0, 4.0],
... [1.0, 7.0, 3.0], [3.0, 2.0, 5.0]]),
... b=ivy.array([[7.0, 1.0, 2.0, 3.0, 9.0],
... [2.0, 5.0, 3.0, 4.0, 10.0],
... [2.0, 11.0, 6.0, 1.0, 3.0],
... [8.0, 3.0, 4.0, 5.0, 9.0]]))
>>> U, S, Vh = ivy.svd(x)
>>> print(U.shape)
{
a: [
4,
4
],
b: [
4,
4
]
}
"""
return current_backend(x).svd(x, compute_uv=compute_uv, full_matrices=full_matrices)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def svdvals(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
driver: Optional[str] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the singular values of a matrix (or a stack of matrices) ``x``.
Parameters
----------
x
input array having shape ``(..., M, N)`` and whose innermost two dimensions form
``MxN`` matrices.
driver
        name of the cuSOLVER method to be used. This keyword
        argument only works on CUDA inputs.
        Available options are: None, gesvd, gesvdj, and gesvda. Default: None.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
array with shape ``(..., K)`` that contains the vector(s) of singular values of
length ``K``, where K = min(M, N). The values are sorted in descending order by
magnitude. The returned array must have a real-valued floating-point data type
having the same precision as ``x`` (e.g., if ``x`` is ``complex64``,
the returned array must have a ``float32`` data type).
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.svdvals.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[5.0, 7.0], [4.0, 3.0]])
>>> y = ivy.svdvals(x)
>>> print(y.shape)
ivy.Shape(2,)
    Comparing the singular values returned by ivy.svdvals() with those from ivy.svd():
>>> x = ivy.array([[5.0, 7.0], [4.0, 3.0]])
>>> _, y, _ = ivy.svd(x)
>>> print(y.shape)
ivy.Shape(2,)
>>> x = ivy.array([9.86217213, 1.31816804])
>>> y = ivy.array([9.86217213, 1.31816804])
>>> error = (x - y).abs()
>>> print(error)
ivy.array([0.,0.])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0],
... [2.0, 1.0, 3.0], [3.0, 4.0, 5.0]])
>>> x.shape
(4, 3)
>>> x = ivy.native_array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0],
... [2.0, 1.0, 3.0], [3.0, 4.0, 5.0]])
>>> y = ivy.svdvals(x)
>>> print(y)
ivy.array([10.3, 1.16, 0.615])
>>> _, SS, _ = ivy.svd(x)
>>> print(SS)
ivy.array([10.3, 1.16, 0.615])
    Comparing two different sets of singular values gives a non-zero elementwise error:
>>> x = ivy.array([10.25994301, 1.16403675, 0.61529762])
>>> y = ivy.array([9.86217213, 1.31816804, 0.51231241])
>>> error = (x - y).abs()
>>> print(error)
ivy.array([0.39777088, 0.15413129, 0.1029852 ])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[2.0, 3.0], [3.0, 4.0],
... [1.0, 3.0], [3.0, 5.0]]),
... b=ivy.array([[7.0, 1.0, 2.0, 3.0],
... [2.0, 5.0, 3.0, 4.0],
... [2.0, 6.0, 1.0, 3.0],
... [3.0, 4.0, 5.0, 9.0]]))
>>> y = ivy.svdvals(x)
>>> print(y)
{
a: ivy.array([9.01383495, 0.86647356]),
b: ivy.array([15.7786541, 5.55970621, 4.16857576, 0.86412698])
}
Instance Method Examples
~~~~~~~~~~~~~~~~~~~~~~~~
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([[8.0, 3.0], [2.0, 3.0],
... [2.0, 1.0], [3.0, 4.0],
... [4.0, 1.0], [5.0, 6.0]])
>>> y = x.svdvals()
>>> print(y)
ivy.array([13.37566757, 3.88477993])
With :class:`ivy.Container` instance method:
>>> x = ivy.Container(a=ivy.array([[2.0, 3.0, 6.0], [5.0, 3.0, 4.0],
... [1.0, 7.0, 3.0], [3.0, 2.0, 5.0]]),
... b=ivy.array([[7.0, 1.0, 2.0, 3.0, 9.0],
... [2.0, 5.0, 3.0, 4.0, 10.0],
... [2.0, 11.0, 6.0, 1.0, 3.0],
... [8.0, 3.0, 4.0, 5.0, 9.0]]))
>>> y = x.svdvals()
>>> print(y)
{
a: ivy.array([12.95925522, 4.6444726, 2.54687881]),
b: ivy.array([23.16134834, 10.35037804, 4.31025076, 1.35769391])
}
"""
return current_backend(x).svdvals(x, driver=driver, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def tensordot(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
axes: Union[int, Tuple[List[int], List[int]]] = 2,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a tensor contraction of x1 and x2 over specific axes.
.. note::
If either ``x1`` or ``x2`` has a complex floating-point data type, neither
argument must be complex-conjugated or transposed. If conjugation and/or
        transposition is desired, these operations should be explicitly performed
prior to computing the generalized matrix product.
Parameters
----------
x1
First input array. Should have a numeric data type.
x2
second input array. Must be compatible with x1 for all non-contracted axes.
Should have a numeric data type.
axes
The axes to contract over.
Default is 2.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The tensor contraction of x1 and x2 over the specified axes.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.tensordot.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[1., 2.], [2., 3.]])
>>> y = ivy.array([[3., 4.], [4., 5.]])
>>> res = ivy.tensordot(x, y, axes =0)
>>> print(res)
ivy.array([[[[3.,4.],[4.,5.]],[[6.,8.],[8.,10.]]],[[[6.,8.],[8.,10.]],[[9.,12.],[12.,15.]]]])
    With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([[1., 2.], [2., 3.]])
>>> y = ivy.native_array([[3., 4.], [4., 5.]])
>>> res = ivy.tensordot(x, y, axes = ([1],[1]))
>>> print(res)
ivy.array([[11., 14.],
[18., 23.]])
With a mix of :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x = ivy.array([[1., 0., 1.], [2., 3., 6.], [0., 7., 2.]])
>>> y = ivy.native_array([[1.], [2.], [3.]])
>>> res = ivy.tensordot(x, y, axes = 1)
>>> print(res)
ivy.array([[ 4.],
[26.],
[20.]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[1., 0., 3.], [2., 3., 4.]]),
... b=ivy.array([[5., 6., 7.], [3., 4., 8.]]))
>>> y = ivy.Container(a=ivy.array([[2., 4., 5.], [9., 10., 6.]]),
... b=ivy.array([[1., 0., 3.], [2., 3., 4.]]))
>>> res = ivy.tensordot(x, y)
>>> print(res)
{
a: ivy.array(89.),
b: ivy.array(76.)
}
"""
return current_backend(x1, x2).tensordot(x1, x2, axes=axes, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def trace(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
offset: int = 0,
axis1: int = 0,
axis2: int = 1,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the sum along the specified diagonals of a matrix (or a stack of
matrices) ``x``.
**Special cases**
Let ``N`` equal the number of elements over which to compute the sum.
- If ``N`` is ``0``, the sum is ``0`` (i.e., the empty sum).
For both real-valued and complex floating-point operands,
special cases must be handled as if the operation is implemented
by successive application of :func:`ivy.add`:
Parameters
----------
x
input array having shape ``(..., M, N)`` and whose innermost two dimensions form
``MxN`` matrices. Should have a numeric data type.
offset
offset specifying the off-diagonal relative to the main diagonal.
- ``offset = 0``: the main diagonal.
- ``offset > 0``: off-diagonal above the main diagonal.
- ``offset < 0``: off-diagonal below the main diagonal.
Default: ``0``.
axis1
axis to be used as the first axis of the 2-D sub-arrays from which the
diagonals should be taken.
        Defaults to ``0``.
axis2
axis to be used as the second axis of the 2-D sub-arrays from which the
diagonals should be taken.
        Defaults to ``1``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the traces and whose shape is determined by removing the
last two dimensions and storing the traces in the last array dimension. For
example, if ``x`` has rank ``k`` and shape ``(I, J, K, ..., L, M, N)``, then an
output array has rank ``k-2`` and shape ``(I, J, K, ..., L)`` where
::
out[i, j, k, ..., l] = trace(a[i, j, k, ..., l, :, :])
The returned array must have the same data type as ``x``.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([[2., 0., 3.],
... [3., 5., 6.]])
>>> y = ivy.trace(x, offset=0)
>>> print(y)
ivy.array(7.)
>>> x = ivy.array([[[1., 2.],
... [3., 4.]],
... [[5., 6.],
... [7., 8.]]])
>>> y = ivy.trace(x, offset=1)
>>> print(y)
ivy.array([3., 4.])
>>> x = ivy.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> y = ivy.zeros(1)
>>> ivy.trace(x, offset=1,out=y)
>>> print(y)
ivy.array(8.)
With :class:`ivy.NativeArray` inputs:
>>> x = ivy.native_array([[2., 0., 3.],[3., 5., 6.]])
>>> y = ivy.trace(x, offset=0)
>>> print(y)
ivy.array(7.)
>>> x = ivy.native_array([[0, 1, 2],
... [3, 4, 5],
... [6, 7, 8]])
>>> y = ivy.trace(x, offset=1)
>>> print(y)
ivy.array(6)
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(
... a = ivy.array([[7, 1, 2],
... [1, 3, 5],
... [0, 7, 4]]),
... b = ivy.array([[4, 3, 2],
... [1, 9, 5],
... [7, 0, 6]])
... )
>>> y = ivy.trace(x, offset=0)
>>> print(y)
{
a: ivy.array(14),
b: ivy.array(19)
}
>>> x = ivy.Container(
... a = ivy.array([[7, 1, 2],
... [1, 3, 5],
... [0, 7, 4]]),
... b = ivy.array([[4, 3, 2],
... [1, 9, 5],
... [7, 0, 6]])
... )
>>> y = ivy.trace(x, offset=1)
>>> print(y)
{
a: ivy.array(6),
b: ivy.array(8)
}
With multiple ivy.Container inputs:
>>> x = ivy.Container(
... a = ivy.array([[7, 1, 3],
... [8, 6, 5],
... [9, 7, 2]]),
... b = ivy.array([[4, 3, 2],
... [1, 9, 5],
... [7, 0, 6]])
... )
>>> offset = ivy.Container(a=1, b=0)
>>> y = ivy.trace(x, offset=offset)
>>> print(y)
{
a: ivy.array(6),
b: ivy.array(19)
}
With Array instance method example:
>>> x = ivy.array([[2., 0., 11.],
... [3., 5., 12.],
... [1., 6., 13.],
... [8., 9., 14.]])
>>> y = x.trace(offset=1)
>>> print(y)
ivy.array(12.)
With Container instance method example:
>>> x = ivy.Container(
... a=ivy.array([[2., 0., 11.],
... [3., 5., 12.]]),
... b=ivy.array([[1., 6., 13.],
... [8., 9., 14.]])
... )
>>> y = x.trace(offset=0)
>>> print(y)
{
a: ivy.array(7.),
b: ivy.array(10.)
}
"""
return current_backend(x).trace(x, offset=offset, axis1=axis1, axis2=axis2, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def vecdot(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: int = -1,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the (vector) dot product of two arrays.
Parameters
----------
x1
first input array. Should have a numeric data type.
x2
second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`).
Should have a numeric data type.
axis
axis over which to compute the dot product. Must be an integer on the interval
``[-N, N)``, where ``N`` is the rank (number of dimensions) of the shape
determined according to :ref:`broadcasting`. If specified as a negative integer,
the function must determine the axis along which to compute the dot product by
counting backward from the last dimension (where ``-1`` refers to the last
dimension). By default, the function must compute the dot product over the last
axis. Default: ``-1``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
if ``x1`` and ``x2`` are both one-dimensional arrays, a zero-dimensional
containing the dot product; otherwise, a non-zero-dimensional array containing
the dot products and having rank ``N-1``, where ``N`` is the rank (number of
dimensions) of the shape determined according to :ref:`broadcasting`. The
returned array must have a data type determined by :ref:`type-promotion`.
**Raises**
- if provided an invalid ``axis``.
- if the size of the axis over which to compute the dot product is not
the same for both ``x1`` and ``x2``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.vecdot.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x1 = ivy.array([1., 2., 3.])
>>> x2 = ivy.array([4., 5., 6.])
>>> dot_product = ivy.vecdot(x1, x2)
>>> print(dot_product)
ivy.array(32.)
>>> x1 = ivy.array([1., 2., 3.])
>>> x2 = ivy.array([1., .8, 4.])
>>> y = ivy.zeros(1)
>>> ivy.vecdot(x1, x2, out=y)
ivy.array(14.60000038)
With :class:`ivy.Container` input:
>>> x1 = ivy.array([1., 2., 3.])
>>> x2 = ivy.Container(a=ivy.array([7., 8., 9.]), b=ivy.array([10., 11., 12.]))
>>> dot_product = ivy.vecdot(x1, x2, axis=0)
>>> print(dot_product)
{
a: ivy.array(50.),
b: ivy.array(68.)
}
"""
return current_backend(x1).vecdot(x1, x2, axis=axis, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def vector_norm(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
ord: Union[int, float, Literal[inf, -inf]] = 2,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
r"""Compute the vector norm of a vector (or batch of vectors) ``x``.
Parameters
----------
x
input array. Should have a floating-point data type.
axis
If an integer, ``axis`` specifies the axis (dimension) along which to compute
vector norms. If an n-tuple, ``axis`` specifies the axes (dimensions) along
which to compute batched vector norms. If ``None``, the vector norm must be
computed over all array values (i.e., equivalent to computing the vector norm of
a flattened array). Negative indices are also supported. Default: ``None``.
keepdims
If ``True``, the axes (dimensions) specified by ``axis`` must be included in the
result as singleton dimensions, and, accordingly, the result must be compatible
with the input array (see :ref:`broadcasting`). Otherwise, if ``False``, the
axes (dimensions) specified by ``axis`` must not be included in the result.
Default: ``False``.
ord
order of the norm. The following mathematical norms are supported:
+------------------+----------------------------+
| ord | description |
+==================+============================+
| 1 | L1-norm (Manhattan) |
+------------------+----------------------------+
| 2 | L2-norm (Euclidean) |
+------------------+----------------------------+
| inf | infinity norm |
+------------------+----------------------------+
| (int,float >= 1) | p-norm |
+------------------+----------------------------+
The following non-mathematical "norms" are also supported:
+------------------+--------------------------------+
| ord | description |
+==================+================================+
| 0 | sum(a != 0) |
+------------------+--------------------------------+
| -inf | min(abs(a)) |
+------------------+--------------------------------+
| (int,float < 1) | sum(abs(a)\*\*ord)\*\*(1./ord) |
+------------------+--------------------------------+
Default: ``2``.
dtype
data type that may be used to perform the computation more precisely. The input
array ``x`` gets cast to ``dtype`` before the function's computations.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the vector norms. If ``axis`` is ``None``, the returned
array must be a zero-dimensional array containing a vector norm. If ``axis`` is
a scalar value (``int`` or ``float``), the returned array must have a rank which
is one less than the rank of ``x``. If ``axis`` is a ``n``-tuple, the returned
array must have a rank which is ``n`` less than the rank of ``x``. The returned
array must have a floating-point data type determined by :ref:`type-promotion`.
If ``x`` has a complex-valued data type, the returned array must have a
real-valued floating-point data type whose precision matches the precision
of ``x`` (e.g., if ``x`` is ``complex128``, then the returned array must have
a ``float64`` data type).
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.vector_norm.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
>>> x = ivy.array([1., 2., 3.])
>>> y = ivy.vector_norm(x)
>>> print(y)
ivy.array([3.7416575])
>>> x = ivy.array([[1, 2, 3], [1.3, 2.4, -1.2]])
>>> y = ivy.vector_norm(x, axis = 1, ord = 1, dtype = ivy.float32)
>>> print(y)
ivy.array([6., 4.9000001])
>>> x = ivy.array([[1, 2, 3], [1.3, 2.4, -1.2]])
>>> y = ivy.vector_norm(x, axis = 0, keepdims = True, ord = float("inf"))
>>> print(y)
ivy.array([[1.3, 2.4, 3.]])
>>> x = ivy.native_array([1, 2, 3, 4], dtype = ivy.float32)
>>> y = ivy.vector_norm(x, ord = 3.)
>>> print(y)
ivy.array([4.64158917])
>>> x = ivy.array([1.,2.,3.,4.], dtype = ivy.float16)
>>> z = ivy.empty(shape = 1, dtype=ivy.float16)
>>> y = ivy.vector_norm(x, ord = 0, out = z)
>>> print(y)
ivy.array(4.)
>>> x = ivy.arange(8, dtype=ivy.float32).reshape((2,2,2))
>>> y = ivy.vector_norm(x, axis = (0,1), ord = float("-inf"))
>>> print(y)
ivy.array([0, 1])
>>> x = ivy.Container(a = [-1., 1., -2., 2.], b = [0., 1.2, 2.3, -3.1])
>>> y = ivy.vector_norm(x, ord = -1)
>>> print(y)
{
a: ivy.array([0.33333334]),
b: ivy.array([0.])
}
"""
return current_backend(x).vector_norm(
x, axis=axis, keepdims=keepdims, ord=ord, dtype=dtype, out=out
)
# Extra #
# ------#
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def diag(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
k: int = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the specified diagonals of the input array, or an array with the
input array's elements as diagonals.
Parameters
----------
x
An array with rank >= 1.
k
An integer that controls which diagonal to consider.
Positive value means superdiagonal,
0 refers to the main diagonal,
and negative value means subdiagonal.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
If x is a 1-D array, the function returns a 2-D square array with the elements
of input as diagonals.
If x is a 2-D array, the function returns a 1-D array with the diagonal elements
of x.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
extensions/generated/array_api.linalg.diagonal.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` inputs:
    >>> x = ivy.array([[0, 1, 2],
    ...                [3, 4, 5],
    ...                [6, 7, 8]])
    >>> ivy.diag(x)
    ivy.array([0, 4, 8])
    >>> x = ivy.array([[0, 1, 2],
    ...                [3, 4, 5],
    ...                [6, 7, 8]])
    >>> ivy.diag(x, k=1)
    ivy.array([1, 5])
    >>> x = ivy.array([[0, 1, 2],
    ...                [3, 4, 5],
    ...                [6, 7, 8]])
    >>> ivy.diag(x, k=-1)
    ivy.array([3, 7])
    >>> x = ivy.array([[0, 1, 2],
    ...                [3, 4, 5],
    ...                [6, 7, 8]])
>>> ivy.diag(ivy.diag(x))
ivy.array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
return current_backend(x).diag(x, k=k, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def vander(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
N: Optional[int] = None,
increasing: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Generate a Vandermonde matrix. The columns of the output matrix are
elementwise powers of the input vector x^{(N-1)}, x^{(N-2)}, ..., x^0x. If
increasing is True, the order of the columns is reversed x^0, x^1, ...,
x^{(N-1)}. Such a matrix with a geometric progression in each row is named
for Alexandre-Theophile Vandermonde.
Parameters
----------
x
1-D input array.
N
Number of columns in the output. If N is not specified,
a square array is returned (N = len(x))
increasing
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
out
optional output array, for writing the result to.
Returns
-------
ret
Vandermonde matrix.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([1, 2, 3, 5])
>>> ivy.vander(x)
ivy.array(
[[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]]
)
>>> x = ivy.array([1, 2, 3, 5])
>>> ivy.vander(x, N=3)
ivy.array(
[[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]]
)
>>> x = ivy.array([1, 2, 3, 5])
>>> ivy.vander(x, N=3, increasing=True)
ivy.array(
[[ 1, 1, 1],
[ 1, 2, 4],
[ 1, 3, 9],
[ 1, 5, 25]]
)
"""
return current_backend(x).vander(x, N=N, increasing=increasing, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def vector_to_skew_symmetric_matrix(
vector: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""
Given vector, return the associated `Skew-symmetric matrix
<https://en.wikipedia.org/wiki/Skew-symmetric_matrix#Cross_product/>`_.
Parameters
----------
vector
Vector to convert *(batch_shape,3)*.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Skew-symmetric matrix *(batch_shape,3,3)*.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
"""
return current_backend(vector).vector_to_skew_symmetric_matrix(vector, out=out)
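# Illustrative sketch (not part of the API docs): for a single vector
# ``a = [a1, a2, a3]`` the returned matrix has the usual cross-product form
#     [[  0, -a3,  a2],
#      [ a3,   0, -a1],
#      [-a2,  a1,   0 ]]
# so that matmul(skew(a), b) == cross(a, b) for 3-vectors a and b, assuming the
# standard sign convention is used by the backend implementations.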
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def tensorsolve(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
axes: Union[int, Tuple[List[int], List[int]]] = 2,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
ndim1 = ivy.get_num_dims(x1)
ndim2 = ivy.get_num_dims(x2)
if axes is not None:
allaxes = list(range(0, ndim1))
for k in axes:
allaxes.remove(k)
allaxes.insert(ndim1, k)
x1 = ivy.matrix_transpose(x1)
old_shape = x1.shape[-(ndim1 - ndim2) :]
prod = 1
for k in old_shape:
prod *= k
if ivy.shape(ivy.flatten(x1))[0] != prod**2:
raise ivy.utils.exceptions.IvyException(
"Input arrays must satisfy the requirement "
"prod(x1.shape[x2.ndim:]) == prod(x1.shape[:x2.ndim])"
)
x1 = ivy.reshape(x1, (prod, prod))
x2 = ivy.flatten(x2)
res = ivy.solve(x1, x2)
res = ivy.reshape(res, old_shape)
return res
# return current_backend(x1, x2).tensorsolve(x1, x2, axes=axes, out=out)
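# Minimal usage sketch for ``tensorsolve`` (illustrative; assumes the ``axes=None``
# path above and that ``ivy.solve`` accepts the flattened right-hand side):
#     a = ivy.eye(6).reshape((2, 3, 2, 3))  # prod of leading dims == prod of trailing dims == 6
#     b = ivy.ones((2, 3))
#     x = ivy.tensorsolve(a, b, axes=None)  # x.shape == (2, 3) == a.shape[b.ndim:]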
| ivy/ivy/functional/ivy/linear_algebra.py/0 | {
"file_path": "ivy/ivy/functional/ivy/linear_algebra.py",
"repo_id": "ivy",
"token_count": 47809
} | 49 |
# global
from typing import Tuple, Union, Optional
import abc
# local
import ivy
from ivy.functional.ivy.gradients import _variable
# Initializer #
# ----------- #
class Initializer(abc.ABC):
"""An initializer for internal variables for a layer.
A neuron is a function of the form `a = g(z)`, where `g` is the
activation functions and `z = w_1x_1 + w_2x_2 + ... + w_nx_n` where the
`w_i` are the weights and the `x_i` are the inputs. To prevent this
`z` from vanishing (getting too small) or exploding (getting too big), the
initial weights must be picked carefully.
"""
@abc.abstractmethod
def create_variables(
self,
var_shape: Tuple[int, int],
device: Union[ivy.Device, ivy.NativeDevice],
fan_out: Optional[float] = None,
fan_in: Optional[float] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
) -> ivy.Array:
"""Create internal variables for the layer.
Parameters
----------
var_shape
Tuple representing the shape of the desired array. If considering
the array as a rectangular matrix, this tuple is represented as
'(ROWS, COLUMNS)'.
device
Device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
fan_out
The number of nodes in the next layer.
fan_in
The number of nodes in the previous layer.
dtype
Desired data type.
"""
return None
# Constant #
# ---------#
class Constant(Initializer):
def __init__(self, constant: float):
"""Constant initializer, will fill in all values with the value of
`constant`.
Parameters
----------
constant
Constant value for initialization.
"""
self._constant = constant
def create_variables(
self,
var_shape: Tuple[int, int],
device: Union[ivy.Device, ivy.NativeDevice],
fan_out: Optional[float] = None,
fan_in: Optional[float] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
) -> ivy.Array:
return _variable(
ivy.full(var_shape, self._constant, device=device, dtype=dtype),
)
class Zeros(Constant):
def __init__(self):
"""Constant initializer that fills with the constant value `0.0`."""
super().__init__(0.0)
class Ones(Constant):
def __init__(self):
"""Constant initializer that fills with the constant value `1.0`."""
super().__init__(1.0)
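# For reference (illustrative): ``Zeros()`` and ``Ones()`` are simply
# ``Constant(0.0)`` and ``Constant(1.0)``, so e.g.
#     Zeros().create_variables((2, 2), "cpu")
# yields a 2x2 variable filled with zeros.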
# Uniform #
# --------#
class Uniform(Initializer):
def __init__(self, numerator, fan_mode, power, gain):
"""Initialize based on a uniform distribution, will fill in all values
with values drawn from a uniform (all values have an equal probability)
        distribution with range `[-wlim, wlim]` (endpoints included), where `wlim`
        is calculated as
`gain * (numerator / fan)**power`. This distribution helps with issues when
trying to optimize and train networks. The expected value of this distribution
        is `0` and the variance is
        `(gain * (numerator / fan)**power)**2 / 3`.
This is intended as a base-class for special predefined initializers.
Parameters
----------
        numerator
            Numerator used when computing the sampling limit
            `wlim = gain * (numerator / fan)**power`.
fan_mode
Determines how `fan` is calculated.
- `fan_out` sets `fan` to the number of output features of this neuron.
              This is useful when training using back-propagation.
            - `fan_in` sets `fan` to the number of input features of this neuron.
              This is useful when training using forward-propagation.
- `fan_sum` sets `fan` to the sum of the number of input features and
output features of this neuron.
- `fan_avg` sets `fan` to the average of the number of input features and
output features of this neuron.
power
Sets the drop-off factor for the calculated `fan`.
gain
Scales the output of the distribution.
"""
ivy.utils.assertions.check_elem_in_list(
fan_mode, ["fan_in", "fan_out", "fan_sum", "fan_avg"]
)
self._numerator = numerator
self._fan_mode = fan_mode
self._power = power
self._gain = gain
def create_variables(
self, var_shape, device, fan_out=None, fan_in=None, dtype=None
):
"""Create internal variables for the layer.
Parameters
----------
var_shape
Tuple representing the shape of the desired array. If considering
the array as a rectangular matrix, this tuple is represented as
'(ROWS, COLUMNS)'.
device
Device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
fan_out
The number of nodes in the next layer.
fan_in
The number of nodes in the previous layer.
dtype
Desired data type.
"""
if self._fan_mode == "fan_in":
ivy.utils.assertions.check_exists(
fan_in,
message="input_channels must be specified for fan_in denominator mode",
)
fan = fan_in
elif self._fan_mode == "fan_out":
ivy.utils.assertions.check_exists(
fan_out,
message=(
"output_channels must be specified for fan_out denominator mode"
),
)
fan = fan_out
elif self._fan_mode == "fan_sum":
ivy.utils.assertions.check_all_or_any_fn(
fan_in,
fan_out,
fn=ivy.exists,
type="all",
message=(
"input_channels and output_channels must both be"
" specified for fan_sum denominator mode."
),
as_array=False,
)
fan = fan_in + fan_out
elif self._fan_mode == "fan_avg":
ivy.utils.assertions.check_all_or_any_fn(
fan_in,
fan_out,
fn=ivy.exists,
type="all",
message=(
"input_channels and output_channels must both be"
" specified for fan_avg denominator mode."
),
as_array=False,
)
fan = (fan_in + fan_out) / 2
else:
raise ivy.utils.exceptions.IvyException(
"Invalid denominator mode, must be one of [ fan_in | fan_out | "
"fan_sum | fan_avg ] "
)
wlim = ((self._numerator / fan) ** self._power) * self._gain
return _variable(
ivy.random_uniform(
low=-wlim, high=wlim, shape=var_shape, device=device, dtype=dtype
),
)
class GlorotUniform(Uniform):
def __init__(self):
"""Initialize Glorot uniform, also known as the Xavier uniform
initializer.
It draws values from a uniform distribution `[-limit, limit]` where
`limit = sqrt(6 / (fan_in + fan_out))` where `fan_in` and `fan_out` are the
number of input and output features respectively.
"""
super().__init__(numerator=6, fan_mode="fan_sum", power=0.5, gain=1)
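# For reference (derivable from ``Uniform.create_variables`` above): with
# ``numerator=6``, ``fan_mode="fan_sum"``, ``power=0.5`` and ``gain=1`` the
# sampling limit reduces to
#     wlim = (6 / (fan_in + fan_out)) ** 0.5
# which is exactly the Glorot/Xavier bound quoted in the class docstring.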
class FirstLayerSiren(Uniform):
def __init__(self):
"""Initialize Siren uniform for the first layer.
It draws values from a uniform distribution `[-limit, limit]`
        where `limit = 1 / fan_in` and `fan_in` is the number of input
features.
"""
super().__init__(numerator=1, fan_mode="fan_in", power=1, gain=1)
class Siren(Uniform):
def __init__(self, w0=30):
"""Initialize Siren uniform initializer for the first layer.
It draws values from a uniform distribution `[-limit, limit]`
where `limit=sqrt(6 / fan_in) / w0` where `fan_in` is the number
of input features.
"""
super().__init__(numerator=6, fan_mode="fan_in", power=0.5, gain=1 / w0)
# Gaussian #
# ---------#
class KaimingNormal(Initializer):
def __init__(self, mean=0, fan_mode="fan_in"):
"""Initialize Kaiming normal, also known as He Initialization.
        It is a method for initializing layers that takes into account the
        non-linearity of activation functions. It uses a normal distribution centered
        at `mean` with standard deviation `sqrt(2 / ((1 + negative_slope^2) * fan))`.
Parameters
----------
mean
Sets the expected value, average, and center of the normal distribution.
fan_mode
Determines how `fan` is calculated.
- `fan_out` sets `fan` to the number of output features of this neuron.
This is useful when training using back-propogation.
- `fan_in` sets `fan` to the number of input features of this neuron.
This is useful when training using forward-propogation.
- `fan_sum` sets `fan` to the sum of the number of input features and
output features of this neuron.
            - `fan_avg` sets `fan` to the average of the number of input features and
output features of this neuron.
"""
ivy.utils.assertions.check_elem_in_list(
fan_mode, ["fan_in", "fan_out", "fan_sum", "fan_avg"]
)
self._mean = mean
self._fan_mode = fan_mode
def create_variables(
self,
var_shape,
device,
fan_out=None,
fan_in=None,
negative_slope=0.0,
dtype=None,
):
"""Create internal variables for the layer.
Parameters
----------
var_shape
Tuple representing the shape of the desired array. If considering
the array as a rectangular matrix, this tuple is represented as
'(ROWS, COLUMNS)'.
device
Device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
fan_out
The number of nodes in the next layer.
fan_in
The number of nodes in the previous layer.
negative_slope
            The negative slope of the activation (as for leaky ReLU), used in the
            variance scaling. A value of `0` gives a standard deviation of
            `sqrt(2 / fan)`.
dtype
Desired data type.
"""
if self._fan_mode == "fan_in":
ivy.utils.assertions.check_exists(
fan_in,
message="input_channels must be specified for fan_in denominator mode",
)
fan = fan_in
elif self._fan_mode == "fan_out":
ivy.utils.assertions.check_exists(
fan_out,
message=(
"output_channels must be specified for fan_out denominator mode"
),
)
fan = fan_out
elif self._fan_mode == "fan_sum":
ivy.utils.assertions.check_all_or_any_fn(
fan_in,
fan_out,
fn=ivy.exists,
type="all",
message=(
"input_channels and output_channels must both be"
" specified for fan_sum denominator mode."
),
as_array=False,
)
fan = fan_in + fan_out
elif self._fan_mode == "fan_avg":
ivy.utils.assertions.check_all_or_any_fn(
fan_in,
fan_out,
fn=ivy.exists,
type="all",
message=(
"input_channels and output_channels must both be"
" specified for fan_avg denominator mode."
),
as_array=False,
)
fan = (fan_in + fan_out) / 2
else:
raise ivy.utils.exceptions.IvyException(
"Invalid denominator mode, must be one of [ fan_in | fan_out | "
"fan_sum | fan_avg ] "
)
std = (2 / ((1 + negative_slope**2) * fan)) ** 0.5
return _variable(
ivy.random_normal(
mean=self._mean, std=std, shape=var_shape, device=device, dtype=dtype
)
)
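# For reference (derivable from the formula above): with the default
# ``negative_slope=0.0`` and ``fan_mode="fan_in"`` the standard deviation
# reduces to
#     std = (2 / fan_in) ** 0.5
# which is the usual He-normal scaling for ReLU networks.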
class RandomNormal(Initializer):
def __init__(self, mean=0.0, stddev=0.05, seed=None):
"""Initialize with Random Normal Distribution.
It draws values from a Random Normal Distribution with given mean and
standard deviation.
Parameters
----------
mean
Sets the expected value, average, and center of the normal distribution.
stddev
Sets the standard deviation of the normal distribution.
seed
            Seed used to make the sampling reproducible. (Default: None)
"""
self._mean = mean
self._stddev = stddev
self._seed = seed
def create_variables(
self,
var_shape=None,
device=None,
dtype=None,
):
"""Create internal variables for the layer.
Parameters
----------
var_shape
Tuple representing the shape of the desired array. If considering
the array as a rectangular matrix, this tuple is represented as
'(ROWS, COLUMNS)'.
device
Device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu'
etc. Default is cpu.
dtype
Desired data type.
"""
return _variable(
ivy.random_normal(
mean=self._mean,
std=self._stddev,
shape=var_shape,
seed=self._seed,
device=device,
dtype=dtype,
)
)
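# Minimal usage sketch (illustrative only; layers normally call an initializer's
# ``create_variables`` internally when building their weights):
#     init = GlorotUniform()
#     w = init.create_variables((64, 32), "cpu", fan_out=64, fan_in=32)
#     # w.shape == (64, 32), values drawn from U(-wlim, wlim) with
#     # wlim = sqrt(6 / (64 + 32))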
| ivy/ivy/stateful/initializers.py/0 | {
"file_path": "ivy/ivy/stateful/initializers.py",
"repo_id": "ivy",
"token_count": 6607
} | 50 |
# Einsum expression parser, this file has been adapted from `opt_einsum` parser here
# https://github.com/dgasmith/opt_einsum/blob/master/opt_einsum/parser.py
import itertools
from typing import Any, Dict, Iterator, List, Tuple, Union
import numpy as np
ArrayType = Any
TensorShapeType = Tuple[int, ...]
_einsum_symbols_base = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
def is_valid_einsum_char(x: str) -> bool:
"""Check if the character ``x`` is valid for numpy einsum. **Examples:**
```python
is_valid_einsum_char("a")
#> True
is_valid_einsum_char("Ǵ")
#> False
```
"""
return (x in _einsum_symbols_base) or (x in ",->.")
def has_valid_einsum_chars_only(einsum_str: str) -> bool: # [x]
"""Check if ``einsum_str`` contains only valid characters for numpy einsum.
**Examples:**
```python
has_valid_einsum_chars_only("abAZ")
#> True
has_valid_einsum_chars_only("Över")
#> False
```
"""
return all(map(is_valid_einsum_char, einsum_str))
def get_symbol(i: int) -> str:
"""
Get the symbol corresponding to int ``i`` - runs through the usual 52
letters before resorting to unicode characters, starting at ``chr(192)``
and skipping surrogates.
**Examples:**
```python
get_symbol(2)
#> 'c'
get_symbol(200)
#> 'Ŕ'
get_symbol(20000)
#> '京'
```
"""
if i < 52:
return _einsum_symbols_base[i]
elif i >= 55296:
# Skip chr(57343) - chr(55296) as surrogates
return chr(i + 2048)
else:
return chr(i + 140)
def gen_unused_symbols(used: str, n: int) -> Iterator[str]:
"""Generate ``n`` symbols that are not already in ``used``.
**Examples:**
```python
list(oe.parser.gen_unused_symbols("abd", 2))
#> ['c', 'e']
```
"""
i = cnt = 0
while cnt < n:
s = get_symbol(i)
i += 1
if s in used:
continue
yield s
cnt += 1
def find_output_str(subscripts: str) -> str:
"""Find the output string for the inputs ``subscripts`` under canonical
    Einstein summation rules. That is, repeated indices are summed over by
default.
Examples
--------
>>> oe.parser.find_output_str("ab,bc")
'ac'
>>> oe.parser.find_output_str("a,b")
'ab'
>>> oe.parser.find_output_str("a,a,b,b")
''
"""
tmp_subscripts = subscripts.replace(",", "")
return "".join(
s for s in sorted(set(tmp_subscripts)) if tmp_subscripts.count(s) == 1
)
def find_output_shape(
inputs: List[str], shapes: List[TensorShapeType], output: str
) -> TensorShapeType:
"""Find the output shape for given inputs, shapes and output string, taking
into account broadcasting.
Examples
--------
>>> oe.parser.find_output_shape(["ab", "bc"], [(2, 3), (3, 4)], "ac")
(2, 4)
# Broadcasting is accounted for
>>> oe.parser.find_output_shape(["a", "a"], [(4, ), (1, )], "a")
(4,)
"""
return tuple(
max(
shape[loc]
for shape, loc in zip(shapes, [x.find(c) for x in inputs])
if loc >= 0
)
for c in output
)
def possibly_convert_to_numpy(x: Any) -> Any: # possibly convert to native
"""Convert things without a 'shape' to ndarrays, but leave everything else.
Examples
--------
>>> oe.parser.possibly_convert_to_numpy(5)
array(5)
>>> oe.parser.possibly_convert_to_numpy([5, 3])
array([5, 3])
>>> oe.parser.possibly_convert_to_numpy(np.array([5, 3]))
array([5, 3])
# Any class with a shape is passed through
>>> class Shape:
... def __init__(self, shape):
... self.shape = shape
...
>>> myshape = Shape((5, 5))
>>> oe.parser.possibly_convert_to_numpy(myshape)
<__main__.Shape object at 0x10f850710>
"""
if not hasattr(x, "shape"):
return np.asanyarray(x)
else:
return x
def convert_subscripts(old_sub: List[Any], symbol_map: Dict[Any, Any]) -> str:
"""Convert user custom subscripts list to subscript string according to
`symbol_map`.
Examples
--------
>>> oe.parser.convert_subscripts(['abc', 'def'], {'abc':'a', 'def':'b'})
'ab'
>>> oe.parser.convert_subscripts([Ellipsis, object], {object:'a'})
'...a'
"""
return "".join("..." if s is Ellipsis else symbol_map[s] for s in old_sub)
def convert_interleaved_input(
operands: Union[List[Any], Tuple[Any]],
) -> Tuple[str, List[Any]]:
"""Convert 'interleaved' input to standard einsum input."""
tmp_operands = list(operands)
operand_list = []
subscript_list = []
for p in range(len(operands) // 2):
operand_list.append(tmp_operands.pop(0))
subscript_list.append(tmp_operands.pop(0))
output_list = tmp_operands[-1] if len(tmp_operands) else None
operands = [possibly_convert_to_numpy(x) for x in operand_list]
# build a map from user symbols to single-character symbols based on `get_symbol`
# The map retains the intrinsic order of user symbols
try:
# collect all user symbols
symbol_set = set(itertools.chain.from_iterable(subscript_list))
# remove Ellipsis because it can not be compared with other objects
symbol_set.discard(Ellipsis)
# build the map based on sorted user symbols retaining order lost in `set`
symbol_map = {
symbol: get_symbol(idx) for idx, symbol in enumerate(sorted(symbol_set))
}
except TypeError as e: # unhashable or uncomparable object
raise TypeError(
"For this input type lists must contain either Ellipsis "
"or hashable and comparable object (e.g. int, str)."
) from e
subscripts = ",".join(convert_subscripts(sub, symbol_map) for sub in subscript_list)
if output_list is not None:
subscripts += "->"
subscripts += convert_subscripts(output_list, symbol_map)
return subscripts, operands
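# Illustrative sketch (with hypothetical operands ``a`` and ``b``): user symbols
# are remapped to single characters in sorted order, so
#     convert_interleaved_input((a, ["i", "j"], b, ["j", "k"], ["i", "k"]))
# returns ('ab,bc->ac', [a, b]), with the operands converted via
# possibly_convert_to_numpy.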
def legalise_einsum_expr(*operands: Any) -> str:
"""Reproduction of einsum c side einsum parsing in python. **Parameters:**
Intakes the same inputs as `contract_path`, but NOT the keyword args. The
only.
supported keyword argument is:
- **shapes** - *(bool, optional)* Whether
``parse_einsum_input`` should assume
arrays (the default) or
array shapes have been supplied.
Returns
-------
einsum_eqn : str
Legalised einsum equation
Examples
--------
The operand list is simplified to reduce printing:
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
    >>> legalise_einsum_expr('...a,...a->...', a, b)
    'da,cda->cd'
    >>> legalise_einsum_expr(a, [Ellipsis, 0], b, [Ellipsis, 0])
    'da,cda->cd'
"""
if len(operands) == 0:
raise ValueError("No input operands")
if isinstance(operands[0], str):
subscripts = operands[0].replace(" ", "")
operands = [possibly_convert_to_numpy(x) for x in operands[1:]]
else:
subscripts, operands = convert_interleaved_input(operands)
operand_shapes = [o.shape for o in operands]
# Check for proper "->"
if ("-" in subscripts) or (">" in subscripts):
invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
if invalid or (subscripts.count("->") != 1):
raise ValueError("Subscripts can only contain one '->'.")
# Parse ellipses
if "." in subscripts:
used = subscripts.replace(".", "").replace(",", "").replace("->", "")
ellipse_inds = "".join(
gen_unused_symbols(used, max(len(x) for x in operand_shapes))
)
longest = 0
# Do we have an output to account for?
if "->" in subscripts:
input_tmp, output_sub = subscripts.split("->")
split_subscripts = input_tmp.split(",")
out_sub = True
else:
split_subscripts = subscripts.split(",")
out_sub = False
for num, sub in enumerate(split_subscripts):
if "." in sub:
if (sub.count(".") != 3) or (sub.count("...") != 1):
raise ValueError("Invalid Ellipses.")
# Take into account numerical values
if operand_shapes[num] == ():
ellipse_count = 0
else:
ellipse_count = max(len(operand_shapes[num]), 1) - (len(sub) - 3)
if ellipse_count > longest:
longest = ellipse_count
if ellipse_count < 0:
raise ValueError("Ellipses lengths do not match.")
elif ellipse_count == 0:
split_subscripts[num] = sub.replace("...", "")
else:
split_subscripts[num] = sub.replace(
"...", ellipse_inds[-ellipse_count:]
)
subscripts = ",".join(split_subscripts)
# Figure out output ellipses
if longest == 0:
out_ellipse = ""
else:
out_ellipse = ellipse_inds[-longest:]
if out_sub:
subscripts += "->" + output_sub.replace("...", out_ellipse)
else:
# Special care for outputless ellipses
output_subscript = find_output_str(subscripts)
normal_inds = "".join(sorted(set(output_subscript) - set(out_ellipse)))
subscripts += f"->{out_ellipse}{normal_inds}"
# Build output string if does not exist
if "->" in subscripts:
input_subscripts, output_subscript = subscripts.split("->")
else:
input_subscripts, output_subscript = subscripts, find_output_str(subscripts)
# Make sure output subscripts are in the input
for char in output_subscript:
if char not in input_subscripts:
raise ValueError(f"Output character '{char}' did not appear in the input")
# Make sure number operands is equivalent to the number of terms
if len(input_subscripts.split(",")) != len(operands):
raise ValueError(
f"Number of einsum subscripts, {len(input_subscripts.split(','))}, must be"
f" equal to the number of operands, {len(operands)}."
)
eqn = f"{input_subscripts}->{output_subscript}"
return eqn
| ivy/ivy/utils/einsum_parser.py/0 | {
"file_path": "ivy/ivy/utils/einsum_parser.py",
"repo_id": "ivy",
"token_count": 4560
} | 51 |
# global
import warnings
import re
from contextlib import redirect_stdout
from io import StringIO
import numpy as np
import sys
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pytest
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
# function that trims white spaces from docstrings
def trim(*, docstring):
"""Trim function from PEP-257."""
if not docstring:
return ""
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Current code/unittests expects a line return at
# end of multiline docstrings
# workaround expected behavior from unittests
if "\n" in docstring:
trimmed.append("")
return "\n".join(trimmed)
def check_docstring_examples_run(
*, fn, from_container=False, from_array=False, num_sig_fig=2
):
"""Performs docstring tests for a given function.
Parameters
----------
fn
Callable function to be tested.
from_container
if True, check docstring of the function as a method of an Ivy Container.
from_array
if True, check docstring of the function as a method of an Ivy Array.
num_sig_fig
Number of significant figures to check in the example.
Returns
-------
        True if the docstring examples run and match (or the function is skipped),
        otherwise False.
"""
"""
    Functions skipped as their output depends on outside factors:
random_normal, random_uniform, shuffle, num_gpus, current_backend,
get_backend
"""
to_skip = [
"random_normal",
"random_uniform",
"randint",
"shuffle",
"beta",
"gamma",
"dev",
"num_gpus",
"current_backend",
"get_backend",
"namedtuple",
"invalid_dtype",
"DType",
"NativeDtype",
"Dtype",
"multinomial",
"num_cpu_cores",
"get_all_ivy_arrays_on_dev",
"num_ivy_arrays_on_dev",
"total_mem_on_dev",
"used_mem_on_dev",
"percent_used_mem_on_dev",
"function_supported_dtypes",
"function_unsupported_dtypes",
"randint",
"unique_counts",
"unique_all",
"dropout",
"dropout1d",
"dropout2d",
"dropout3d",
"total_mem_on_dev",
"supports_inplace_updates",
"get",
"deserialize",
"set_split_factor",
]
# the temp skip list consists of functions
# which have an issue with their implementation
skip_list_temp = [
"outer", # Failing only torch backend as inputs must be 1-D.
"pool", # Maximum recursion depth exceeded ivy.pool
"put_along_axis", # Depends on scatter_nd for numpy.
"result_type", # Different ouput coming for diff backends in 1st example.
"scaled_dot_product_attention", # Different backends giving different answers.
"eigh_tridiagonal", # Failing only for TF backend
"dct",
"choose", # Maximum recurion depth exceeded (No backend choose fn).
"idct", # Function already failing for all 5 backends.
"set_item", # Different errors for diff backends (jax, torch)
"l1_normalize", # Function already failing for all 5 backends.
"histogram", # Failing for TF, Torch backends (TODO's left)
"value_and_grad", # Failing only for Torch backend. (Requires_grad=True)
"layer_norm", # Failing only for Torch backend.
"eigvalsh", # Failing only Jax Backend + only for Native Array Example.
"conv2d_transpose", # Function already failing for all 5 backends.
"solve",
"one_hot", # One small example failing for all backends except torch.
"scatter_flat", # Function Already failing for 3 backends
"scatter_nd", #
"execute_with_gradients", # Function Already failing for 4 backends.
"gather",
"multiprocessing",
"if_else",
"trace_graph", # SystemExit: Please sign up for free pilot access.
"dill",
"smooth_l1_loss", # Function already failing for all 5 backends.
"cummax", # Function already failing for all 5 backends.
"insert_into_nest_at_index",
"while_loop",
"argmax",
"native_array",
]
# skip list for array and container docstrings
skip_arr_cont = [
# generates different results due to randomization
"cumprod",
"supports_inplace_updates",
"shuffle",
"dropout",
"dropout1d",
"dropout2d",
"dropout3",
"svd",
"unique_all",
# exec and self run generates diff results
"dev",
"scaled_dot_product_attention",
# temp list for array/container methods
"einops_reduce",
"array_equal",
"batched_outer",
"huber_loss",
"softshrink",
"tt_matrix_to_tensor",
"unsorted_segment_mean",
"array_equal",
"batched_outer",
"huber_loss",
"kl_div",
"soft_margin_loss",
"threshold",
]
# comment out the line below in future to check for the functions in temp skip list
to_skip += skip_list_temp # + currently_being_worked_on
if not hasattr(fn, "__name__"):
return True
fn_name = fn.__name__
if fn_name not in ivy.utils.backend.handler.ivy_original_dict:
return True
if from_container:
docstring = getattr(
ivy.utils.backend.handler.ivy_original_dict["Container"], fn_name
).__doc__
elif from_array:
docstring = getattr(
ivy.utils.backend.handler.ivy_original_dict["Array"], fn_name
).__doc__
else:
docstring = ivy.utils.backend.handler.ivy_original_dict[fn_name].__doc__
if docstring is None:
return True
if fn_name in to_skip:
return True
if (from_container or from_array) and fn_name in skip_arr_cont:
return True
# removing extra new lines and trailing white spaces from the docstrings
trimmed_docstring = trim(docstring=docstring)
trimmed_docstring = trimmed_docstring.split("\n")
    # end_index stays -1 if no print statement is found in the docstring
end_index = -1
# parsed_output is set as an empty string to manage functions with multiple inputs
parsed_output = ""
    # parse the docstring for lines containing a print statement;
    # the lines that follow them form the expected (parsed) output
sub = ">>> print("
for index, line in enumerate(trimmed_docstring):
if sub in line:
for i, s in enumerate(trimmed_docstring[index + 1 :]):
if s.startswith(">>>") or s.lower().startswith(
("with", "#", "instance")
):
end_index = index + i + 1
break
else:
end_index = len(trimmed_docstring)
p_output = trimmed_docstring[index + 1 : end_index]
p_output = "".join(p_output).replace(" ", "")
p_output = p_output.replace("...", "")
if parsed_output != "":
parsed_output += ","
parsed_output += p_output
if end_index == -1:
return True
executable_lines = []
for line in trimmed_docstring:
if line.startswith(">>>"):
executable_lines.append(line.split(">>>")[1][1:])
is_multiline_executable = True
if line.startswith("...") and is_multiline_executable:
executable_lines[-1] += line.split("...")[1][1:]
if ">>> print(" in line:
is_multiline_executable = False
# noinspection PyBroadException
f = StringIO()
with redirect_stdout(f):
for line in executable_lines:
# noinspection PyBroadException
try:
if f.getvalue() != "" and f.getvalue()[-2] != ",":
print(",")
exec(line)
except Exception as e:
print(e, " ", ivy.current_backend_str(), " ", line)
output = f.getvalue()
output = output.rstrip()
output = output.replace(" ", "").replace("\n", "")
output = output.rstrip(",")
# handling cases when the stdout contains ANSI colour codes
# 7-bit C1 ANSI sequences
ansi_escape = re.compile(
r"""
\x1B # ESC
(?: # 7-bit C1 Fe (except CSI)
[@-Z\\-_]
| # or [ for CSI, followed by a control sequence
\[
[0-?]* # Parameter bytes
[ -/]* # Intermediate bytes
[@-~] # Final byte
)
""",
re.VERBOSE,
)
output = ansi_escape.sub("", output)
# print("Output: ", output)
# print("Putput: ", parsed_output)
# assert output == parsed_output, "Output is unequal to the docstrings output."
sig_fig = float(f"1e-{str(num_sig_fig)}")
atol = sig_fig / 10000
numeric_pattern = re.compile(
r"""
[\{\}\(\)\[\]\<>]|\w+:
""",
re.VERBOSE,
)
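    # The pattern above strips brackets and "key:" prefixes so that only the
    # raw numbers remain for comparison; e.g. "{a:ivy.array([1.,2.])}" reduces
    # to "1.,2." after the replacements and substitutions below.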
num_output = output.replace("ivy.array", "").replace("ivy.Shape", "")
num_parsed_output = parsed_output.replace("ivy.array", "").replace("ivy.Shape", "")
num_output = numeric_pattern.sub("", num_output)
num_parsed_output = numeric_pattern.sub("", num_parsed_output)
num_output = num_output.split(",")
num_parsed_output = num_parsed_output.split(",")
docstr_result = True
for doc_u, doc_v in zip(num_output, num_parsed_output):
try:
docstr_result = np.allclose(
np.nan_to_num(complex(doc_u)),
np.nan_to_num(complex(doc_v)),
rtol=sig_fig,
atol=atol,
)
except Exception:
if str(doc_u) != str(doc_v):
docstr_result = False
if not docstr_result:
print(
"output for ",
fn_name,
" on run: ",
output,
"\noutput in docs :",
parsed_output,
"\n",
doc_u,
" != ",
doc_v,
"\n",
)
ivy.warn(
f"Output is unequal to the docstrings output: {fn_name}",
stacklevel=0,
)
break
return docstr_result
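# Illustrative usage of the helper above (a sketch only, not collected by
# pytest; it assumes the chosen function's docstring contains a
# ``>>> print(...)`` example):
#
#     import ivy
#     ivy.set_backend("numpy")
#     check_docstring_examples_run(fn=ivy.abs)
#
# The helper returns True when the captured output matches the docstring (or
# the function is skipped / has no printable example) and False on a mismatch.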
@pytest.mark.parametrize("backend", ["jax", "numpy", "tensorflow", "torch"])
def test_docstrings(backend):
ivy.set_default_device("cpu")
ivy.set_backend(backend)
failures = []
success = True
for k, v in ivy.__dict__.copy().items():
if k == "Array":
for method_name in dir(v):
method = getattr(ivy.Array, method_name)
if hasattr(ivy.functional, method_name):
if helpers.gradient_incompatible_function(
fn=getattr(ivy.functional, method_name)
) or check_docstring_examples_run(fn=method, from_array=True):
continue
elif helpers.gradient_incompatible_function(
fn=method
) or check_docstring_examples_run(fn=method, from_array=True):
continue
failures.append(f"Array.{method_name}")
success = False
elif k == "Container":
for method_name in dir(v):
method = getattr(ivy.Container, method_name)
if hasattr(ivy.functional, method_name):
if helpers.gradient_incompatible_function(
fn=getattr(ivy.functional, method_name)
) or check_docstring_examples_run(fn=method, from_container=True):
continue
elif helpers.gradient_incompatible_function(
fn=method
) or check_docstring_examples_run(fn=method, from_container=True):
continue
failures.append(f"Container.{method_name}")
success = False
else:
if check_docstring_examples_run(
fn=v
) or helpers.gradient_incompatible_function(fn=v):
continue
success = False
failures.append(k)
if not success:
assert (
success
), "\nThe following methods had failing docstrings:\n\n{}\n".format(
"\n".join(failures)
)
ivy.previous_backend()
| ivy/ivy_tests/test_docstrings.py/0 | {
"file_path": "ivy/ivy_tests/test_docstrings.py",
"repo_id": "ivy",
"token_count": 6113
} | 52 |
import abc
from hypothesis import strategies as st
from . import globals as test_globals
from .pipeline_helper import BackendHandler
from dataclasses import dataclass
from hypothesis.strategies import SearchStrategy
@dataclass
class DynamicFlag:
strategy: SearchStrategy
@st.composite
def _gradient_strategy(draw):
if test_globals.CURRENT_BACKEND == "numpy":
return draw(st.just(False))
return draw(st.booleans())
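# NOTE: the numpy backend has no autograd support, so the gradient flag drawn
# above is pinned to False there; every other backend draws a free boolean.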
@st.composite
def _as_variable_strategy(draw):
if (
test_globals.CURRENT_BACKEND is not test_globals._Notsetval
and test_globals.CURRENT_BACKEND == "numpy"
):
return draw(st.just([False]))
if not test_globals.CURRENT_FRONTEND_STR:
if (
test_globals.CURRENT_FRONTEND is not test_globals._Notsetval
and test_globals.CURRENT_FRONTEND == "numpy"
):
return draw(st.just([False]))
return draw(st.lists(st.booleans(), min_size=1, max_size=1))
BuiltNativeArrayStrategy = DynamicFlag(st.lists(st.booleans(), min_size=1, max_size=1))
BuiltAsVariableStrategy = DynamicFlag(_as_variable_strategy())
BuiltContainerStrategy = DynamicFlag(st.lists(st.booleans(), min_size=1, max_size=1))
BuiltInstanceStrategy = DynamicFlag(st.booleans())
BuiltInplaceStrategy = DynamicFlag(st.just(False))
BuiltGradientStrategy = DynamicFlag(_gradient_strategy())
BuiltWithOutStrategy = DynamicFlag(st.booleans())
BuiltWithCopyStrategy = DynamicFlag(st.just(False))
BuiltCompileStrategy = DynamicFlag(st.just(False))
BuiltTraceStrategy = DynamicFlag(st.just(False))
BuiltFrontendArrayStrategy = DynamicFlag(st.booleans())
BuiltTranspileStrategy = DynamicFlag(st.just(False))
BuiltPrecisionModeStrategy = DynamicFlag(st.booleans())
BuiltCythonWrapperStrategy = DynamicFlag(st.just(False))
flags_mapping = {
"native_array": "BuiltNativeArrayStrategy",
"as_variable": "BuiltAsVariableStrategy",
"container": "BuiltContainerStrategy",
"instance_method": "BuiltInstanceStrategy",
"test_gradients": "BuiltGradientStrategy",
"with_out": "BuiltWithOutStrategy",
"with_copy": "BuiltWithCopyStrategy",
"inplace": "BuiltInplace",
"test_trace": "BuiltTraceStrategy",
"transpile": "BuiltTranspileStrategy",
"precision_mode": "BuiltPrecisionModeStrategy",
"test_cython_wrapper": "BuiltCythonWrapperStrategy",
}
def build_flag(key: str, value: bool):
if value is not None:
value = st.just(value)
# Prevent silently passing if variables names were changed
assert (
flags_mapping[key] in globals()
), f"{flags_mapping[key]} is not a valid flag variable."
globals()[flags_mapping[key]].strategy = value
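# Illustrative sketch (not executed at import time): pinning a flag for a
# whole test session could look like
#
#     build_flag("test_gradients", False)
#
# which swaps the strategy on ``BuiltGradientStrategy`` for ``st.just(False)``
# so hypothesis always draws that value.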
# Strategy Helpers #
class TestFlags(metaclass=abc.ABCMeta):
def apply_flags(self, args_to_iterate, input_dtypes, offset, *, backend, on_device):
pass
class FunctionTestFlags(TestFlags):
def __init__(
self,
ground_truth_backend,
num_positional_args,
with_out,
with_copy,
instance_method,
as_variable,
native_arrays,
container,
test_gradients,
test_trace,
transpile,
precision_mode,
test_cython_wrapper,
):
self.ground_truth_backend = ground_truth_backend
self.num_positional_args = num_positional_args
self.with_out = with_out
self.with_copy = with_copy
self.instance_method = instance_method
self.native_arrays = native_arrays
self.container = container
self.as_variable = as_variable
self.test_gradients = test_gradients
self.test_trace = test_trace
self.transpile = transpile
self.precision_mode = precision_mode
self.test_cython_wrapper = test_cython_wrapper
def apply_flags(self, args_to_iterate, input_dtypes, offset, *, backend, on_device):
ret = []
with BackendHandler.update_backend(backend) as backend:
for i, entry in enumerate(args_to_iterate, start=offset):
x = backend.array(entry, dtype=input_dtypes[i], device=on_device)
if self.as_variable[i]:
x = backend.gradients._variable(x)
if self.native_arrays[i]:
x = backend.to_native(x)
if self.container[i]:
x = backend.Container({"a": x, "b": {"c": x, "d": x}})
ret.append(x)
return ret
def __str__(self):
return (
f"ground_truth_backend={self.ground_truth_backend}"
f"num_positional_args={self.num_positional_args}. "
f"with_out={self.with_out}. "
f"with_copy={self.with_copy}. "
f"instance_method={self.instance_method}. "
f"native_arrays={self.native_arrays}. "
f"container={self.container}. "
f"as_variable={self.as_variable}. "
f"test_gradients={self.test_gradients}. "
f"test_trace={self.test_trace}. "
f"transpile={self.transpile}. "
f"precision_mode={self.precision_mode}. "
)
def __repr__(self):
return self.__str__()
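# Minimal sketch (assumed values, not drawn from hypothesis) of how the flags
# object wraps raw inputs; ``flags`` is taken to be a FunctionTestFlags
# instance with one-element per-argument flag lists:
#
#     flags.as_variable, flags.native_arrays, flags.container = (
#         [False], [False], [True],
#     )
#     (wrapped,) = flags.apply_flags(
#         [[1.0, 2.0]], ["float32"], 0, backend="numpy", on_device="cpu"
#     )
#     # -> an ivy Container holding the same array under "a", "b/c" and "b/d"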
@st.composite
def function_flags(
draw,
*,
ground_truth_backend,
num_positional_args,
instance_method,
with_out,
with_copy,
test_gradients,
test_trace,
transpile,
as_variable,
native_arrays,
container_flags,
precision_mode,
test_cython_wrapper,
):
return draw(
st.builds(
FunctionTestFlags,
ground_truth_backend=ground_truth_backend,
num_positional_args=num_positional_args,
with_out=with_out,
with_copy=with_copy,
instance_method=instance_method,
test_gradients=test_gradients,
test_trace=test_trace,
transpile=transpile,
as_variable=as_variable,
native_arrays=native_arrays,
container=container_flags,
precision_mode=precision_mode,
test_cython_wrapper=test_cython_wrapper,
)
)
class FrontendFunctionTestFlags(TestFlags):
def __init__(
self,
num_positional_args,
with_out,
with_copy,
inplace,
as_variable,
native_arrays,
test_trace,
generate_frontend_arrays,
transpile,
precision_mode,
):
self.num_positional_args = num_positional_args
self.with_out = with_out
self.with_copy = with_copy
self.inplace = inplace
self.native_arrays = native_arrays
self.as_variable = as_variable
self.test_trace = test_trace
self.generate_frontend_arrays = generate_frontend_arrays
self.transpile = transpile
self.precision_mode = precision_mode
def apply_flags(self, args_to_iterate, input_dtypes, offset, *, backend, on_device):
ret = []
with BackendHandler.update_backend(backend) as backend:
for i, entry in enumerate(args_to_iterate, start=offset):
x = backend.array(entry, dtype=input_dtypes[i], device=on_device)
if self.as_variable[i]:
x = backend.gradients._variable(x)
if self.native_arrays[i]:
x = backend.to_native(x)
ret.append(x)
return ret
def __str__(self):
return (
f"num_positional_args={self.num_positional_args}. "
f"with_out={self.with_out}. "
f"with_copy={self.with_copy}. "
f"inplace={self.inplace}. "
f"native_arrays={self.native_arrays}. "
f"as_variable={self.as_variable}. "
f"test_trace={self.test_trace}. "
f"generate_frontend_arrays={self.generate_frontend_arrays}. "
f"transpile={self.transpile}."
f"precision_mode={self.precision_mode}. "
)
def __repr__(self):
return self.__str__()
@st.composite
def frontend_function_flags(
draw,
*,
num_positional_args,
with_out,
with_copy,
inplace,
as_variable,
native_arrays,
test_trace,
generate_frontend_arrays,
transpile,
precision_mode,
):
return draw(
st.builds(
FrontendFunctionTestFlags,
num_positional_args=num_positional_args,
with_out=with_out,
with_copy=with_copy,
inplace=inplace,
as_variable=as_variable,
native_arrays=native_arrays,
test_trace=test_trace,
generate_frontend_arrays=generate_frontend_arrays,
transpile=transpile,
precision_mode=precision_mode,
)
)
class InitMethodTestFlags(TestFlags):
def __init__(
self,
num_positional_args,
as_variable,
native_arrays,
precision_mode,
):
self.num_positional_args = num_positional_args
self.native_arrays = native_arrays
self.as_variable = as_variable
self.precision_mode = precision_mode
def apply_flags(self, args_to_iterate, input_dtypes, offset, *, backend, on_device):
ret = []
with BackendHandler.update_backend(backend) as backend:
for i, entry in enumerate(args_to_iterate, start=offset):
x = backend.array(entry, dtype=input_dtypes[i], device=on_device)
if self.as_variable[i]:
x = backend.gradients._variable(x)
if self.native_arrays[i]:
x = backend.to_native(x)
ret.append(x)
return ret
def __str__(self):
return (
f"num_positional_args={self.num_positional_args}. "
f"native_arrays={self.native_arrays}. "
f"as_variable={self.as_variable}. "
f"precision_mode={self.precision_mode}. "
)
def __repr__(self):
return self.__str__()
@st.composite
def init_method_flags(
draw,
*,
num_positional_args,
as_variable,
native_arrays,
precision_mode,
):
return draw(
st.builds(
InitMethodTestFlags,
num_positional_args=num_positional_args,
as_variable=as_variable,
native_arrays=native_arrays,
precision_mode=precision_mode,
)
)
class MethodTestFlags(TestFlags):
def __init__(
self,
num_positional_args,
as_variable,
native_arrays,
container_flags,
precision_mode,
):
self.num_positional_args = num_positional_args
self.native_arrays = native_arrays
self.as_variable = as_variable
self.container = container_flags
self.precision_mode = precision_mode
def apply_flags(self, args_to_iterate, input_dtypes, offset, *, backend, on_device):
ret = []
with BackendHandler.update_backend(backend) as backend:
for i, entry in enumerate(args_to_iterate, start=offset):
x = backend.array(entry, dtype=input_dtypes[i], device=on_device)
if self.as_variable[i]:
x = backend.gradients._variable(x)
if self.native_arrays[i]:
x = backend.to_native(x)
if self.container[i]:
x = backend.Container({"a": x, "b": {"c": x, "d": x}})
ret.append(x)
return ret
def __str__(self):
return (
f"num_positional_args={self.num_positional_args}. "
f"native_arrays={self.native_arrays}. "
f"as_variable={self.as_variable}. "
f"container_flags={self.container}. "
f"precision_mode={self.precision_mode}. "
)
def __repr__(self):
return self.__str__()
@st.composite
def method_flags(
draw,
*,
num_positional_args,
as_variable,
native_arrays,
container_flags,
precision_mode,
):
return draw(
st.builds(
MethodTestFlags,
num_positional_args=num_positional_args,
as_variable=as_variable,
native_arrays=native_arrays,
container_flags=container_flags,
precision_mode=precision_mode,
)
)
class FrontendInitTestFlags(TestFlags):
def __init__(
self,
num_positional_args,
as_variable,
native_arrays,
):
self.num_positional_args = num_positional_args
self.native_arrays = native_arrays
self.as_variable = as_variable
def apply_flags(self, args_to_iterate, input_dtypes, offset, *, backend, on_device):
ret = []
with BackendHandler.update_backend(backend) as backend:
for i, entry in enumerate(args_to_iterate, start=offset):
x = backend.array(entry, dtype=input_dtypes[i], device=on_device)
if self.as_variable[i]:
x = backend.gradients._variable(x)
if self.native_arrays[i]:
x = backend.to_native(x)
ret.append(x)
return ret
def __str__(self):
return (
f"num_positional_args={self.num_positional_args}. "
f"native_arrays={self.native_arrays}. "
f"as_variable={self.as_variable}. "
)
def __repr__(self):
return self.__str__()
@st.composite
def frontend_init_flags(
draw,
*,
num_positional_args,
as_variable,
native_arrays,
):
return draw(
st.builds(
FrontendInitTestFlags,
num_positional_args=num_positional_args,
as_variable=as_variable,
native_arrays=native_arrays,
)
)
class FrontendMethodTestFlags(TestFlags):
def __init__(
self,
num_positional_args,
as_variable,
native_arrays,
precision_mode,
inplace,
test_trace,
generate_frontend_arrays,
):
self.num_positional_args = num_positional_args
self.native_arrays = native_arrays
self.as_variable = as_variable
self.precision_mode = precision_mode
self.inplace = inplace
self.test_trace = test_trace
self.generate_frontend_arrays = generate_frontend_arrays
def apply_flags(self, args_to_iterate, input_dtypes, offset, *, backend, on_device):
ret = []
with BackendHandler.update_backend(backend) as backend:
for i, entry in enumerate(args_to_iterate, start=offset):
x = backend.array(entry, dtype=input_dtypes[i], device=on_device)
if self.as_variable[i]:
x = backend.gradients._variable(x)
if self.native_arrays[i]:
x = backend.to_native(x)
ret.append(x)
return ret
def __str__(self):
return (
f"num_positional_args={self.num_positional_args}. "
f"native_arrays={self.native_arrays}. "
f"as_variable={self.as_variable}. "
f"precision_mode={self.precision_mode}. "
f"inplace={self.inplace}. "
f"test_trace={self.test_trace}."
f"generate_frontend_arrays={self.generate_frontend_arrays}."
)
def __repr__(self):
return self.__str__()
@st.composite
def frontend_method_flags(
draw,
*,
num_positional_args,
as_variable,
native_arrays,
precision_mode,
inplace,
test_trace,
generate_frontend_arrays,
):
return draw(
st.builds(
FrontendMethodTestFlags,
num_positional_args=num_positional_args,
as_variable=as_variable,
native_arrays=native_arrays,
precision_mode=precision_mode,
inplace=inplace,
test_trace=test_trace,
generate_frontend_arrays=generate_frontend_arrays,
)
)
| ivy/ivy_tests/test_ivy/helpers/test_parameter_flags.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/helpers/test_parameter_flags.py",
"repo_id": "ivy",
"token_count": 7661
} | 53 |
import pytest
from ivy_tests.test_ivy.helpers import globals as test_globals
@pytest.fixture(autouse=True)
def run_around_tests(request, on_device, backend_fw, frontend, trace_graph, implicit):
try:
test_globals.setup_frontend_test(
frontend,
backend_fw,
on_device,
(
request.function.test_data
if hasattr(request.function, "test_data")
else None
),
)
except Exception as e:
test_globals.teardown_frontend_test()
raise RuntimeError(f"Setting up test for {request.function} failed.") from e
yield
test_globals.teardown_frontend_test()
| ivy/ivy_tests/test_ivy/test_frontends/conftest.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/conftest.py",
"repo_id": "ivy",
"token_count": 331
} | 54 |
# global
import sys
from hypothesis import strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import (
handle_frontend_test,
assert_all_close,
BackendHandler,
)
from ivy_tests.test_ivy.test_functional.test_core.test_linalg import (
_get_dtype_and_matrix,
)
# eig
@handle_frontend_test(
fn_tree="numpy.linalg.eig",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=10,
shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
).filter(
lambda x: "float16" not in x[0]
and "bfloat16" not in x[0]
and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon
and np.linalg.det(np.asarray(x[1][0])) != 0
),
test_with_out=st.just(False),
)
def test_numpy_eig(
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
x = np.array(x[0], dtype=dtype[0])
"""Make symmetric positive-definite since ivy does not support complex data
dtypes currently."""
x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
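    # For a real symmetric positive-definite matrix, x == Q @ diag(L) @ Q.T, so
    # the check below compares the reconstructions rather than the raw
    # eigenvectors (which are only unique up to sign and ordering).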
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
a=x,
)
with BackendHandler.update_backend(backend_fw) as ivy_backend:
ret = [ivy_backend.to_numpy(x).astype(np.float64) for x in ret]
frontend_ret = [x.astype(np.float64) for x in frontend_ret]
L, Q = ret
frontend_L, frontend_Q = frontend_ret
assert_all_close(
ret_np=Q @ np.diag(L) @ Q.T,
ret_from_gt_np=frontend_Q @ np.diag(frontend_L) @ frontend_Q.T,
atol=1e-02,
backend=backend_fw,
ground_truth_backend=frontend,
)
# eigh
@handle_frontend_test(
fn_tree="numpy.linalg.eigh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=10,
shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
).filter(
lambda x: "float16" not in x[0]
and "bfloat16" not in x[0]
and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon
and np.linalg.det(np.asarray(x[1][0])) != 0
),
UPLO=st.sampled_from(("L", "U")),
test_with_out=st.just(False),
)
def test_numpy_eigh(
*,
dtype_and_x,
UPLO,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
x = np.array(x[0], dtype=dtype[0])
# make symmetric positive-definite beforehand
x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
a=x,
UPLO=UPLO,
)
with BackendHandler.update_backend(backend_fw) as ivy_backend:
ret = [ivy_backend.to_numpy(x) for x in ret]
frontend_ret = [np.asarray(x) for x in frontend_ret]
L, Q = ret
frontend_L, frontend_Q = frontend_ret
assert_all_close(
ret_np=Q @ np.diag(L) @ Q.T,
ret_from_gt_np=frontend_Q @ np.diag(frontend_L) @ frontend_Q.T,
atol=1e-02,
backend=backend_fw,
ground_truth_backend=frontend,
)
@handle_frontend_test(
fn_tree="numpy.linalg.eigvals",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=10,
shape=helpers.ints(min_value=2, max_value=4).map(lambda x: (x, x)),
).filter(
lambda x: "float16" not in x[0]
and "bfloat16" not in x[0]
and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon
and np.linalg.det(np.asarray(x[1][0])) != 0
),
test_with_out=st.just(False),
)
def test_numpy_eigvals(
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
a=x,
)
with BackendHandler.update_backend(backend_fw) as ivy_backend:
ret = np.sort(
np.array([ivy_backend.to_numpy(x).astype(np.float128) for x in ret])
)
frontend_ret = np.sort(np.array([x.astype(np.float128) for x in frontend_ret]))
assert_all_close(
ret_np=ret,
ret_from_gt_np=frontend_ret,
backend=backend_fw,
ground_truth_backend=frontend,
atol=1e-2,
rtol=1e-2,
)
# eigvalsh
@handle_frontend_test(
fn_tree="numpy.linalg.eigvalsh",
x=_get_dtype_and_matrix(symmetric=True),
UPLO=st.sampled_from(["L", "U"]),
)
def test_numpy_eigvalsh(
x,
UPLO,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs = x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
a=xs,
UPLO=UPLO,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_matrix_eigenvalues.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_matrix_eigenvalues.py",
"repo_id": "ivy",
"token_count": 3028
} | 55 |
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
# nextafter
@handle_frontend_test(
fn_tree="numpy.nextafter",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="nextafter"
),
)
def test_numpy_nextafter(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# signbit
@handle_frontend_test(
fn_tree="numpy.signbit",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="signbit"
),
)
def test_numpy_signbit(
dtypes_values_casting,
where,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
out=None,
where=where,
casting="safe",
order="K",
dtype=dtype,
subok=True,
)
# spacing
@handle_frontend_test(
fn_tree="numpy.spacing",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="spacing"
),
)
def test_numpy_spacing(
dtypes_values_casting,
where,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
atol=1e-03,
x=xs[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_floating_point_routines.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_floating_point_routines.py",
"repo_id": "ivy",
"token_count": 2033
} | 56 |
# global,
from hypothesis import strategies as st, assume
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# beta
@handle_frontend_test(
fn_tree="numpy.random.beta",
input_dtypes=helpers.get_dtypes("float", index=2),
a=st.floats(
allow_nan=False, allow_infinity=False, width=32, min_value=0, exclude_min=True
),
b=st.floats(
allow_nan=False, allow_infinity=False, width=32, min_value=0, exclude_min=True
),
size=st.tuples(
st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5)
),
)
def test_numpy_beta(
input_dtypes,
size,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
a,
b,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
a=a,
b=b,
size=size,
)
# binomial
@handle_frontend_test(
fn_tree="numpy.random.binomial",
n=st.integers(min_value=0, max_value=2),
dtype=helpers.get_dtypes("float", full=False, index=2),
p=st.floats(
allow_nan=False, allow_infinity=False, width=32, min_value=0, max_value=1
),
size=st.tuples(
st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5)
),
)
def test_numpy_binomial(
dtype,
size,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
n,
p,
):
helpers.test_frontend_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
n=n,
p=p,
size=size,
)
# chisquare
# The test values are restricted to (0, 1000] because df<=0 is invalid
# and very large df can cause problems with type conversions
@handle_frontend_test(
fn_tree="numpy.random.chisquare",
dtypes=helpers.get_dtypes("float", full=False),
df=st.one_of(
st.floats(
min_value=0,
max_value=1000,
exclude_min=True,
allow_subnormal=False,
width=32,
),
st.integers(min_value=1, max_value=1000),
st.lists(
st.one_of(
st.floats(
min_value=0,
max_value=1000,
exclude_min=True,
allow_subnormal=False,
width=32,
)
| st.integers(min_value=1, max_value=1000)
),
min_size=1,
),
),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_chisquare(
dtypes,
df,
size,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
# make sure `size` is something `df` can be broadcast to
if (
hasattr(df, "__len__")
and size is not None
and (len(size) == 0 or size[-1] != len(df))
):
size = (*size, len(df))
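    # e.g. df=[1.0, 2.0, 3.0] with size=(2, 5) becomes size=(2, 5, 3), so the
    # trailing axis matches len(df) and numpy can broadcast df over the output.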
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
df=df,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.choice",
dtypes=helpers.get_dtypes("float", full=False),
a=helpers.ints(min_value=2, max_value=10),
size=helpers.get_shape(allow_none=True),
)
def test_numpy_choice(
dtypes,
size,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
a,
):
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
a=a,
size=size,
replace=True,
p=np.array([1 / a] * a, dtype=dtypes[0]),
)
# dirichlet
@handle_frontend_test(
fn_tree="numpy.random.dirichlet",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.tuples(
st.integers(min_value=2, max_value=5),
),
min_value=1,
max_value=100,
exclude_min=True,
),
size=st.tuples(
st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5)
),
test_with_out=st.just(False),
)
def test_numpy_dirichlet(
dtype_and_x,
size,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
alpha=x[0],
test_values=False,
size=size,
)
# exponential
@handle_frontend_test(
fn_tree="numpy.random.exponential",
input_dtypes=helpers.get_dtypes("float", index=2),
scale=st.floats(
allow_nan=False, allow_infinity=False, width=32, min_value=0, exclude_min=True
),
size=st.tuples(
st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5)
),
test_with_out=st.just(False),
)
def test_numpy_exponential(
input_dtypes,
size,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
scale,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
scale=scale,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.f",
input_dtypes=helpers.get_dtypes("float"),
dfn=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=1,
max_value=1000,
exclude_min=True,
),
dfd=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=1,
max_value=1000,
exclude_min=True,
),
size=helpers.get_shape(allow_none=False),
)
def test_numpy_f(
input_dtypes,
size,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
dfn,
dfd,
):
test_flags.num_positional_args = 2
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
test_values=False,
on_device=on_device,
dfn=dfn,
dfd=dfd,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.gamma",
input_dtypes=helpers.get_dtypes("float", full=False),
shape=st.floats(
allow_nan=False, allow_infinity=False, width=32, min_value=0, exclude_min=True
),
scale=st.floats(
allow_nan=False, allow_infinity=False, width=32, min_value=0, exclude_min=True
),
size=st.tuples(
st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5)
),
test_with_out=st.just(False),
)
def test_numpy_gamma(
input_dtypes,
shape,
scale,
size,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
shape=shape,
scale=scale,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.geometric",
input_dtypes=helpers.get_dtypes("float"),
p=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=9.999999747378752e-06,
max_value=0.9999899864196777,
),
size=st.tuples(
st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5)
),
)
def test_numpy_geometric(
input_dtypes,
size,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
p,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
p=p,
size=size,
)
# gumbel
@handle_frontend_test(
fn_tree="numpy.random.gumbel",
input_dtypes=helpers.get_dtypes("float"),
loc=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
max_value=1000,
),
scale=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=0,
max_value=1000,
exclude_min=True,
),
size=helpers.get_shape(allow_none=True),
)
def test_numpy_gumbel(
input_dtypes,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
loc,
scale,
size,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
loc=loc,
scale=scale,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.laplace",
input_dtypes=helpers.get_dtypes("float", full=False),
loc=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=0,
exclude_min=True,
),
scale=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=0,
exclude_min=True,
),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_laplace(
input_dtypes,
size,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
loc,
scale,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
loc=loc,
scale=scale,
size=size,
)
# logistic
@handle_frontend_test(
fn_tree="numpy.random.logistic",
input_dtypes=helpers.get_dtypes("float", full=False),
loc=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=0,
exclude_min=True,
),
scale=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=0,
exclude_min=True,
),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_logistic(
input_dtypes,
size,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
loc,
scale,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
loc=loc,
scale=scale,
size=size,
)
# lognormal
# sigma's min value is set to 0
@handle_frontend_test(
fn_tree="numpy.random.lognormal",
input_dtypes=helpers.get_dtypes("float", index=2),
mean=st.floats(
allow_nan=False, allow_infinity=False, width=32, min_value=-5, max_value=5
),
sigma=st.floats(
allow_nan=False, allow_infinity=False, width=32, min_value=0, max_value=5
),
size=st.tuples(
st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5)
),
)
def test_numpy_lognormal(
input_dtypes,
size,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
mean,
sigma,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
mean=mean,
sigma=sigma,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.logseries",
input_dtypes=helpers.get_dtypes("float", index=2),
p=st.floats(
allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1,
exclude_max=True,
),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_logseries(
input_dtypes,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
p,
size,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
p=p,
size=size,
)
# multinomial
@handle_frontend_test(
fn_tree="numpy.random.multinomial",
n=helpers.ints(min_value=2, max_value=10),
dtype=helpers.get_dtypes("float", full=False),
size=st.tuples(
st.integers(min_value=1, max_value=10), st.integers(min_value=2, max_value=2)
),
)
def test_numpy_multinomial(
n,
dtype,
size,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
n=n,
pvals=np.array([1 / n] * n, dtype=dtype[0]),
size=size,
)
# negative_binomial
@handle_frontend_test(
fn_tree="numpy.random.negative_binomial",
input_dtypes=helpers.get_dtypes("float", index=2),
# max value for n and min value for p are restricted in testing
# as they can blow up poisson lambda, which will cause an
# error (lam value too large).
n=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=0,
max_value=100000,
exclude_min=True,
),
p=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=9.999999747378752e-06,
exclude_min=True,
max_value=1,
exclude_max=True,
),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_negative_binomial(
input_dtypes,
size,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
n,
p,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
n=n,
p=p,
size=size,
)
# noncentral_chisquare
@handle_frontend_test(
fn_tree="numpy.random.noncentral_chisquare",
dtype_and_df=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=0,
exclude_min=True,
),
dtype_and_nonc=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=0,
),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_noncentral_chisquare(
dtype_and_df,
dtype_and_nonc,
size,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype_df, df = dtype_and_df
dtype_nonc, nonc = dtype_and_nonc
helpers.test_frontend_function(
input_dtypes=dtype_df + dtype_nonc,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
df=df[0],
nonc=nonc[0],
size=size,
)
# normal
@handle_frontend_test(
fn_tree="numpy.random.normal",
input_dtypes=helpers.get_dtypes("float", index=2),
loc=st.floats(allow_nan=False, allow_infinity=False, width=32),
scale=st.floats(allow_nan=False, allow_infinity=False, width=32, min_value=0),
size=st.tuples(
st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5)
),
)
def test_numpy_normal(
input_dtypes,
size,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
loc,
scale,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
loc=loc,
scale=scale,
size=size,
)
# pareto
@handle_frontend_test(
fn_tree="numpy.random.pareto",
input_dtypes=helpers.get_dtypes("float", index=2),
a=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=1,
max_value=1000,
exclude_min=True,
),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_pareto(
input_dtypes,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
a,
size,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
a=a,
size=size,
)
# permutation
@handle_frontend_test(
fn_tree="numpy.random.permutation",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1
),
)
def test_numpy_permutation(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
x=x[0],
)
# poisson
@handle_frontend_test(
fn_tree="numpy.random.poisson",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.tuples(st.integers(min_value=1, max_value=2)),
min_value=1,
max_value=100,
),
size=st.tuples(
st.integers(min_value=1, max_value=10), st.integers(min_value=2, max_value=2)
),
)
def test_numpy_poisson(
dtype_and_x,
size,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
lam=x[0],
test_values=False,
size=size,
)
# random_sample
@handle_frontend_test(
fn_tree="numpy.random.random_sample",
input_dtypes=helpers.get_dtypes("integer", full=False),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_random_sample(
input_dtypes,
size,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
size=size,
)
# rayleigh
@handle_frontend_test(
fn_tree="numpy.random.rayleigh",
input_dtypes=helpers.get_dtypes("float"),
scale=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=0,
max_value=1000,
exclude_min=True,
),
size=helpers.get_shape(allow_none=True),
)
def test_numpy_rayleigh(
input_dtypes,
size,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
scale,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
scale=scale,
size=size,
)
# shuffle
@handle_frontend_test(
fn_tree="numpy.random.shuffle",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), min_num_dims=1
),
)
def test_numpy_shuffle(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
x=x[0],
)
# standard_cauchy
@handle_frontend_test(
fn_tree="numpy.random.standard_cauchy",
input_dtypes=helpers.get_dtypes("integer", full=False),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_standard_cauchy(
input_dtypes,
size,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.standard_exponential",
input_dtypes=helpers.get_dtypes("float", index=2),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_standard_exponential(
input_dtypes,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
size,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.standard_gamma",
shape_dtypes=helpers.get_dtypes("float", full=False),
shape=st.floats(
allow_nan=False, allow_infinity=False, width=32, min_value=0, exclude_min=True
),
size=st.tuples(
st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5)
),
size_dtypes=helpers.get_dtypes("integer", full=False),
test_with_out=st.just(False),
)
def test_numpy_standard_gamma(
shape,
shape_dtypes,
size,
size_dtypes,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
assume("float16" not in shape_dtypes)
helpers.test_frontend_function(
input_dtypes=shape_dtypes + size_dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
shape=shape,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.standard_normal",
input_dtypes=helpers.get_dtypes("integer", full=False),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_standard_normal(
input_dtypes,
size,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
size=size,
)
# standard_t
@handle_frontend_test(
fn_tree="numpy.random.standard_t",
df=st.floats(min_value=1, max_value=20),
df_dtypes=helpers.get_dtypes("integer", full=False),
size=st.tuples(
st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5)
),
size_dtypes=helpers.get_dtypes("integer", full=False),
test_with_out=st.just(False),
)
def test_numpy_standard_t(
df,
df_dtypes,
size,
size_dtypes,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=df_dtypes + size_dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
df=df,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.triangular",
input_dtypes=helpers.get_dtypes("float"),
left=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=0,
max_value=10,
),
mode=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=10,
max_value=100,
exclude_min=True,
),
right=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=100,
max_value=1000,
exclude_min=True,
),
size=helpers.get_shape(allow_none=True),
)
def test_numpy_triangular(
input_dtypes,
size,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
left,
mode,
right,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
left=left,
mode=mode,
right=right,
size=size,
)
# uniform
@handle_frontend_test(
fn_tree="numpy.random.uniform",
input_dtypes=helpers.get_dtypes("float", index=2),
low=st.floats(allow_nan=False, allow_infinity=False, width=32),
high=st.floats(allow_nan=False, allow_infinity=False, width=32),
size=st.tuples(
st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5)
),
)
def test_numpy_uniform(
input_dtypes,
size,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
low,
high,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
low=low,
high=high,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.vonmises",
input_dtypes=helpers.get_dtypes("float"),
mu=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=0,
max_value=1,
exclude_min=True,
),
kappa=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=0,
max_value=10,
exclude_min=True,
),
size=helpers.get_shape(allow_none=True),
)
def test_numpy_vonmises(
input_dtypes,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
mu,
kappa,
size,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
mu=mu,
kappa=kappa,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.wald",
input_dtypes=helpers.get_dtypes("float"),
mean=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=0,
exclude_min=True,
max_value=1000,
),
scale=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=0,
exclude_min=True,
max_value=1000,
),
size=helpers.get_shape(allow_none=False),
)
def test_numpy_wald(
input_dtypes,
size,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
mean,
scale,
):
test_flags.num_positional_args = 2
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
mean=mean,
scale=scale,
size=size,
)
# weibull
@handle_frontend_test(
fn_tree="numpy.random.weibull",
input_dtypes=helpers.get_dtypes("float", index=2),
a=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=1,
max_value=1000,
exclude_min=True,
),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_weibull(
input_dtypes,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
a,
size,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
a=a,
size=size,
)
@handle_frontend_test(
fn_tree="numpy.random.zipf",
input_dtypes=helpers.get_dtypes("float", index=2),
a=st.floats(
allow_nan=False,
allow_infinity=False,
width=32,
min_value=1,
max_value=1000,
exclude_min=True,
),
size=helpers.get_shape(allow_none=True),
test_with_out=st.just(False),
)
def test_numpy_zipf(
input_dtypes,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
a,
size,
):
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
a=a,
size=size,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_random/test_functions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_random/test_functions.py",
"repo_id": "ivy",
"token_count": 15690
} | 57 |
# import paddle
from ivy_tests.test_ivy.test_frontends import NativeClass
paddle_classes_to_ivy_classes = {}
def convpaddle(argument):
"""Convert NativeClass in argument to ivy frontend counter part for
paddle."""
if isinstance(argument, NativeClass):
return paddle_classes_to_ivy_classes.get(argument._native_class)
return argument
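# Illustrative only (hypothetical mapping): if a native class name such as
# "paddle.Tensor" were registered in ``paddle_classes_to_ivy_classes``, then
# ``convpaddle(NativeClass("paddle.Tensor"))`` would return the mapped ivy
# frontend class; any non-NativeClass argument is returned unchanged.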
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/__init__.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/__init__.py",
"repo_id": "ivy",
"token_count": 120
} | 58 |
# global
from hypothesis import strategies as st
import ivy
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
# cosine embedding loss
@st.composite
def _cos_embd_loss_helper(draw):
dtype_inputs_shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=2,
min_dim_size=2,
ret_shape=True,
num_arrays=2,
)
)
input_dtypes, inputs, shape = dtype_inputs_shape
_, label = draw(
helpers.dtype_and_values(
dtype=input_dtypes, shape=(shape[0],), min_value=-1, max_value=1
),
)
return input_dtypes, inputs, label
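# The helper above yields two equally-shaped float inputs plus a label of
# shape (batch,), drawn in [-1, 1]; paddle's cosine_embedding_loss itself
# documents the label as taking the values 1 or -1 per pair.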
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="paddle.nn.functional.binary_cross_entropy",
dtype_and_vals=helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
num_arrays=3,
min_value=1.0013580322265625e-05,
max_value=1,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
allow_inf=False,
exclude_min=True,
exclude_max=True,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
),
reduction=st.sampled_from(["mean", "sum", "none"]),
)
def test_paddle_binary_cross_entropy(
dtype_and_vals,
reduction,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_vals
helpers.test_frontend_function(
input_dtypes=[input_dtype[0], input_dtype[1]],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
label=x[1],
weight=x[2],
reduction=reduction,
rtol=1e-02,
atol=1e-02,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.binary_cross_entropy_with_logits",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
exclude_min=True,
exclude_max=True,
shared_dtype=True,
min_num_dims=1,
),
dtype_and_weight=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
),
reduction=st.sampled_from(["mean", "none", "sum"]),
)
def test_paddle_binary_cross_entropy_with_logits(
dtype_and_x,
dtype_and_weight,
reduction,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
# TODO: paddle's implementation of pos_weight is wrong
# https://github.com/PaddlePaddle/Paddle/pull/54869
x_dtype, x = dtype_and_x
weight_dtype, weight = dtype_and_weight
helpers.test_frontend_function(
input_dtypes=[
x_dtype[0],
x_dtype[1],
weight_dtype[0],
],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
logit=x[0],
label=x[1],
weight=weight[0],
reduction=reduction,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.cosine_embedding_loss",
dtype_xs_label=_cos_embd_loss_helper(),
margin=st.floats(
min_value=-1.0,
max_value=1.0,
width=16,
),
reduction=st.sampled_from(["none", "mean", "sum"]),
test_with_out=st.just(False),
)
def test_paddle_cosine_embedding_loss(
*,
dtype_xs_label,
margin,
reduction,
test_flags,
fn_tree,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs, label = dtype_xs_label
input1_dtype, input1 = input_dtypes[0], xs[0]
input2_dtype, input2 = input_dtypes[1], xs[1]
helpers.test_frontend_function(
input_dtypes=[input1_dtype, input2_dtype],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input1=input1,
input2=input2,
label=label[0],
margin=margin,
reduction=reduction,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.dice_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
shared_dtype=False,
min_num_dims=3,
min_dim_size=3,
max_num_dims=3,
max_dim_size=3,
),
labels=st.lists(
(
st.lists(
(
st.lists(
st.integers(min_value=0, max_value=1), min_size=3, max_size=3
)
),
min_size=3,
max_size=3,
)
),
min_size=1,
max_size=1,
),
epsilon=st.floats(
min_value=1e-6,
max_value=1e-2,
),
)
def test_paddle_dice_loss(
dtype_and_x,
labels,
epsilon,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
x_dtype, x = dtype_and_x
x[0] = x[0].reshape([3, 3, 3])
labels = ivy.array(labels, dtype=ivy.int64)
labels = labels.reshape([3, 3, 1])
helpers.test_frontend_function(
input_dtypes=[ivy.int64] + [ivy.float64] + x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
label=labels,
epsilon=epsilon,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.hinge_embedding_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
),
margin=st.floats(
min_value=-1.0,
max_value=1.0,
width=16,
),
reduction=st.sampled_from(["none", "mean", "sum"]),
)
def test_paddle_hinge_embedding_loss(
dtype_and_x,
margin,
reduction,
test_flags,
backend_fw,
fn_tree,
frontend,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
label=x[1],
margin=margin,
reduction=reduction,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.kl_div",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
min_value=1.0013580322265625e-05,
),
reduction=st.sampled_from(["mean", "batchmean", "sum", "none"]),
)
def test_paddle_kl_div(
dtype_and_x, reduction, on_device, backend_fw, fn_tree, frontend, test_flags
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
label=x[1],
reduction=reduction,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.l1_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
reduction=st.sampled_from(["mean", "sum", "none"]),
)
def test_paddle_l1_loss(
dtype_and_x,
reduction,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
label=x[1],
reduction=reduction,
)
# log_loss
@handle_frontend_test(
fn_tree="paddle.nn.functional.log_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
min_value=0,
max_value=1,
exclude_min=True,
exclude_max=True,
shared_dtype=True,
min_num_dims=2,
max_num_dims=2,
max_dim_size=1,
),
epsilon=st.floats(
min_value=1e-7,
max_value=1.0,
),
)
def test_paddle_log_loss(
dtype_and_x,
epsilon,
fn_tree,
test_flags,
backend_fw,
frontend,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
label=x[1],
epsilon=epsilon,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.margin_ranking_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=3,
shared_dtype=True,
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
margin=st.floats(
min_value=-1.0,
max_value=1.0,
width=16,
),
reduction=st.sampled_from(["mean", "sum", "none"]),
)
def test_paddle_margin_ranking_loss(
dtype_and_x,
margin,
reduction,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
label=x[2],
margin=margin,
reduction=reduction,
)
# mse_loss
@handle_frontend_test(
fn_tree="paddle.nn.functional.mse_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
reduction=st.sampled_from(["mean", "none", "sum"]),
)
def test_paddle_mse_loss(
dtype_and_x,
reduction,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
label=x[1],
reduction=reduction,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.multi_label_soft_margin_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=-2,
max_value=2,
shared_dtype=True,
allow_inf=False,
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype_and_weight=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
min_value=-2,
max_value=2,
),
reduction=st.sampled_from(["mean", "none", "sum"]),
)
def test_paddle_multi_label_soft_margin_loss(
dtype_and_x,
dtype_and_weight,
reduction,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
x_dtype, x = dtype_and_x
weight_dtype, weight = dtype_and_weight
helpers.test_frontend_function(
input_dtypes=[
x_dtype[0],
x_dtype[1],
weight_dtype[0],
],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
label=x[1],
weight=weight[0],
reduction=reduction,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.nll_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
max_num_dims=2,
),
dtype_and_weight=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
max_num_dims=1,
),
ignore_index=st.integers(
min_value=-100,
),
reduction=st.sampled_from(["mean", "sum", "none"]),
)
def test_paddle_nll_loss(
dtype_and_x,
dtype_and_weight,
ignore_index,
reduction,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
x_dtype, x = dtype_and_x
weight_dtype, weight = dtype_and_weight
helpers.test_frontend_function(
input_dtypes=[
x_dtype[0],
x_dtype[1],
weight_dtype[0],
],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
label=x[1],
weight=weight[0],
ignore_index=ignore_index,
reduction=reduction,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.sigmoid_focal_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
shared_dtype=False,
min_num_dims=1,
min_dim_size=1,
),
dtype_and_normalizer=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
shared_dtype=True,
min_num_dims=1,
min_dim_size=1,
max_num_dims=1,
max_dim_size=1,
),
dtype_and_labels=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
shared_dtype=False,
min_num_dims=1,
min_dim_size=1,
min_value=0,
max_value=1,
),
alpha=st.floats(
min_value=0.0,
max_value=1.0,
),
gamma=st.floats(
min_value=0.0,
max_value=5.0,
),
reduction=st.sampled_from(["mean", "sum", "none"]),
)
def test_paddle_sigmoid_focal_loss(
dtype_and_x,
dtype_and_normalizer,
dtype_and_labels,
alpha,
gamma,
reduction,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
x_dtype, x = dtype_and_x
normalizer_dtype, normalizer = dtype_and_normalizer
label_dtype, labels = dtype_and_labels
normalizer = [norm.reshape(-1) for norm in normalizer]
labels = ivy.array(labels, dtype=ivy.int64)
helpers.test_frontend_function(
input_dtypes=[ivy.int64]
+ [ivy.float64]
+ x_dtype
+ normalizer_dtype
+ label_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
logit=x[0],
label=labels[0],
alpha=alpha,
gamma=gamma,
normalizer=normalizer[0],
reduction=reduction,
)
# smooth_l1_loss
@handle_frontend_test(
fn_tree="paddle.nn.functional.smooth_l1_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
delta=st.floats(
min_value=0.1,
max_value=1.0,
),
reduction=st.sampled_from(["mean", "sum", "none"]),
)
def test_paddle_smooth_l1_loss(
dtype_and_x,
delta,
reduction,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
label=x[1],
reduction=reduction,
delta=delta,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.softmax_with_cross_entropy",
dtype_and_x_and_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=1e-04,
max_value=1,
min_num_dims=2,
allow_inf=False,
shared_dtype=True,
force_int_axis=True,
valid_axis=True,
),
soft_label=st.booleans(),
numeric_stable_mode=st.booleans(),
return_softmax=st.booleans(),
)
def test_paddle_softmax_with_cross_entropy(
dtype_and_x_and_axis,
soft_label,
numeric_stable_mode,
return_softmax,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
x_dtype, x, axis = dtype_and_x_and_axis
logits = x[0]
labels = x[1]
label_dtype = x_dtype
ignore_index = 0
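    # Soft labels are normalised into a probability distribution; hard labels are
    # taken as the argmax along `axis`, and a valid ignore_index is sampled from
    # the flattened label values.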
if soft_label:
labels = labels / ivy.sum(labels).to_native()
else:
labels = ivy.argmax(labels, axis=axis).to_native()
flattened_labels = labels.flatten()
ignore_index = ivy.randint(0, flattened_labels.size)
ignore_index = flattened_labels[ignore_index]
label_dtype = [str(labels.dtype)]
if on_device == "cpu" or soft_label:
numeric_stable_mode = True
helpers.test_frontend_function(
input_dtypes=[x_dtype[0], label_dtype[0]],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
logits=logits,
label=labels,
soft_label=soft_label,
ignore_index=ignore_index,
numeric_stable_mode=numeric_stable_mode,
return_softmax=return_softmax,
axis=axis,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.loss.square_error_cost",
dtype_and_input_and_label=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), num_arrays=2
),
)
def test_paddle_square_error_cost(
*,
dtype_and_input_and_label,
frontend,
test_flags,
fn_tree,
backend_fw,
):
input_dtypes, input_and_label = dtype_and_input_and_label
input, label = input_and_label
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
input=input,
label=label,
fn_tree=fn_tree,
)
@handle_frontend_test(
fn_tree="paddle.nn.functional.triplet_margin_loss",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=3,
allow_inf=False,
shared_dtype=True,
min_value=0.0,
max_value=1.0,
min_num_dims=1,
max_num_dims=2,
min_dim_size=1,
),
margin=st.floats(min_value=1e-6, max_value=1e6),
p=st.integers(min_value=0, max_value=2),
swap=st.booleans(),
reduction=st.sampled_from(["none", "mean", "sum"]),
test_with_out=st.just(False),
)
def test_paddle_triplet_margin_loss(
dtype_and_inputs,
margin,
p,
swap,
reduction,
test_flags,
fn_tree,
backend_fw,
frontend,
on_device,
):
input_dtype, x = dtype_and_inputs
helpers.test_frontend_function(
input_dtypes=[input_dtype[0], input_dtype[1], input_dtype[2]],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
positive=x[1],
negative=x[2],
margin=margin,
p=p,
swap=swap,
reduction=reduction,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_loss.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_loss.py",
"repo_id": "ivy",
"token_count": 10560
} | 59 |
import pytest
@pytest.fixture(scope="session")
def frontend():
return "pandas"
| ivy/ivy_tests/test_ivy/test_frontends/test_pandas/conftest.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_pandas/conftest.py",
"repo_id": "ivy",
"token_count": 32
} | 60 |
from hypothesis import strategies as st
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
import numpy as np
@handle_frontend_test(
fn_tree="sklearn.metrics.accuracy_score",
arrays_and_dtypes=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
num_arrays=2,
min_value=-2,
max_value=2,
shared_dtype=True,
shape=(helpers.ints(min_value=2, max_value=5)),
),
normalize=st.booleans(),
)
def test_sklearn_accuracy_score(
arrays_and_dtypes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
normalize,
):
dtypes, values = arrays_and_dtypes
# sklearn accuracy_score does not support continuous values
for i in range(2):
if "float" in dtypes[i]:
values[i] = np.floor(values[i])
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
frontend=frontend,
on_device=on_device,
y_true=values[0],
y_pred=values[1],
normalize=normalize,
sample_weight=None,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_sklearn/test_metrics/test_classification.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_sklearn/test_metrics/test_classification.py",
"repo_id": "ivy",
"token_count": 563
} | 61 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# as_dtype
@handle_frontend_test(
fn_tree="tensorflow.dtypes.as_dtype",
gt_fn_tree="tensorflow.as_dtype",
input_dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_tensorflow_as_dtype(
*,
input_dtype,
frontend,
test_flags,
fn_tree,
gt_fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
gt_fn_tree=gt_fn_tree,
on_device=on_device,
type_value=input_dtype[0],
)
# cast
@handle_frontend_test(
fn_tree="tensorflow.dtypes.cast",
gt_fn_tree="tensorflow.cast",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_tensorflow_cast(
*,
dtype_and_x,
dtype,
frontend,
test_flags,
fn_tree,
gt_fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype + dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
gt_fn_tree=gt_fn_tree,
on_device=on_device,
x=x[0],
dtype=dtype[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_dtypes.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_dtypes.py",
"repo_id": "ivy",
"token_count": 794
} | 62 |
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
# stateless_poisson
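# Draws a 2-3D output shape together with a 1-D rate vector `lam` whose length
# equals the trailing dimension of that shape, plus a shared float dtype.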
@st.composite
def _shape_lam_dtype(draw):
dtype = draw(helpers.array_dtypes(available_dtypes=("float32", "float64")))
common_shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=2,
max_num_dims=3,
min_dim_size=1,
max_dim_size=5,
)
)
_, lam = draw(
helpers.dtype_and_values(
available_dtypes=dtype, min_value=0, max_value=10, shape=(common_shape[-1],)
)
)
return common_shape, lam, dtype
# --- Main --- #
# ------------ #
# random gamma
@handle_frontend_test(
fn_tree="tensorflow.random.gamma",
dtype=helpers.array_dtypes(
available_dtypes=("float32", "float64"),
),
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=5,
),
alpha=st.floats(
allow_infinity=False, allow_nan=False, width=32, min_value=1, max_value=3
),
beta=st.floats(
allow_infinity=False, allow_nan=False, width=32, min_value=1, max_value=3
),
seed=helpers.ints(min_value=0, max_value=10),
test_with_out=st.just(False),
)
def test_tensorflow_gamma(
frontend,
fn_tree,
on_device,
shape,
alpha,
beta,
dtype,
seed,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
shape=shape,
alpha=alpha,
beta=beta,
dtype=dtype[0],
seed=seed,
test_values=False,
)
# random_normal
@handle_frontend_test(
fn_tree="tensorflow.random.normal",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
mean=st.floats(allow_nan=False, allow_infinity=False, width=32),
stddev=st.floats(allow_nan=False, allow_infinity=False, width=32, min_value=0),
dtype=helpers.get_dtypes("float", full=False),
seed=helpers.ints(min_value=0, max_value=10),
test_with_out=st.just(False),
)
def test_tensorflow_normal(
frontend,
fn_tree,
on_device,
shape,
mean,
stddev,
dtype,
seed,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
shape=shape,
mean=mean,
stddev=stddev,
dtype=dtype[0],
seed=seed,
)
# random poisson
@handle_frontend_test(
fn_tree="tensorflow.random.poisson",
shape=helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=5,
),
lam=st.one_of(
helpers.floats(allow_inf=False, allow_nan=False, min_value=-2, max_value=5),
helpers.lists(
x=helpers.floats(
allow_nan=False, allow_inf=False, min_value=-2, max_value=5
),
min_size=1,
max_size=10,
),
),
dtype=helpers.get_dtypes("float", full=False),
seed=helpers.ints(min_value=0, max_value=10),
test_with_out=st.just(False),
)
def test_tensorflow_poisson(
frontend,
fn_tree,
on_device,
shape,
lam,
dtype,
seed,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
shape=shape,
lam=lam,
dtype=dtype[0],
seed=seed,
test_values=False,
)
# random_shuffle
@handle_frontend_test(
fn_tree="tensorflow.random.shuffle",
dtype_value=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
seed=helpers.ints(min_value=0, max_value=10),
test_with_out=st.just(False),
)
def test_tensorflow_shuffle(
frontend,
fn_tree,
on_device,
dtype_value,
seed,
test_flags,
backend_fw,
):
input_dtypes, values = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
value=values[0],
seed=seed,
)
# stateless_normal
@handle_frontend_test(
fn_tree="tensorflow.random.stateless_normal",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
seed=helpers.dtype_and_values(
available_dtypes=("int64", "int32"), min_value=0, max_value=10, shape=[2]
),
mean=st.floats(allow_nan=False, allow_infinity=False, width=32),
stddev=st.floats(allow_nan=False, allow_infinity=False, width=32, min_value=0),
dtype=helpers.get_dtypes("float", full=False),
test_with_out=st.just(False),
)
def test_tensorflow_stateless_normal(
frontend,
fn_tree,
on_device,
shape,
seed,
mean,
stddev,
dtype,
test_flags,
backend_fw,
):
input_dtypes, seed = seed
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
shape=shape,
seed=seed[0],
mean=mean,
stddev=stddev,
dtype=dtype[0],
)
@handle_frontend_test(
fn_tree="tensorflow.random.stateless_poisson",
shape_lam_dtype=_shape_lam_dtype(),
seed=helpers.dtype_and_values(
available_dtypes=("int64", "int32"), min_value=0, max_value=10, shape=[2]
),
test_with_out=st.just(False),
)
def test_tensorflow_stateless_poisson(
frontend,
fn_tree,
on_device,
shape_lam_dtype,
seed,
test_flags,
backend_fw,
):
shape, lam, dtype = shape_lam_dtype
input_dtypes, seed = seed
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
shape=shape,
seed=seed[0],
lam=lam[0],
dtype=dtype[0],
)
# random_stateless_uniform
@handle_frontend_test(
fn_tree="tensorflow.random.stateless_uniform",
shape=helpers.dtype_and_values(
available_dtypes=("int64", "int32"),
min_value=1,
max_value=5,
min_num_dims=1,
max_num_dims=1,
max_dim_size=9,
),
seed=helpers.dtype_and_values(
available_dtypes=("int64", "int32"), min_value=0, max_value=10, shape=[2]
),
minmaxval=helpers.get_bounds(dtype="int32"),
dtype=helpers.array_dtypes(
available_dtypes=("int32", "int64", "float16", "float32", "float64"),
),
test_with_out=st.just(False),
)
def test_tensorflow_stateless_uniform(
shape,
seed,
minmaxval,
dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
shape_input_dtypes, shape = shape
seed_input_dtypes, seed = seed
helpers.test_frontend_function(
input_dtypes=shape_input_dtypes + seed_input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
shape=shape[0],
seed=seed[0],
minval=int(minmaxval[0]),
maxval=int(minmaxval[1]),
dtype=dtype[0],
)
# random_sample
@handle_frontend_test(
fn_tree="tensorflow.random.uniform",
shape=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_value=1,
max_value=5,
min_num_dims=1,
max_num_dims=1,
),
minval=helpers.ints(min_value=0, max_value=3),
maxval=helpers.ints(min_value=4, max_value=10),
dtype=helpers.get_dtypes("float", full=False),
seed=helpers.ints(min_value=0, max_value=10),
test_with_out=st.just(False),
)
def test_tensorflow_uniform(
shape,
minval,
maxval,
dtype,
seed,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, shape = shape
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
shape=shape[0],
minval=minval,
maxval=maxval,
dtype=dtype[0],
seed=seed,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_random.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_random.py",
"repo_id": "ivy",
"token_count": 4654
} | 63 |
# global
import math
import sys
import numpy as np
from hypothesis import strategies as st, assume, settings, HealthCheck
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import assert_all_close
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_frontends.test_torch.test_miscellaneous_ops import (
dtype_value1_value2_axis,
)
from ivy_tests.test_ivy.helpers.hypothesis_helpers.general_helpers import (
matrix_is_stable,
)
from ivy_tests.test_ivy.test_functional.test_core.test_linalg import _matrix_rank_helper
# --- Helpers --- #
# --------------- #
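# Builds a chain of three matrices with shapes (a, b), (b, c) and (c, d) sharing
# one dtype, so that multi_dot can contract them left to right.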
@st.composite
def _generate_multi_dot_dtype_and_arrays(draw):
input_dtype = [draw(st.sampled_from(draw(helpers.get_dtypes("valid"))))]
matrices_dims = draw(
st.lists(st.integers(min_value=2, max_value=10), min_size=4, max_size=4)
)
shape_1 = (matrices_dims[0], matrices_dims[1])
shape_2 = (matrices_dims[1], matrices_dims[2])
shape_3 = (matrices_dims[2], matrices_dims[3])
matrix_1 = draw(
helpers.dtype_and_values(
shape=shape_1,
dtype=input_dtype,
min_value=-10,
max_value=10,
)
)
matrix_2 = draw(
helpers.dtype_and_values(
shape=shape_2,
dtype=input_dtype,
min_value=-10,
max_value=10,
)
)
matrix_3 = draw(
helpers.dtype_and_values(
shape=shape_3,
dtype=input_dtype,
min_value=-10,
max_value=10,
)
)
return input_dtype, [matrix_1[1][0], matrix_2[1][0], matrix_3[1][0]]
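# Draws a norm order together with matching reduction axes: matrix norms
# ("fro"/"nuc") need exactly two axes, while vector orders may reduce over 1-5.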
@st.composite
def _get_axis_and_p(draw):
p = draw(st.sampled_from(["fro", "nuc", 1, 2, -1, -2, float("inf"), -float("inf")]))
if p in ["fro", "nuc"]:
max_axes_size = 2
min_axes_size = 2
else:
min_axes_size = 1
max_axes_size = 5
x_dtype, values, axis = draw(
helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
valid_axis=True,
min_value=-1e04,
max_value=1e04,
min_axes_size=min_axes_size,
max_axes_size=max_axes_size,
large_abs_safety_factor=2,
safety_factor_scale="log",
)
)
axis = axis[0] if isinstance(axis, tuple) and len(axis) == 1 else axis
# ToDo: fix the castable dtype helper. Right now using `dtype` causes errors
# dtype should be real for real inputs, but got ComplexDouble
x_dtype, values, dtype = draw(
helpers.get_castable_dtype(
draw(helpers.get_dtypes("valid")), x_dtype[0], values[0]
)
)
return p, x_dtype, values, axis, x_dtype
# helpers
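# Draws a random (optionally batched) matrix; `square` forces an n x n shape and
# `invertible` filters out ill-conditioned samples via np.linalg.cond.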
@st.composite
def _get_dtype_and_matrix(
draw, dtype="valid", square=False, invertible=False, batch=False
):
if batch:
arbitrary_dims = draw(helpers.get_shape(max_dim_size=3))
else:
arbitrary_dims = []
if square:
random_size = draw(st.integers(1, 5))
shape = (*arbitrary_dims, random_size, random_size)
else:
shape = (*arbitrary_dims, draw(st.integers(1, 5)), draw(st.integers(1, 5)))
ret = helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(dtype, full=True),
min_value=-10,
max_value=10,
abs_smallest_val=1e-04,
shape=shape,
)
if invertible:
ret = ret.filter(
lambda x: np.all(np.linalg.cond(x[1]) < 1 / sys.float_info.epsilon)
)
return draw(ret)
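# Generates batches of symmetric matrices by drawing the upper-triangular entries
# and mirroring them across the diagonal.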
@st.composite
def _get_dtype_and_symmetrix_matrix(draw):
input_dtype = draw(st.shared(st.sampled_from(draw(helpers.get_dtypes("valid")))))
random_size = draw(helpers.ints(min_value=2, max_value=4))
batch_shape = draw(helpers.get_shape(min_num_dims=1, max_num_dims=3))
num_independnt_vals = int((random_size**2) / 2 + random_size / 2)
array_vals_flat = np.array(
draw(
helpers.array_values(
dtype=input_dtype,
shape=tuple(list(batch_shape) + [num_independnt_vals]),
min_value=2,
max_value=5,
)
)
)
array_vals = np.zeros(batch_shape + (random_size, random_size))
c = 0
for i in range(random_size):
for j in range(random_size):
if j < i:
continue
array_vals[..., i, j] = array_vals_flat[..., c]
array_vals[..., j, i] = array_vals_flat[..., c]
c += 1
return [input_dtype], array_vals
# tensorsolve
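# Generates a (d, d, d, d) tensor A and a (d, d) right-hand side B for
# tensorsolve; both are filtered to be well conditioned (A via its (d^2, d^2)
# matricisation).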
@st.composite
def _get_solve_matrices(draw):
# batch_shape, random_size, shared
# float16 causes a crash when filtering out matrices
# for which `np.linalg.cond` is large.
input_dtype_strategy = st.shared(
st.sampled_from(draw(helpers.get_dtypes("valid"))),
key="shared_dtype",
)
input_dtype = draw(input_dtype_strategy)
dim = draw(helpers.ints(min_value=2, max_value=5))
first_matrix = draw(
helpers.array_values(
dtype=input_dtype,
shape=(dim, dim, dim, dim),
min_value=1.2,
max_value=5,
).filter(
lambda x: np.linalg.cond(x.reshape((dim**2, dim**2)))
< 1 / sys.float_info.epsilon
)
)
second_matrix = draw(
helpers.array_values(
dtype=input_dtype,
shape=(dim, dim),
min_value=1.2,
max_value=3,
).filter(
lambda x: np.linalg.cond(x.reshape((dim, dim))) < 1 / sys.float_info.epsilon
)
)
return input_dtype, first_matrix, second_matrix
# tensorinv
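# torch.linalg.tensorinv requires prod(shape[:ind]) == prod(shape[ind:]); this
# strategy assembles such shapes by mirroring a random factorisation (with simple
# square fallbacks for ind == 1 and ind == 2).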
@st.composite
def _tensorinv_helper(draw):
def factors(x):
result = [
1,
]
i = 2
while i * i <= x:
if x % i == 0:
result.append(i)
if x // i != i:
result.append(x // i)
i += 1
result.append(x)
return np.array(result)
ind = draw(helpers.ints(min_value=1, max_value=6))
product_half = draw(helpers.ints(min_value=2, max_value=25))
factors_list = factors(product_half)
shape = ()
while len(shape) < ind and ind > 2:
while np.prod(shape) < product_half:
a = factors_list[np.random.randint(len(factors_list))]
shape += (a,)
if np.prod(shape) > product_half or len(shape) > ind:
shape = ()
while len(shape) < ind and shape != ():
shape += (1,)
if np.prod(shape) == product_half:
shape += shape[::-1]
break
if ind == 1 and shape == ():
shape += (product_half, product_half)
if ind == 2 and shape == ():
shape += (1, product_half, product_half, 1)
shape_cor = ()
for i in shape:
shape_cor += (int(i),)
shape_draw = (product_half, product_half)
dtype, input = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=shape_draw,
).filter(lambda x: np.linalg.cond(x[1]) < 1 / sys.float_info.epsilon)
)
input[0] = input[0].reshape(shape_cor)
return dtype, input[0], ind
# vander
@st.composite
def _vander_helper(draw):
    # generate an input matrix of shape (*, n), where '*' is one or more
    # batch dimensions
N = draw(helpers.ints(min_value=2, max_value=5))
if draw(st.floats(min_value=0, max_value=1.0)) < 0.5:
N = None
shape = draw(
helpers.get_shape(
min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10
)
)
x = draw(
helpers.dtype_and_values(
available_dtypes=draw(helpers.get_dtypes("valid")),
shape=shape,
min_value=-10,
max_value=10,
)
)
return *x, N
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="torch.linalg.lu_solve",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=helpers.get_shape(
min_num_dims=2, max_num_dims=2, min_dim_size=2, max_dim_size=2
),
num_arrays=2,
shared_dtype=True,
).filter(lambda x: helpers.matrix_is_stable(x[1][0], cond_limit=10)),
)
def test_lu_solve(dtype_x, test_flags, backend_fw, fn_name, on_device):
dtype, arr = dtype_x
A, B = arr[0], arr[1]
ivy.set_backend(backend_fw)
lu_ = ivy.lu_factor(A)
lu, p = lu_.LU, lu_.p
X, X_gt = helpers.test_frontend_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
lu=lu,
p=p,
b=B,
test_values=False,
)
assert np.allclose(A @ X, B)
@handle_frontend_test(
fn_tree="torch.linalg.cholesky",
aliases=["torch.cholesky"],
dtype_and_x=_get_dtype_and_matrix(square=True),
upper=st.booleans(),
)
def test_torch_cholesky(
*,
dtype_and_x,
upper,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
x = np.asarray(x[0], dtype=dtype[0])
x = np.matmul(x.T, x) + np.identity(x.shape[0]) # make symmetric positive-definite
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
input=x,
upper=upper,
)
@handle_frontend_test(
fn_tree="torch.linalg.cholesky_ex",
dtype_and_x=_get_dtype_and_matrix(square=True, batch=True),
upper=st.booleans(),
)
def test_torch_cholesky_ex(
*,
dtype_and_x,
upper,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
    x = np.asarray(x[0], dtype=dtype[0])
    x = np.matmul(x.T, x) + np.identity(x.shape[0])  # make symmetric positive-definite
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
input=x,
upper=upper,
)
@handle_frontend_test(
fn_tree="torch.linalg.cond",
dtype_and_x=_get_dtype_and_matrix(square=True, invertible=True, batch=True),
p=st.sampled_from([None, "fro", "nuc", np.inf, -np.inf, 1, -1, 2, -2]),
)
def test_torch_cond(
*, dtype_and_x, p, on_device, fn_tree, frontend, backend_fw, test_flags
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
input=x[0],
rtol=1e-2,
atol=1e-3,
p=p,
)
# cross
@handle_frontend_test(
fn_tree="torch.linalg.cross",
dtype_input_other_dim=dtype_value1_value2_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=3,
max_dim_size=3,
min_value=-1e3,
max_value=1e3,
abs_smallest_val=0.01,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
)
def test_torch_cross(
dtype_input_other_dim,
frontend,
test_flags,
fn_tree,
backend_fw,
):
dtype, input, other, dim = dtype_input_other_dim
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
rtol=1e-2,
atol=1e-3,
input=input,
other=other,
dim=dim,
)
# det
@handle_frontend_test(
fn_tree="torch.linalg.det",
aliases=["torch.det"],
dtype_and_x=_get_dtype_and_matrix(square=True, batch=True),
)
def test_torch_det(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
test_flags.num_positional_args = len(x)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
A=x[0],
)
@handle_frontend_test(
fn_tree="torch.diag_embed",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(min_num_dims=1, max_num_dims=2), key="shape"),
),
dims_and_offsets=helpers.dims_and_offset(
shape=st.shared(helpers.get_shape(min_num_dims=1, max_num_dims=2), key="shape"),
ensure_dim_unique=True,
),
)
@settings(suppress_health_check=list(HealthCheck))
def test_torch_diag_embed(
*,
dtype_and_values,
dims_and_offsets,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, value = dtype_and_values
dim1, dim2, offset = dims_and_offsets
num_of_dims = len(np.shape(value[0]))
if dim1 < 0:
assume(dim1 + num_of_dims != dim2)
if dim2 < 0:
assume(dim1 != dim2 + num_of_dims)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
offset=offset,
dim1=dim1,
dim2=dim2,
)
# eig
# TODO: Test for all valid dtypes once ivy.eig supports complex data types
@handle_frontend_test(
fn_tree="torch.linalg.eig",
dtype_and_input=_get_dtype_and_matrix(dtype="float", square=True),
)
def test_torch_eig(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_input
x = np.asarray(x[0], dtype=input_dtype[0])
x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
if x.dtype == ivy.float32:
x = x.astype("float64")
input_dtype = [ivy.float64]
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
input=x,
)
ret = [ivy.to_numpy(x).astype("float64") for x in ret]
frontend_ret = [np.asarray(x, dtype=np.float64) for x in frontend_ret]
l, v = ret # noqa: E741
front_l, front_v = frontend_ret
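    # Eigendecompositions are only unique up to ordering and scaling of the
    # eigenvectors, so compare the reconstructions v @ diag(l) @ v^-1 instead of
    # the raw factors.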
assert_all_close(
ret_np=v @ np.diag(l) @ np.linalg.inv(v),
ret_from_gt_np=front_v @ np.diag(front_l) @ np.linalg.inv(front_v),
rtol=1e-2,
atol=1e-2,
ground_truth_backend=frontend,
backend=backend_fw,
)
@handle_frontend_test(
fn_tree="torch.linalg.eigh",
dtype_and_x=_get_dtype_and_matrix(dtype="valid", square=True, invertible=True),
UPLO=st.sampled_from(("L", "U")),
)
def test_torch_eigh(
*,
dtype_and_x,
UPLO,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
x = np.array(x[0], dtype=dtype[0])
# make symmetric positive-definite beforehand
x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
a=x,
UPLO=UPLO,
)
ret = [ivy.to_numpy(x) for x in ret]
frontend_ret = [np.asarray(x) for x in frontend_ret]
L, Q = ret
frontend_L, frontend_Q = frontend_ret
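    # Eigenvectors may differ in sign and ordering across backends, so validate
    # the reconstruction Q @ diag(L) @ Q.T rather than the factors themselves.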
assert_all_close(
ret_np=Q @ np.diag(L) @ Q.T,
ret_from_gt_np=frontend_Q @ np.diag(frontend_L) @ frontend_Q.T,
atol=1e-02,
backend=backend_fw,
)
# eigvals
@handle_frontend_test(
fn_tree="torch.linalg.eigvals",
dtype_x=_get_dtype_and_matrix(square=True),
)
def test_torch_eigvals(
*,
dtype_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_x
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
test_values=False,
)
"""In "ret" we have out eigenvalues calculated with our backend and in
"frontend_ret" are our eigenvalues calculated with the specified
frontend."""
"""
Depending on the chosen framework there may be small differences between our
extremely small or big eigenvalues (eg: -3.62831993e-33+0.j(numpy)
vs -1.9478e-32+0.j(PyTorch)).
Important is that both are very very close to zero, indicating a
small value(very close to 0) either way.
To asses the correctness of our calculated eigenvalues for our initial matrix
we sort both numpy arrays and call assert_all_close on their modulus.
"""
"""
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if A is a batch of matrices then the
output has the same batch dimension
"""
frontend_ret = np.asarray(frontend_ret[0])
frontend_ret = np.sort(frontend_ret)
frontend_ret_modulus = np.zeros(len(frontend_ret), dtype=np.float64)
for i in range(len(frontend_ret)):
frontend_ret_modulus[i] = math.sqrt(
math.pow(frontend_ret[i].real, 2) + math.pow(frontend_ret[i].imag, 2)
)
ret = ivy.to_numpy(ret).astype(str(frontend_ret.dtype))
ret = np.sort(ret)
ret_modulus = np.zeros(len(ret), dtype=np.float64)
for i in range(len(ret)):
ret_modulus[i] = math.sqrt(math.pow(ret[i].real, 2) + math.pow(ret[i].imag, 2))
assert_all_close(
ret_np=ret_modulus,
ret_from_gt_np=frontend_ret_modulus,
rtol=1e-2,
atol=1e-2,
ground_truth_backend=frontend,
backend=backend_fw,
)
# eigvalsh
@handle_frontend_test(
fn_tree="torch.linalg.eigvalsh",
dtype_x=_get_dtype_and_symmetrix_matrix(),
UPLO=st.sampled_from(("L", "U")),
)
def test_torch_eigvalsh(
*,
dtype_x,
UPLO,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
UPLO=UPLO,
atol=1e-4,
rtol=1e-3,
)
# inv
@handle_frontend_test(
fn_tree="torch.linalg.inv",
aliases=["torch.inverse"],
dtype_and_x=_get_dtype_and_matrix(square=True, invertible=True, batch=True),
)
def test_torch_inv(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
test_flags.num_positional_args = 1
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
atol=1e-03,
A=x[0],
)
# inv_ex
# TODO: Test for singular matrices
@handle_frontend_test(
fn_tree="torch.linalg.inv_ex",
dtype_and_x=_get_dtype_and_matrix(square=True, invertible=True, batch=True),
)
def test_torch_inv_ex(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
atol=1e-02,
A=x[0],
)
# lu_factor
@handle_frontend_test(
fn_tree="torch.linalg.lu_factor",
input_dtype_and_input=_get_dtype_and_matrix(batch=True),
)
def test_torch_lu_factor(
*,
input_dtype_and_input,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input = input_dtype_and_input
ret = helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
atol=1e-02,
A=input[0],
test_values=False,
)
ret_f, ret_gt = ret
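    # Rebuild the unit lower-triangular L, upper-triangular U and permutation
    # matrix P from the compact factorisation and check that L @ U == P @ A.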
LU, p = ret_f.LU, ret_f.p
L = np.tril(LU, -1) + np.eye(LU.shape[0])
U = np.triu(LU)
P = np.eye(LU.shape[0])[p]
assert np.allclose(L @ U, P @ input[0])
@handle_frontend_test(
fn_tree="torch.linalg.lu_factor_ex",
input_dtype_and_input=_get_dtype_and_matrix(
batch=True, square=True, invertible=True
),
)
def test_torch_lu_factor_ex(
*,
input_dtype_and_input,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input = input_dtype_and_input
ret = helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
atol=1e-02,
A=input[0],
check_errors=False,
test_values=False,
)
ret_f, ret_gt = ret
ret_f_matrix, ret_f_info = ret_f
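    # info == 0 signals a successful factorisation (LAPACK convention); only then
    # is the result reconstructed and verified below.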
if ret_f_info == 0:
        LU, p = ret_f_matrix.LU, ret_f_matrix.p
L = np.tril(LU, -1) + np.eye(LU.shape[0])
U = np.triu(LU)
P = np.eye(LU.shape[0])[p]
assert np.allclose(L @ U, P @ input[0])
@handle_frontend_test(
fn_tree="torch.linalg.matmul",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=(3, 3),
num_arrays=2,
shared_dtype=True,
min_value=-1e04,
max_value=1e04,
),
)
def test_torch_matmul(
*,
dtype_x,
frontend,
fn_tree,
on_device,
test_flags,
backend_fw,
):
input_dtype, x = dtype_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_flags=test_flags,
input=x[0],
other=x[1],
rtol=1e-03,
atol=1e-03,
)
# matrix_exp
@handle_frontend_test(
fn_tree="torch.linalg.matrix_exp",
dtype_and_x=_get_dtype_and_matrix(square=True, invertible=True),
)
def test_torch_matrix_exp(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
test_flags.num_positional_args = len(x)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
atol=1e-03,
A=x[0],
)
# matrix_norm
@handle_frontend_test(
fn_tree="torch.linalg.matrix_norm",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_num_dims=2,
min_axes_size=2,
max_axes_size=2,
max_value=10e4,
min_value=-10e4,
abs_smallest_val=10e-4,
valid_axis=True,
force_tuple_axis=True,
),
ord=st.sampled_from(["fro", "nuc", np.inf, -np.inf, 1, -1, 2, -2]),
keepdim=st.booleans(),
dtypes=helpers.get_dtypes("float_and_complex", none=True, full=False),
)
def test_torch_matrix_norm(
*,
dtype_values_axis,
ord,
keepdim,
frontend,
dtypes,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, axis = dtype_values_axis
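    # Keep the input's base type and the wider of the two bit widths, presumably
    # so the requested output dtype never down-casts the input (complex128 inputs
    # keep their own dtype).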
if dtypes[0] is not None and "complex128" in input_dtype[0]:
dtypes[0] = input_dtype[0]
if dtypes[0] is not None:
dtypes[0] = input_dtype[0][:-2] + max([input_dtype[0][-2:], dtypes[0][-2:]])
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
atol=1e-03,
input=x[0],
ord=ord,
dim=axis,
keepdim=keepdim,
dtype=dtypes[0],
)
# matrix_power
@handle_frontend_test(
fn_tree="torch.linalg.matrix_power",
aliases=["torch.matrix_power"],
dtype_and_x=_get_dtype_and_matrix(square=True, invertible=True, batch=True),
n=helpers.ints(min_value=2, max_value=5),
)
def test_torch_matrix_power(
*,
dtype_and_x,
n,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
test_flags.num_positional_args = len(x) + 1
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
A=x[0],
n=n,
)
# matrix_rank
@handle_frontend_test(
fn_tree="torch.linalg.matrix_rank",
dtype_x_hermitian_atol_rtol=_matrix_rank_helper(),
)
def test_torch_matrix_rank(
dtype_x_hermitian_atol_rtol,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, hermitian, atol, rtol = dtype_x_hermitian_atol_rtol
assume(matrix_is_stable(x, cond_limit=10))
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
rtol=rtol,
atol=atol,
hermitian=hermitian,
)
@handle_frontend_test(
fn_tree="torch.linalg.multi_dot",
dtype_x=_generate_multi_dot_dtype_and_arrays(),
)
def test_torch_multi_dot(
dtype_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
on_device=on_device,
frontend=frontend,
fn_tree=fn_tree,
test_values=True,
tensors=x,
)
@handle_frontend_test(
fn_tree="torch.linalg.norm",
args=_get_axis_and_p(),
keepdim=st.booleans(),
test_with_out=st.just(False),
)
def test_torch_norm(
*,
args,
keepdim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
p, x_dtype, x, axis, dtype = args
helpers.test_frontend_function(
input_dtypes=[x_dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
atol=1e-08,
input=x,
ord=p,
dim=axis,
keepdim=keepdim,
dtype=dtype,
)
# pinv
# TODO: add testing for hermitian
@handle_frontend_test(
fn_tree="torch.linalg.pinv",
dtype_and_input=_get_dtype_and_matrix(batch=True),
)
def test_torch_pinv(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
atol=1e-02,
rtol=1e-02,
)
# qr
@handle_frontend_test(
fn_tree="torch.linalg.qr",
dtype_and_input=_get_dtype_and_matrix(batch=True),
)
def test_torch_qr(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_input
ivy.set_backend(backend_fw)
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
A=x[0],
test_values=False,
)
ret = [ivy.to_numpy(x) for x in ret]
frontend_ret = [np.asarray(x) for x in frontend_ret]
q, r = ret
frontend_q, frontend_r = frontend_ret
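    # Q and R are only unique up to sign conventions, so compare the products
    # q @ r of both backends instead of the individual factors.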
assert_all_close(
ret_np=q @ r,
ret_from_gt_np=frontend_q @ frontend_r,
rtol=1e-2,
atol=1e-2,
ground_truth_backend=frontend,
backend=backend_fw,
)
ivy.previous_backend()
# slogdet
@handle_frontend_test(
fn_tree="torch.linalg.slogdet",
aliases=["torch.slogdet"],
dtype_and_x=_get_dtype_and_matrix(square=True, batch=True),
)
def test_torch_slogdet(
*,
dtype_and_x,
fn_tree,
frontend,
on_device,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
test_flags.num_positional_args = len(x)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
A=x[0],
)
# solve
@handle_frontend_test(
fn_tree="torch.linalg.solve",
A=helpers.get_first_solve_batch_matrix(),
B=helpers.get_second_solve_batch_matrix(choose_side=True),
)
def test_torch_solve(
*,
A,
B,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, A, _ = A
_, B, left = B
test_flags.num_positional_args = 2
helpers.test_frontend_function(
input_dtypes=[dtype, dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-4,
atol=1e-4,
A=A,
B=B,
left=left,
)
# solve_ex
@handle_frontend_test(
fn_tree="torch.linalg.solve_ex",
A=helpers.get_first_solve_batch_matrix(),
B=helpers.get_second_solve_batch_matrix(choose_side=True),
check_errors=st.booleans(),
)
def test_torch_solve_ex(
*,
A,
B,
check_errors,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, A, _ = A
_, B, left = B
helpers.test_frontend_function(
input_dtypes=[dtype, dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-4,
atol=1e-4,
A=A,
B=B,
left=left,
check_errors=check_errors,
)
# svd
@handle_frontend_test(
fn_tree="torch.linalg.svd",
dtype_and_x=_get_dtype_and_matrix(square=True),
full_matrices=st.booleans(),
)
def test_torch_svd(
*,
dtype_and_x,
full_matrices,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
x = np.asarray(x[0], dtype=dtype[0])
# make symmetric positive definite beforehand
x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
atol=1e-03,
rtol=1e-05,
A=x,
full_matrices=full_matrices,
)
ret = [ivy.to_numpy(x) for x in ret]
frontend_ret = [np.asarray(x) for x in frontend_ret]
u, s, vh = ret
frontend_u, frontend_s, frontend_vh = frontend_ret
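    # Singular vectors carry a sign/phase ambiguity, so compare the
    # reconstructions u @ diag(s) @ vh rather than the factors themselves.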
assert_all_close(
ret_np=u @ np.diag(s) @ vh,
ret_from_gt_np=frontend_u @ np.diag(frontend_s) @ frontend_vh,
rtol=1e-2,
atol=1e-2,
ground_truth_backend=frontend,
backend=backend_fw,
)
# svdvals
@handle_frontend_test(
fn_tree="torch.linalg.svdvals",
dtype_and_x=_get_dtype_and_matrix(batch=True),
driver=st.sampled_from([None, "gesvd", "gesvdj", "gesvda"]),
)
def test_torch_svdvals(
*,
dtype_and_x,
driver,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
driver=driver,
A=x[0],
)
@handle_frontend_test(
fn_tree="torch.linalg.tensorinv", dtype_input_ind=_tensorinv_helper()
)
def test_torch_tensorinv(
*,
dtype_input_ind,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, ind = dtype_input_ind
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-04,
atol=1e-03,
input=x,
ind=ind,
)
@handle_frontend_test(
fn_tree="torch.linalg.tensorsolve",
a_and_b=_get_solve_matrices(),
)
def test_torch_tensorsolve(
*,
a_and_b,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, A, B = a_and_b
test_flags.num_positional_args = len(a_and_b) - 1
helpers.test_frontend_function(
input_dtypes=[input_dtype],
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-3,
rtol=1e-3,
A=A,
B=B,
)
@handle_frontend_test(
fn_tree="torch.linalg.vander",
dtype_and_input=_vander_helper(),
)
def test_torch_vander(
*,
dtype_and_input,
frontend,
fn_tree,
on_device,
test_flags,
backend_fw,
):
input_dtype, x, N = dtype_and_input
test_flags.num_positional_args = 1
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_flags=test_flags,
x=x[0],
N=N,
)
# vecdot
@handle_frontend_test(
fn_tree="torch.linalg.vecdot",
dtype_input_other_dim=dtype_value1_value2_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=3,
max_dim_size=3,
min_value=-1e3,
max_value=1e3,
abs_smallest_val=0.01,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
)
def test_torch_vecdot(
dtype_input_other_dim,
frontend,
test_flags,
fn_tree,
backend_fw,
):
dtype, input, other, dim = dtype_input_other_dim
test_flags.num_positional_args = len(dtype_input_other_dim) - 2
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
rtol=1e-2,
atol=1e-3,
x=input,
y=other,
dim=dim,
)
# vector_norm
@handle_frontend_test(
fn_tree="torch.linalg.vector_norm",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
valid_axis=True,
abs_smallest_val=1e04,
),
kd=st.booleans(),
ord=st.one_of(
helpers.ints(min_value=0, max_value=5),
helpers.floats(min_value=1.0, max_value=5.0),
st.sampled_from((float("inf"), -float("inf"))),
),
dtype=helpers.get_dtypes("valid", full=False),
)
def test_torch_vector_norm(
*,
dtype_values_axis,
kd,
ord,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, axis = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
ord=ord,
dim=axis,
keepdim=kd,
dtype=dtype[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py",
"repo_id": "ivy",
"token_count": 19122
} | 64 |
# global
import numpy as np
from hypothesis import strategies as st, assume
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers.hypothesis_helpers.general_helpers import (
two_broadcastable_shapes,
)
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
# float_power_helper
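# Pairs a base tensor of any numeric dtype with an exponent tensor; exponents are
# kept non-negative when their dtype is integral, likely to avoid negative integer
# powers.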
@st.composite
def _float_power_helper(draw, *, available_dtypes=None):
if available_dtypes is None:
available_dtypes = helpers.get_dtypes("numeric")
dtype1, x1 = draw(
helpers.dtype_and_values(
available_dtypes=available_dtypes,
small_abs_safety_factor=16,
large_abs_safety_factor=16,
safety_factor_scale="log",
)
)
dtype2 = draw(helpers.get_dtypes("numeric"))
if ivy.is_int_dtype(dtype2[0]):
min_value = 0
else:
min_value = -10
dtype2, x2 = draw(
helpers.dtype_and_values(
min_value=min_value,
max_value=10,
dtype=dtype2,
)
)
return (dtype1[0], dtype2[0]), (x1[0], x2[0])
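# Draws a tensor plus optional clip bounds: at least one of min/max is always
# produced, and min values (0-25) never exceed max values (26-50).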
@st.composite
def _get_clip_inputs(draw):
shape = draw(
helpers.get_shape(
min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10
)
)
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=shape,
)
)
min = draw(st.booleans())
if min:
max = draw(st.booleans())
min = draw(
helpers.array_values(
dtype=x_dtype[0], shape=shape, min_value=0, max_value=25
)
)
max = (
draw(
helpers.array_values(
dtype=x_dtype[0], shape=shape, min_value=26, max_value=50
)
)
if max
else None
)
else:
min = None
max = draw(
helpers.array_values(
dtype=x_dtype[0], shape=shape, min_value=26, max_value=50
)
)
return x_dtype, x, min, max
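# Produces a value tensor, a boolean mask with a broadcast-compatible shape and a
# scalar fill value of the same dtype as the tensor.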
@st.composite
def _masked_fill_helper(draw):
shape_1, shape_2 = draw(two_broadcastable_shapes())
dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=shape_1,
)
)
_, mask = draw(
helpers.dtype_and_values(
dtype=["bool"],
shape=shape_2,
)
)
_, fill_value = draw(
helpers.dtype_and_values(
dtype=dtype,
shape=(),
)
)
return dtype[0], x[0], mask[0], fill_value[0]
# --- Main --- #
# ------------ #
# abs
@handle_frontend_test(
fn_tree="torch.abs",
aliases=["torch.absolute"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric", full=False).filter(
lambda x: "uint8" not in x[0]
and "int8" not in x[0]
and "uint16" not in x[0]
and "int16" not in x[0]
and "float16" not in x[0]
and "bfloat16" not in x[0]
),
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
)
def test_torch_abs(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# absolute
@handle_frontend_test(
fn_tree="torch.absolute",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
)
def test_torch_absolute(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
)
# acos
@handle_frontend_test(
fn_tree="torch.acos",
aliases=["torch.arccos"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_acos(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# acosh
@handle_frontend_test(
fn_tree="torch.acosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_acosh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# add
@handle_frontend_test(
fn_tree="torch.add",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
alpha=st.integers(min_value=1, max_value=5),
)
def test_torch_add(
*,
dtype_and_x,
alpha,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
input=x[0],
other=x[1],
alpha=alpha,
)
# addcdiv
@handle_frontend_test(
fn_tree="torch.addcdiv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=3,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
value=st.floats(min_value=-100, max_value=100),
)
def test_torch_addcdiv(
dtype_and_x,
value,
frontend,
test_flags,
fn_tree,
backend_fw,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[2], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
input=x[0],
tensor1=x[1],
tensor2=x[2],
value=value,
atol=1e-03,
out=None,
)
# addcmul
@handle_frontend_test(
fn_tree="torch.addcmul",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=3,
min_value=-1e4,
max_value=1e4,
shared_dtype=True,
),
value=st.floats(min_value=-10, max_value=10),
)
def test_torch_addcmul(
dtype_and_x,
value,
frontend,
test_flags,
fn_tree,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
atol=1e-2,
input=x[0],
tensor1=x[1],
tensor2=x[2],
value=value,
out=None,
)
# angle
@handle_frontend_test(
fn_tree="torch.angle",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=["float64", "complex64", "complex128"],
),
)
def test_torch_angle(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# arccos
@handle_frontend_test(
fn_tree="torch.arccos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_arccos(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# arccosh
@handle_frontend_test(
fn_tree="torch.arccosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_arccosh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# arcsin
@handle_frontend_test(
fn_tree="torch.arcsin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_arcsin(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# arctan
@handle_frontend_test(
fn_tree="torch.arctan",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_arctan(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# arctan2
@handle_frontend_test(
fn_tree="torch.arctan2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
),
)
def test_torch_arctan2(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
# arctanh
@handle_frontend_test(
fn_tree="torch.arctanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_arctanh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# asin
@handle_frontend_test(
fn_tree="torch.asin",
aliases=["torch.arcsin"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_asin(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# asinh
@handle_frontend_test(
fn_tree="torch.asinh",
aliases=["torch.arcsinh"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_asinh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# atan
@handle_frontend_test(
fn_tree="torch.atan",
aliases=["torch.arctan"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_atan(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# atan2
@handle_frontend_test(
fn_tree="torch.atan2",
aliases=["torch.arctan2"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
),
)
def test_torch_atan2(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
# atanh
@handle_frontend_test(
fn_tree="torch.atanh",
aliases=["torch.arctanh"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_atanh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# bitwise_and
@handle_frontend_test(
fn_tree="torch.bitwise_and",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.just(("bool",)) | helpers.get_dtypes("integer"),
num_arrays=2,
),
)
def test_torch_bitwise_and(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
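# bitwise_left_shift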
@handle_frontend_test(
fn_tree="torch.bitwise_left_shift",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
array_api_dtypes=True,
),
)
def test_torch_bitwise_left_shift(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
# negative shifts will throw an exception
# shifts >= dtype width produce backend-defined behavior
x[1] = np.asarray(
np.clip(x[1], 0, np.iinfo(input_dtype[1]).bits - 1), dtype=input_dtype[1]
)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
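# bitwise_not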
@handle_frontend_test(
fn_tree="torch.bitwise_not",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=1,
),
)
def test_torch_bitwise_not(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
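# bitwise_or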
@handle_frontend_test(
fn_tree="torch.bitwise_xor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.just(("bool",)) | helpers.get_dtypes("integer"),
num_arrays=2,
),
)
def test_torch_bitwise_or(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
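# bitwise_right_shift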
@handle_frontend_test(
fn_tree="torch.bitwise_right_shift",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
array_api_dtypes=True,
),
)
def test_torch_bitwise_right_shift(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
# negative shifts will throw an exception
# shifts >= dtype width produce backend-defined behavior
x[1] = np.asarray(
np.clip(x[1], 0, np.iinfo(input_dtype[1]).bits - 1), dtype=input_dtype[1]
)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
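# bitwise_xor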
@handle_frontend_test(
fn_tree="torch.bitwise_or",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.just(("bool",)) | helpers.get_dtypes("integer"),
num_arrays=2,
),
)
def test_torch_bitwise_xor(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
# ceil
@handle_frontend_test(
fn_tree="torch.ceil",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_ceil(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# clamp
@handle_frontend_test(
fn_tree="torch.clamp",
aliases=["torch.clip"],
input_and_ranges=_get_clip_inputs(),
)
def test_torch_clamp(
*,
input_and_ranges,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
x_dtype, x, min, max = input_and_ranges
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
min=min,
max=max,
)
# clip
@handle_frontend_test(
fn_tree="torch.clip",
input_and_ranges=_get_clip_inputs(),
)
def test_torch_clip(
*,
input_and_ranges,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
x_dtype, x, min, max = input_and_ranges
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
min=min,
max=max,
)
# conj_physical
@handle_frontend_test(
fn_tree="torch.conj_physical",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_torch_conj_physical(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# copysign
@handle_frontend_test(
fn_tree="torch.copysign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
min_value=-100,
max_value=100,
shared_dtype=True,
),
)
def test_torch_copysign(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-03,
input=x[0],
other=x[1],
)
# cos
@handle_frontend_test(
fn_tree="torch.cos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_cos(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# cosh
@handle_frontend_test(
fn_tree="torch.cosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_cosh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# deg2rad
@handle_frontend_test(
fn_tree="torch.deg2rad",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-1000,
max_value=1000,
),
)
def test_torch_deg2rad(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# div
@handle_frontend_test(
fn_tree="torch.div",
aliases=["torch.divide"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
rounding_mode=st.sampled_from(["floor", "trunc"]) | st.none(),
)
def test_torch_div(
*,
dtype_and_x,
rounding_mode,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)))
    # Absolute tolerance is 1, since flooring can cause
    # an absolute error of 1 due to precision.
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1,
input=x[0],
other=x[1],
rounding_mode=rounding_mode,
)
# erf
@handle_frontend_test(
fn_tree="torch.erf",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_erf(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# erfc
@handle_frontend_test(
fn_tree="torch.special.erfc",
aliases=["torch.erfc"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_erfc(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# exp
@handle_frontend_test(
fn_tree="torch.exp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_torch_exp(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# exp2
@handle_frontend_test(
fn_tree="torch.exp2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_exp2(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# expm1
@handle_frontend_test(
fn_tree="torch.expm1",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_expm1(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# flipud
@handle_frontend_test(
fn_tree="torch.flipud",
dtype_and_m=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
)
def test_torch_flipud(
*,
dtype_and_m,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, m = dtype_and_m
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=m[0],
)
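# float_power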
@handle_frontend_test(
fn_tree="torch.float_power",
dtype_and_x=_float_power_helper(),
)
def test_torch_float_power(
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
    # Make sure zero raised to a negative power doesn't occur
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
input=x[0],
exponent=x[1],
)
# floor
@handle_frontend_test(
fn_tree="torch.floor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_floor(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
out=None,
)
# floor_divide
@handle_frontend_test(
fn_tree="torch.floor_divide",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
)
def test_torch_floor_divide(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
atol=1,
input=x[0],
other=x[1],
out=None,
)
# fmod
@handle_frontend_test(
fn_tree="torch.fmod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
min_value=-100,
max_value=100,
shared_dtype=True,
),
)
def test_torch_fmod(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-03,
x1=x[0],
x2=x[1],
)
# frac
@handle_frontend_test(
fn_tree="torch.frac",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_frac(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
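# frexp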
@handle_frontend_test(
fn_tree="torch.frexp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
shared_dtype=True,
min_value=-10,
max_value=10,
min_num_dims=1,
max_num_dims=1,
),
)
def test_torch_frexp(
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
)
# gradient
@handle_frontend_test(
fn_tree="torch.gradient",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
force_int_axis=True,
min_num_dims=1,
max_num_dims=3,
min_dim_size=2,
max_dim_size=4,
valid_axis=True,
),
spacing=helpers.ints(
min_value=-3,
max_value=3,
),
test_with_out=st.just(False),
)
def test_torch_gradient(
*,
dtype_input_axis,
spacing,
test_flags,
on_device,
fn_tree,
backend_fw,
frontend,
):
input_dtype, x, dim = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
spacing=spacing,
dim=dim,
)
# hypot
@handle_frontend_test(
fn_tree="torch.hypot",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
),
)
def test_torch_hypot(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
input=x[0],
other=x[1],
)
# i0
@handle_frontend_test(
fn_tree="torch.i0",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), num_arrays=1
),
)
def test_torch_i0(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-03,
input=x[0],
)
# igamma
@handle_frontend_test(
fn_tree="torch.igamma",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_value=2,
max_value=100,
),
test_with_out=st.just(False),
)
def test_torch_igamma(
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-04,
input=x[0],
other=x[1],
)
# imag
@handle_frontend_test(
fn_tree="torch.imag",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("complex"),
),
)
def test_torch_imag(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
)
# ldexp
@handle_frontend_test(
fn_tree="torch.ldexp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
min_value=-1e04,
max_value=1e04,
allow_inf=False,
),
)
def test_torch_ldexp(
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
input=x[0],
other=x[1],
)
# lerp
@handle_frontend_test(
fn_tree="torch.lerp",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", mixed_fn_compos=False),
num_arrays=3,
shared_dtype=True,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
min_value=-1e3,
max_value=1e3,
),
)
def test_torch_lerp(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, inputs = dtype_and_input
start, end, weight = inputs
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=start,
end=end,
weight=weight,
)
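# lgamma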
@handle_frontend_test(
fn_tree="torch.lgamma",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_lgamma(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, input = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
)
# log
@handle_frontend_test(
fn_tree="torch.log",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_log(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# log10
@handle_frontend_test(
fn_tree="torch.log10",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_log10(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# log1p
@handle_frontend_test(
fn_tree="torch.log1p",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-1e4,
max_value=1e4,
),
)
def test_torch_log1p(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
input=x[0],
out=None,
)
# log2
@handle_frontend_test(
fn_tree="torch.log2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_log2(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
)
# logaddexp
@handle_frontend_test(
fn_tree="torch.logaddexp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
min_value=-100,
max_value=100,
shared_dtype=True,
),
)
def test_torch_logaddexp(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-03,
x1=x[0],
x2=x[1],
)
# logaddexp2
@handle_frontend_test(
fn_tree="torch.logaddexp2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
min_value=-100,
max_value=100,
shared_dtype=True,
),
)
def test_torch_logaddexp2(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-02,
x1=x[0],
x2=x[1],
)
# logical and
@handle_frontend_test(
fn_tree="torch.logical_and",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2
),
)
def test_torch_logical_and(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
# logical not
@handle_frontend_test(
fn_tree="torch.logical_not",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=1
),
)
def test_torch_logical_not(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# logical or
@handle_frontend_test(
fn_tree="torch.logical_or",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2
),
)
def test_torch_logical_or(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
# logical xor
@handle_frontend_test(
fn_tree="torch.logical_xor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2
),
)
def test_torch_logical_xor(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
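# logit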
@handle_frontend_test(
fn_tree="torch.logit",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
min_dim_size=1,
),
eps=st.sampled_from([1e-05, -1e-05, None]),
)
def test_torch_logit(
*,
dtype_and_input,
eps,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, input = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
eps=eps,
out=None,
)
# masked_fill
@handle_frontend_test(
fn_tree="torch.masked_fill",
x_mask_val=_masked_fill_helper(),
)
def test_torch_masked_fill(
*, x_mask_val, on_device, fn_tree, frontend, test_flags, backend_fw
):
dtype, x, mask, val = x_mask_val
helpers.test_frontend_function(
input_dtypes=[dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
mask=mask,
value=val,
)
# mul
@handle_frontend_test(
fn_tree="torch.mul",
aliases=["torch.multiply"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
min_value=-1e04,
max_value=1e04,
allow_inf=False,
),
)
def test_torch_mul(
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
input=x[0],
other=x[1],
)
# mvlgamma
@handle_frontend_test(
fn_tree="torch.mvlgamma",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float")
),
p=helpers.ints(min_value=1, max_value=11),
)
def test_torch_mvlgamma(
*, dtype_and_input, frontend, test_flags, fn_tree, backend_fw, on_device, p
):
input_dtype, input = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
p=p,
)
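# nan_to_num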
@handle_frontend_test(
fn_tree="torch.nan_to_num",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
allow_nan=True,
allow_inf=True,
),
nan=st.floats(min_value=-100.0, max_value=100.0),
posinf=st.just(None) | st.floats(min_value=5e100, max_value=5e100),
neginf=st.just(None) | st.floats(min_value=-5e100, max_value=-5e100),
test_with_out=st.just(False),
)
def test_torch_nan_to_num(
*,
dtype_and_x,
nan,
posinf,
neginf,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
nan=nan,
posinf=posinf,
neginf=neginf,
)
# negative
@handle_frontend_test(
fn_tree="torch.negative",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
)
def test_torch_negative(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# nextafter
@handle_frontend_test(
fn_tree="torch.nextafter",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
)
def test_torch_nextafter(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
# positive
@handle_frontend_test(
fn_tree="torch.positive",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
)
def test_torch_positive(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
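# pow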
@handle_frontend_test(
fn_tree="torch.pow",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
min_value=1,
max_value=7,
shared_dtype=True,
),
)
def test_torch_pow(
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
if "int" in input_dtype[0] and isinstance(x[1], int) and x[1] < 0:
x[1] = -x[1]
try:
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
exponent=x[1],
)
except Exception as e:
if any(
error_string in str(e)
for error_string in ["overflow", "too large to convert to"]
):
assume(False)
else:
raise
# rad2deg
@handle_frontend_test(
fn_tree="torch.rad2deg",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
max_dim_size=3,
max_num_dims=3,
min_dim_size=1,
min_num_dims=1,
),
)
def test_torch_rad2deg(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# real
@handle_frontend_test(
fn_tree="torch.real",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_torch_real(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
)
# reciprocal
@handle_frontend_test(
fn_tree="torch.reciprocal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=1,
),
)
def test_torch_reciprocal(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
input=x[0],
)
# remainder
@handle_frontend_test(
fn_tree="torch.remainder",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
)
def test_torch_remainder(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1,
input=x[0],
other=x[1],
)
# round
@handle_frontend_test(
fn_tree="torch.round",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
decimals=st.integers(min_value=0, max_value=5),
)
def test_torch_round(
dtype_and_x,
decimals,
frontend,
test_flags,
fn_tree,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
input=x[0],
decimals=decimals,
)
# rsqrt
@handle_frontend_test(
fn_tree="torch.rsqrt",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_rsqrt(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
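# sgn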
@handle_frontend_test(
fn_tree="torch.sgn",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_num_dims=1,
max_num_dims=1,
min_dim_size=1,
max_dim_size=1,
abs_smallest_val=1e-10,
min_value=-10,
max_value=10,
),
)
def test_torch_sgn(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, input = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
out=None,
)
# sigmoid
@handle_frontend_test(
fn_tree="torch.sigmoid",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_sigmoid(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# sign
@handle_frontend_test(
fn_tree="torch.sign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_torch_sign(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# signbit
@handle_frontend_test(
fn_tree="torch.signbit",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_torch_signbit(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# sin
@handle_frontend_test(
fn_tree="torch.sin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_sin(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# sinc
@handle_frontend_test(
fn_tree="torch.sinc",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_sinc(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# sinh
@handle_frontend_test(
fn_tree="torch.sinh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_sinh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# sqrt
@handle_frontend_test(
fn_tree="torch.sqrt",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
)
def test_torch_sqrt(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
)
# square
@handle_frontend_test(
fn_tree="torch.square",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_square(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
)
# subtract
@handle_frontend_test(
fn_tree="torch.subtract",
aliases=["torch.sub"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
alpha=st.integers(min_value=1, max_value=5),
)
def test_torch_subtract(
*,
dtype_and_x,
alpha,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
alpha=alpha,
)
# tan
@handle_frontend_test(
fn_tree="torch.tan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_tan(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# tanh
@handle_frontend_test(
fn_tree="torch.tanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_tanh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# true_divide
@handle_frontend_test(
fn_tree="torch.true_divide",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
)
def test_torch_true_divide(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
other=x[1],
)
# trunc
@handle_frontend_test(
fn_tree="torch.trunc",
aliases=["torch.fix"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_trunc(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# xlogy
@handle_frontend_test(
fn_tree="torch.xlogy",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
min_value=-100,
max_value=100,
shared_dtype=True,
),
)
def test_torch_xlogy(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-03,
input=x[0],
other=x[1],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_pointwise_ops.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_pointwise_ops.py",
"repo_id": "ivy",
"token_count": 33758
} | 65 |
"""Collection of tests for creation functions."""
# global
from hypothesis import strategies as st, assume
import numpy as np
import ivy
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test, BackendHandler
import ivy_tests.test_ivy.helpers.globals as test_globals
from ivy_tests.test_ivy.test_functional.test_core.test_dtype import astype_helper
# --- Helpers --- #
# --------------- #
@st.composite
def _asarray_helper(draw):
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=st.integers(min_value=1, max_value=10),
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
shared_dtype=True,
)
)
with BackendHandler.update_backend(test_globals.CURRENT_BACKEND) as ivy_backend:
x_list = ivy_backend.nested_map(lambda x: x.tolist(), x, shallow=False)
sh = draw(helpers.get_shape(min_num_dims=1))
sh = ivy_backend.Shape(sh)
# np_array = x[0]
# dim = draw(helpers.get_shape(min_num_dims=1))
# nested_values = draw(
# helpers.create_nested_input(dim, [sh, np_array, x_list[0]])
# )
dtype = draw(
helpers.get_castable_dtype(
draw(helpers.get_dtypes("numeric")), dtype=x_dtype[0]
)
)[-1]
dtype = draw(st.sampled_from([dtype, None]))
x = draw(
st.sampled_from(
[
x,
x_list,
sh,
# nested_values,
]
)
)
return x_dtype, x, dtype
@st.composite
def _dtype_and_values(draw):
return draw(
helpers.dtype_and_values(
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
dtype=draw(helpers.get_dtypes("numeric", full=False, key="dtype")),
)
)
@st.composite
def _dtype_indices_depth_axis(draw):
depth = draw(helpers.ints(min_value=2, max_value=100))
dtype, indices, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=0,
max_value=depth - 1,
small_abs_safety_factor=4,
ret_shape=True,
)
)
axis = draw(st.integers(min_value=-1, max_value=len(shape) - 1))
return dtype, indices, depth, axis
@st.composite
def _fill_value(draw):
dtype = draw(helpers.get_dtypes("numeric", full=False, key="dtype"))[0]
with BackendHandler.update_backend(test_globals.CURRENT_BACKEND) as ivy_backend:
if ivy_backend.is_uint_dtype(dtype):
return draw(helpers.ints(min_value=0, max_value=5))
if ivy_backend.is_int_dtype(dtype):
return draw(helpers.ints(min_value=-5, max_value=5))
return draw(helpers.floats(min_value=-5, max_value=5))
@st.composite
def _get_dtype_buffer_count_offset(draw):
dtype, value = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
)
)
value = np.array(value)
length = value.size
value = value.tobytes()
offset = draw(helpers.ints(min_value=0, max_value=length - 1))
count = draw(helpers.ints(min_value=-(2**30), max_value=length - offset))
if count == 0:
count = -1
offset = offset * np.dtype(dtype[0]).itemsize
return dtype, value, count, offset
@st.composite
def _on_off_dtype(draw):
dtype, value = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=(2,),
safety_factor_scale="log",
)
)
[on_value, off_value] = value[0]
return on_value, off_value, dtype[0]
# --- Main --- #
# ------------ #
def is_capsule(o):
t = type(o)
return t.__module__ == "builtins" and t.__name__ == "PyCapsule"
# arange
@handle_test(
fn_tree="functional.ivy.arange",
start=helpers.ints(min_value=0, max_value=50),
stop=helpers.ints(min_value=0, max_value=50) | st.none(),
    step=helpers.ints(min_value=-50, max_value=50).filter(lambda x: x != 0),
dtype=helpers.get_dtypes("numeric", full=False),
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_arange(
*,
start,
stop,
step,
dtype,
test_flags,
backend_fw,
fn_name,
on_device,
):
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
start=start,
stop=stop,
step=step,
dtype=dtype[0],
device=on_device,
)
# asarray
# TODO: Fix container, instance methods and as_variable
@handle_test(
fn_tree="functional.ivy.asarray",
x_dtype_x_and_dtype=_asarray_helper(),
test_gradients=st.just(False),
test_instance_method=st.just(False),
test_with_copy=st.just(True),
)
def test_asarray(
*,
x_dtype_x_and_dtype,
test_flags,
backend_fw,
fn_name,
on_device,
):
x_dtype, x, dtype = x_dtype_x_and_dtype
if isinstance(x, list) and len(x) == 1:
x = x[0]
assume(not test_flags.container[0])
# avoid casting complex to non-complex
if dtype is not None:
assume(not ("complex" in x_dtype[0] and "complex" not in dtype))
helpers.test_function(
input_dtypes=x_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
object_in=x,
dtype=dtype,
device=on_device,
)
# copy array
@handle_test(
fn_tree="functional.ivy.copy_array",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
to_ivy_array_bool=st.booleans(),
test_with_copy=st.just(True),
)
def test_copy_array(
*,
test_flags,
dtype_and_x,
to_ivy_array_bool,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
# avoid enabling gradients for non-float arrays
if test_flags.as_variable[0]:
assume("float" in dtype[0])
# smoke test
with BackendHandler.update_backend(backend_fw) as ivy_backend:
x = test_flags.apply_flags(
x, dtype, 0, backend=backend_fw, on_device=on_device
)[0]
test_flags.instance_method = (
test_flags.instance_method if not test_flags.native_arrays[0] else False
)
if test_flags.instance_method:
ret = x.copy_array(to_ivy_array=to_ivy_array_bool)
else:
ret = ivy_backend.copy_array(x, to_ivy_array=to_ivy_array_bool)
# type test
test_ret = ret
test_x = x
if test_flags.container[0]:
assert ivy_backend.is_ivy_container(ret)
test_ret = ret["a"]
test_x = x["a"]
if to_ivy_array_bool:
assert ivy_backend.is_ivy_array(test_ret)
else:
assert ivy_backend.is_native_array(test_ret)
# cardinality test
assert test_ret.shape == test_x.shape
# value test
x, ret = ivy_backend.to_ivy(x), ivy_backend.to_ivy(ret)
x_np, ret_np = helpers.flatten_and_to_np(
backend=backend_fw, ret=x
), helpers.flatten_and_to_np(backend=backend_fw, ret=ret)
helpers.value_test(
backend=backend_fw,
ground_truth_backend=backend_fw,
ret_np_flat=ret_np,
ret_np_from_gt_flat=x_np,
)
assert id(x) != id(ret)
# empty
@handle_test(
fn_tree="functional.ivy.empty",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
dtype=helpers.get_dtypes("numeric", full=False),
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_empty(*, shape, dtype, test_flags, backend_fw, fn_name, on_device):
ret = helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
shape=shape,
dtype=dtype[0],
device=on_device,
test_values=False,
return_flat_np_arrays=True,
)
helpers.assert_same_type_and_shape(ret)
# empty_like
@handle_test(
fn_tree="functional.ivy.empty_like",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
test_gradients=st.just(False),
)
def test_empty_like(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_and_x
ret = helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
dtype=dtype[0],
device=on_device,
test_values=False,
return_flat_np_arrays=True,
)
helpers.assert_same_type_and_shape(ret)
# eye
@handle_test(
n_rows=helpers.ints(min_value=0, max_value=10),
n_cols=st.none() | helpers.ints(min_value=0, max_value=10),
k=helpers.ints(min_value=-10, max_value=10),
batch_shape=st.lists(
helpers.ints(min_value=1, max_value=10), min_size=1, max_size=2
),
dtype=helpers.get_dtypes("valid", full=False),
fn_tree="functional.ivy.eye",
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_eye(
*, n_rows, n_cols, k, batch_shape, dtype, test_flags, backend_fw, fn_name, on_device
):
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
on_device=on_device,
fn_name=fn_name,
n_rows=n_rows,
n_cols=n_cols,
k=k,
batch_shape=batch_shape,
dtype=dtype[0],
device=on_device,
)
# from_dlpack
@handle_test(
fn_tree="functional.ivy.from_dlpack",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(kind="float", full=False, key="dtype"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
test_gradients=st.just(False),
)
def test_from_dlpack(*, dtype_and_x, backend_fw):
if backend_fw == "numpy":
return
ivy.set_backend(backend_fw)
input_dtype, x = dtype_and_x
native_array = ivy.native_array(x[0])
cap = ivy.to_dlpack(native_array)
array = ivy.from_dlpack(cap)
assert ivy.is_native_array(array)
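# frombuffer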
@handle_test(
fn_tree="functional.ivy.frombuffer",
dtype_buffer_count_offset=_get_dtype_buffer_count_offset(),
test_instance_method=st.just(False),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_frombuffer(
dtype_buffer_count_offset, test_flags, backend_fw, fn_name, on_device
):
input_dtype, buffer, count, offset = dtype_buffer_count_offset
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
buffer=buffer,
dtype=input_dtype[0],
count=count,
offset=offset,
)
# full
@handle_test(
fn_tree="functional.ivy.full",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
fill_value=_fill_value(),
dtypes=helpers.get_dtypes("valid", full=False),
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_full(*, shape, fill_value, dtypes, test_flags, backend_fw, fn_name, on_device):
if dtypes[0].startswith("uint") and fill_value < 0:
fill_value = -fill_value
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
shape=shape,
fill_value=fill_value,
dtype=dtypes[0],
device=on_device,
)
# full_like
@handle_test(
fn_tree="functional.ivy.full_like",
dtype_and_x=_dtype_and_values(),
dtypes=helpers.get_dtypes("valid", full=False),
fill_value=_fill_value(),
test_gradients=st.just(False),
)
def test_full_like(
*, dtype_and_x, dtypes, fill_value, test_flags, backend_fw, fn_name, on_device
):
dtype, x = dtype_and_x
if dtypes[0].startswith("uint") and fill_value < 0:
fill_value = -fill_value
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
fill_value=fill_value,
dtype=dtype[0],
device=on_device,
)
# linspace
@handle_test(
fn_tree="functional.ivy.linspace",
dtype_and_start_stop_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=-1e5,
max_value=1e5,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
allow_inf=False,
shared_dtype=True,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
valid_axis=True,
force_int_axis=True,
),
dtype=helpers.get_dtypes("float", full=False),
num=helpers.ints(min_value=1, max_value=5),
endpoint=st.booleans(),
)
def test_linspace(
*,
dtype_and_start_stop_axis,
num,
endpoint,
dtype,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtypes, start_stop, axis = dtype_and_start_stop_axis
helpers.test_function(
input_dtypes=input_dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=0.8,
start=start_stop[0],
stop=start_stop[1],
num=num,
axis=axis,
endpoint=endpoint,
dtype=dtype[0],
device=on_device,
)
# logspace
@handle_test(
fn_tree="functional.ivy.logspace",
dtype_and_start_stop_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=-1e5,
max_value=1e5,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
allow_inf=False,
shared_dtype=True,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
valid_axis=True,
force_int_axis=True,
),
dtype=helpers.get_dtypes("float", full=False),
num=helpers.ints(min_value=1, max_value=5),
base=helpers.floats(min_value=0.1, max_value=20.0),
endpoint=st.booleans(),
)
def test_logspace(
*,
dtype_and_start_stop_axis,
dtype,
num,
base,
endpoint,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtypes, start_stop, axis = dtype_and_start_stop_axis
helpers.test_function(
input_dtypes=input_dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
        rtol_=1,  # if it's less than one, it'll test for inf
atol_=0.8,
start=start_stop[0],
stop=start_stop[1],
num=num,
base=base,
axis=axis,
endpoint=endpoint,
dtype=dtype[0],
device=on_device,
)
# meshgrid
@handle_test(
fn_tree="functional.ivy.meshgrid",
dtype_and_arrays=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=st.integers(min_value=2, max_value=5),
min_num_dims=1,
max_num_dims=1,
shared_dtype=True,
),
sparse=st.booleans(),
indexing=st.sampled_from(["xy", "ij"]),
test_with_out=st.just(False),
)
def test_meshgrid(
*, dtype_and_arrays, test_flags, sparse, indexing, backend_fw, fn_name, on_device
):
dtype, arrays = dtype_and_arrays
kw = {}
i = 0
for x_ in arrays:
kw[f"x{i}"] = x_
i += 1
test_flags.num_positional_args = len(arrays)
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
**kw,
sparse=sparse,
indexing=indexing,
)
# native_array
@handle_test(
fn_tree="functional.ivy.native_array",
dtype_and_x_and_cast_dtype=astype_helper(),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_native_array(
*,
dtype_and_x_and_cast_dtype,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, x, dtype = dtype_and_x_and_cast_dtype
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
dtype=dtype[0],
device=on_device,
)
# one_hot
@handle_test(
fn_tree="functional.ivy.one_hot",
dtype_indices_depth_axis=_dtype_indices_depth_axis(),
on_off_dtype=_on_off_dtype(),
test_gradients=st.just(False),
)
def test_one_hot(
dtype_indices_depth_axis, on_off_dtype, test_flags, backend_fw, fn_name, on_device
):
input_dtype, indices, depth, axis = dtype_indices_depth_axis
on_value, off_value, dtype = on_off_dtype
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
indices=indices[0],
depth=depth,
on_value=on_value,
off_value=off_value,
axis=axis,
dtype=dtype,
)
# ones
@handle_test(
fn_tree="functional.ivy.ones",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
dtype=helpers.get_dtypes("numeric", full=False),
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_ones(*, shape, dtype, test_flags, backend_fw, fn_name, on_device):
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
shape=shape,
dtype=dtype[0],
device=on_device,
)
# ones_like
@handle_test(
fn_tree="functional.ivy.ones_like",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_ones_like(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
dtype=dtype[0],
device=on_device,
)
# to_dlpack
@handle_test(
fn_tree="functional.ivy.to_dlpack",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(kind="float", full=False, key="dtype"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
test_gradients=st.just(False),
)
def test_to_dlpack(*, dtype_and_x, backend_fw):
ivy.set_backend(backend_fw)
input_dtype, x = dtype_and_x
native_array = ivy.native_array(x[0])
cap = ivy.to_dlpack(native_array)
assert is_capsule(cap)
# tril
@handle_test(
fn_tree="functional.ivy.tril",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
k=helpers.ints(min_value=-10, max_value=10),
)
def test_tril(*, dtype_and_x, k, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
k=k,
)
# triu
@handle_test(
fn_tree="functional.ivy.triu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
k=helpers.ints(min_value=-10, max_value=10),
)
def test_triu(*, dtype_and_x, k, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
k=k,
)
@handle_test(
fn_tree="functional.ivy.triu_indices",
n_rows=st.integers(min_value=0, max_value=5),
n_cols=st.integers(min_value=0, max_value=5) | st.just(None),
k=st.integers(min_value=-5, max_value=5),
input_dtype=helpers.get_dtypes("integer"),
test_with_out=st.just(False),
test_gradients=st.just(False),
test_instance_method=st.just(False),
)
def test_triu_indices(
*, n_rows, n_cols, k, input_dtype, test_flags, backend_fw, fn_name, on_device
):
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
on_device=on_device,
fn_name=fn_name,
n_rows=n_rows,
n_cols=n_cols,
k=k,
)
# zeros
@handle_test(
fn_tree="functional.ivy.zeros",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
dtype=helpers.get_dtypes("valid", full=False),
test_instance_method=st.just(False),
test_gradients=st.just(False),
)
def test_zeros(*, shape, dtype, test_flags, backend_fw, fn_name, on_device):
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
shape=shape,
dtype=dtype[0],
device=on_device,
)
# zeros_like
@handle_test(
fn_tree="functional.ivy.zeros_like",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
test_gradients=st.just(False),
)
def test_zeros_like(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
dtype=dtype[0],
device=on_device,
)
| ivy/ivy_tests/test_ivy/test_functional/test_core/test_creation.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_creation.py",
"repo_id": "ivy",
"token_count": 11679
} | 66 |
# global
from hypothesis import strategies as st
# local
import ivy
import numpy as np
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_method
# --- Helpers --- #
# --------------- #
@st.composite
def _sparse_bsc_indices_values_shape(draw):
nblockrows = draw(helpers.ints(min_value=2, max_value=5))
nblockcols = draw(helpers.ints(min_value=2, max_value=5))
dim1 = draw(helpers.ints(min_value=2, max_value=5))
dim2 = draw(helpers.ints(min_value=3, max_value=5))
value_dtype = draw(helpers.get_dtypes("numeric", full=False))[0]
ccol_indices, row_indices, values = (
[0],
[],
[
[
[],
],
],
)
for _ in range(dim2):
index = draw(
helpers.ints(
min_value=max(ccol_indices[-1] + 1, 1),
max_value=ccol_indices[-1] + dim1,
)
)
cur_num_elem = index - ccol_indices[-1]
row_indices += list(range(cur_num_elem))
ccol_indices.append(index)
shape = (dim1 * nblockrows, dim2 * nblockcols)
values = draw(
helpers.array_values(
dtype=value_dtype,
shape=(ccol_indices[-1], nblockrows, nblockcols),
min_value=0,
)
)
return ccol_indices, row_indices, value_dtype, values, shape
@st.composite
def _sparse_bsr_indices_values_shape(draw):
nblockrows = draw(helpers.ints(min_value=2, max_value=5))
nblockcols = draw(helpers.ints(min_value=2, max_value=5))
dim1 = draw(helpers.ints(min_value=3, max_value=5))
dim2 = draw(helpers.ints(min_value=2, max_value=5))
value_dtype = draw(helpers.get_dtypes("numeric", full=False))[0]
crow_indices, col_indices, values = (
[0],
[],
[
[
[],
],
],
)
for _ in range(dim1):
index = draw(
helpers.ints(
min_value=max(crow_indices[-1] + 1, 1),
max_value=crow_indices[-1] + dim2,
)
)
cur_num_elem = index - crow_indices[-1]
col_indices += list(range(cur_num_elem))
crow_indices.append(index)
shape = (dim1 * nblockrows, dim2 * nblockcols)
values = draw(
helpers.array_values(
dtype=value_dtype,
shape=(crow_indices[-1], nblockrows, nblockcols),
min_value=0,
)
)
return crow_indices, col_indices, value_dtype, values, shape
@st.composite
def _sparse_coo_indices_values_shape(draw):
num_elem = draw(helpers.ints(min_value=2, max_value=8))
dim1 = draw(helpers.ints(min_value=2, max_value=5))
dim2 = draw(helpers.ints(min_value=5, max_value=10))
value_dtype = draw(helpers.get_dtypes("numeric", full=False))[0]
coo_indices = draw(
helpers.array_values(
dtype="int64",
shape=(2, num_elem),
min_value=0,
max_value=dim1,
exclude_min=False,
)
)
values = draw(helpers.array_values(dtype=value_dtype, shape=(num_elem,)))
shape = (dim1, dim2)
return coo_indices, value_dtype, values, shape
@st.composite
def _sparse_csc_indices_values_shape(draw):
num_elem = draw(helpers.ints(min_value=2, max_value=8))
dim1 = draw(helpers.ints(min_value=5, max_value=10))
dim2 = draw(helpers.ints(min_value=2, max_value=5))
value_dtype = draw(helpers.get_dtypes("numeric", full=False))[0]
values = draw(helpers.array_values(dtype=value_dtype, shape=(num_elem,)))
row_indices = draw(
helpers.array_values(
dtype="int64",
shape=(num_elem,),
min_value=0,
max_value=dim1,
exclude_min=False,
)
)
indices = draw(
helpers.array_values(
dtype="int64",
shape=(dim2 - 1,),
min_value=0,
max_value=num_elem,
exclude_min=False,
)
)
ccol_indices = [0] + sorted(indices) + [num_elem]
shape = (dim1, dim2)
return ccol_indices, row_indices, value_dtype, values, shape
@st.composite
def _sparse_csr_indices_values_shape(draw):
num_elem = draw(helpers.ints(min_value=2, max_value=8))
dim1 = draw(helpers.ints(min_value=2, max_value=5))
dim2 = draw(helpers.ints(min_value=5, max_value=10))
value_dtype = draw(helpers.get_dtypes("numeric", full=False))[0]
values = draw(helpers.array_values(dtype=value_dtype, shape=(num_elem,)))
col_indices = draw(
helpers.array_values(
dtype="int64",
shape=(num_elem,),
min_value=0,
max_value=dim2,
exclude_min=False,
)
)
indices = draw(
helpers.array_values(
dtype="int64",
shape=(dim1 - 1,),
min_value=0,
max_value=num_elem,
exclude_min=False,
)
)
crow_indices = [0] + sorted(indices) + [num_elem]
shape = (dim1, dim2)
return crow_indices, col_indices, value_dtype, values, shape
# --- Main --- #
# ------------ #
# adding sparse array to dense array
@handle_method(
init_tree="ivy.array",
method_tree="Array.__add__",
sparse_data=_sparse_coo_indices_values_shape(),
)
def test_array_add_sparse(
sparse_data,
method_name,
class_name,
on_device,
):
coo_ind, val_dtype, val, shp = sparse_data
    # set backend to 'torch' as this is the only backend which supports sparse arrays
ivy.set_backend("torch")
# initiate a sparse array
sparse_inst = ivy.sparse_array.SparseArray(
coo_indices=coo_ind,
values=val,
dense_shape=shp,
format="coo",
)
# create an Array instance
array_class = getattr(ivy, class_name)
x = np.random.random_sample(shp)
x = ivy.array(x, dtype=val_dtype, device=on_device)
# call add method
add_method = getattr(array_class, method_name)
res = add_method(x, sparse_inst)
# make sure the result is an Array instance
assert isinstance(res, array_class)
# bsc - to_dense_array
@handle_method(
method_tree="SparseArray.to_dense_array",
sparse_data=_sparse_bsc_indices_values_shape(),
method_num_positional_args=st.just(0), # TODO should not be hardcoded
init_num_positional_args=st.just(0), # TODO should not be hardcoded
)
def test_sparse_bsc(
sparse_data,
class_name,
method_name,
on_device,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
):
ccol_indices, row_indices, value_dtype, values, shape = sparse_data
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
init_input_dtypes=["int64", "int64", value_dtype],
init_all_as_kwargs_np={
"ccol_indices": ccol_indices,
"row_indices": row_indices,
"values": values,
"dense_shape": shape,
"format": "bsc",
},
method_input_dtypes=[],
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
# bsr - to_dense_array
@handle_method(
method_tree="SparseArray.to_dense_array",
sparse_data=_sparse_bsr_indices_values_shape(),
method_num_positional_args=st.just(0), # TODO should not be hardcoded
init_num_positional_args=st.just(0), # TODO should not be hardcoded
)
def test_sparse_bsr(
sparse_data,
class_name,
method_name,
on_device,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
):
crow_indices, col_indices, value_dtype, values, shape = sparse_data
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
on_device=on_device,
method_flags=method_flags,
init_input_dtypes=["int64", "int64", value_dtype],
init_all_as_kwargs_np={
"crow_indices": crow_indices,
"col_indices": col_indices,
"values": values,
"dense_shape": shape,
"format": "bsr",
},
method_input_dtypes=[],
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
# coo - to_dense_array
@handle_method(
method_tree="SparseArray.to_dense_array",
sparse_data=_sparse_coo_indices_values_shape(),
method_num_positional_args=st.just(0), # TODO should not be hardcoded
init_num_positional_args=st.just(0), # TODO should not be hardcoded
)
def test_sparse_coo(
sparse_data,
class_name,
method_name,
backend_fw,
init_flags,
method_flags,
on_device,
ground_truth_backend,
):
coo_ind, val_dtype, val, shp = sparse_data
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
init_input_dtypes=["int64", val_dtype],
init_all_as_kwargs_np={
"coo_indices": coo_ind,
"values": val,
"dense_shape": shp,
"format": "coo",
},
method_input_dtypes=[],
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
# csc - to_dense_array
@handle_method(
method_tree="SparseArray.to_dense_array",
sparse_data=_sparse_csc_indices_values_shape(),
method_num_positional_args=st.just(0), # TODO should not be hardcoded
init_num_positional_args=st.just(0), # TODO should not be hardcoded
)
def test_sparse_csc(
sparse_data,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
on_device,
method_flags,
):
ccol_indices, row_indices, value_dtype, values, shape = sparse_data
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
init_input_dtypes=["int64", "int64", value_dtype],
init_all_as_kwargs_np={
"ccol_indices": ccol_indices,
"row_indices": row_indices,
"values": values,
"dense_shape": shape,
"format": "csc",
},
method_input_dtypes=[],
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
# csr - to_dense_array
@handle_method(
method_tree="SparseArray.to_dense_array",
sparse_data=_sparse_csr_indices_values_shape(),
method_num_positional_args=st.just(0), # TODO should not be hardcoded
init_num_positional_args=st.just(0), # TODO should not be hardcoded
)
def test_sparse_csr(
sparse_data,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
on_device,
method_flags,
):
crow_indices, col_indices, value_dtype, values, shape = sparse_data
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
init_input_dtypes=["int64", "int64", value_dtype],
init_all_as_kwargs_np={
"crow_indices": crow_indices,
"col_indices": col_indices,
"values": values,
"dense_shape": shape,
"format": "csr",
},
method_input_dtypes=[],
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
| ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_sparse_array.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_sparse_array.py",
"repo_id": "ivy",
"token_count": 5769
} | 67 |
import pickle
import numpy as np
import os
from hypothesis import given, assume
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
# pickling array test to disk
@given(
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
min_num_dims=0,
max_num_dims=5,
min_dim_size=0,
max_dim_size=5,
),
)
def test_pickle_to_and_from_disk(dtype_and_x, on_device, backend_fw):
ivy.set_backend(backend_fw)
input_dtype, x = dtype_and_x
assume("bfloat16" not in input_dtype)
x = ivy.array(x[0], dtype=input_dtype[0], device=on_device)
# paddle tensors can't be pickled directly as referenced in this issue
# https://github.com/PaddlePaddle/Paddle/issues/41107
if ivy.backend == "paddle":
x = x.to_numpy()
save_filepath = "ivy_array.pickle"
pickle.dump(x, open(save_filepath, "wb"))
assert os.path.exists(save_filepath)
unpickled_arr = pickle.load(open(save_filepath, "rb"))
os.remove(save_filepath)
# check for equality
assert np.allclose(ivy.to_numpy(x), ivy.to_numpy(unpickled_arr))
ivy.previous_backend()
# Tests #
# ------#
# pickling array test to str
@given(
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
min_num_dims=0,
max_num_dims=5,
min_dim_size=0,
max_dim_size=5,
),
)
def test_pickle_to_string(dtype_and_x, on_device, backend_fw):
ivy.set_backend(backend_fw)
input_dtype, x = dtype_and_x
assume("bfloat16" not in input_dtype)
x = ivy.array(x[0], dtype=input_dtype[0], device=on_device)
# paddle tensors can't be pickled directly as referenced in this issue
# https://github.com/PaddlePaddle/Paddle/issues/41107
if ivy.backend == "paddle":
x = x.to_numpy()
pickled_arr = pickle.dumps(x)
unpickled_arr = pickle.loads(pickled_arr)
# check for equality
assert np.allclose(ivy.to_numpy(x), ivy.to_numpy(unpickled_arr))
ivy.previous_backend()
| ivy/ivy_tests/test_ivy/test_misc/test_pickling.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_pickling.py",
"repo_id": "ivy",
"token_count": 937
} | 68 |
import os
if __name__ == "__main__":
tag = os.environ["TAG"]
python_tag, abi_tag, plat_name = tag.split("-")
if os.path.exists("dist"):
for file in os.listdir("dist"):
old_name = f"{python_tag}-none-{plat_name}.whl"
new_name = f"{python_tag}-{abi_tag}-{plat_name}.whl"
if file.endswith(old_name):
os.rename(
os.path.join("dist", file),
os.path.join("dist", file[: -len(old_name)] + new_name),
)
| ivy/scripts/rename_wheels.py/0 | {
"file_path": "ivy/scripts/rename_wheels.py",
"repo_id": "ivy",
"token_count": 293
} | 69 |
import sys
backends = ["numpy", "jax", "tensorflow", "torch"]
submodules = [
"activations",
"converters",
"initializers",
"layers",
"modules",
"norms",
"optimizers",
"sequential",
]
run = int(sys.argv[1])
N = len(backends)
M = len(submodules)
num_tests = N * M
run %= num_tests
i = run // M
j = run % M
backend = backends[i]
submodule = submodules[j]
with open("./fwsubmod.txt", "w") as outfile:
outfile.write(f"{backend}-{submodule}")
with open("./backend.txt", "w") as f:
f.write(f"{backend}")
with open("./submodule.txt", "w") as f:
f.write(f"test_{submodule}")
| ivy/scripts/setup_tests/run_ivy_stateful_test.py/0 | {
"file_path": "ivy/scripts/setup_tests/run_ivy_stateful_test.py",
"repo_id": "ivy",
"token_count": 281
} | 70 |
#!/bin/bash -e
docker run --rm --env REDIS_URL="$3" --env REDIS_PASSWD="$4" -v "$(pwd)":/ivy -v "$(pwd)"/.hypothesis:/.hypothesis unifyai/ivy:latest python3 -m pytest --backend "$1" ivy_tests/test_ivy/test_stateful/"$2".py --tb=line
| ivy/scripts/shell/test_ivy_stateful.sh/0 | {
"file_path": "ivy/scripts/shell/test_ivy_stateful.sh",
"repo_id": "ivy",
"token_count": 105
} | 71 |
{
"name": "Ivy GPU Development Environment (build)",
"build": {
"dockerfile": "../../docker/DockerfileGPU",
"context": "../..",
"args": {
"IMAGE_NAME": "unifyai/ivy",
"IMAGE_TAG": "latest-gpu"
}
},
"runArgs": ["--gpus","all"],
"customizations": {
"vscode": {
"extensions": [
"ms-python.python"
],
"settings": {
"python.defaultInterpreterPath": "/opt/miniconda/envs/multienv/bin/python3"
}
}
},
"postCreateCommand": {
"post_create": "bash .devcontainer/post_create_commands.sh",
"bashrc": "echo \"alias python=python3\" >> ~/.bashrc"
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Uncomment the next line to run commands after the container is created - for example installing curl.
// Uncomment when using a ptrace-based debugger like C++, Go, and Rust
// "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
// Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
// "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
// "remoteUser": "vscode",
"features": {
"ghcr.io/devcontainers/features/common-utils:2": {
"installzsh": true,
"configurezshasdefaultshell": true,
"installohmyzsh": true,
"upgradePackages": false
},
"ghcr.io/devcontainers/features/docker-outside-of-docker:1": {
"moby": true,
"installdockerbuildx": true,
"version": "20.10",
"dockerdashcomposeversion": "v2"
},
"ghcr.io/devcontainers/features/github-cli:1": {
"installDirectlyFromGitHubRelease": true,
"version": "latest"
}
}
}
| ivy/.devcontainer/build_gpu/devcontainer.json/0 | {
"file_path": "ivy/.devcontainer/build_gpu/devcontainer.json",
"repo_id": "ivy",
"token_count": 756
} | 0 |
FROM ubuntu:20.04
WORKDIR /ivy
ARG CLI
FROM unifyai/ivy:latest as base
COPY requirements/optional_applied.txt .
RUN pip3 install --no-cache-dir -r optional_applied.txt
| ivy/docker/DockerfileApplied/0 | {
"file_path": "ivy/docker/DockerfileApplied",
"repo_id": "ivy",
"token_count": 67
} | 1 |
{% extends "top_level_module.rst" %}
{% set base_module = fullname + "." + fullname.split('.')[-1] %}
{%- block module_name -%}
{{base_module}}
{%- endblock -%}
{%- block discussion_module_name -%}
{{base_module}}
{%- endblock -%}
{% block toctree -%}
.. autosummary::
:toctree: {{name}}
:template: data_module.rst
:hide-table:
:recursive:
{% for submodule in modules -%}
{% if base_module != submodule %}
{{ submodule }}
{% endif -%}
{% endfor -%}
{% endblock %}
| ivy/docs/_templates/top_data_module.rst/0 | {
"file_path": "ivy/docs/_templates/top_data_module.rst",
"repo_id": "ivy",
"token_count": 209
} | 2 |
Deep Dive
=========
.. _`issues`: https://github.com/unifyai/ivy/issues
.. _`pull-requests`: https://github.com/unifyai/ivy/pulls
For general users of the framework, who are mainly concerned with learning how to *use* Ivy, the `Design <design.rst>`_ section is the best place to start 🙂
This *deep dive* section is more targeted at people who would like to dive deeper into how Ivy actually works under the hood 🔧
Going through the sections outlined below will get you right into the weeds of the framework 🌱, and hopefully give you a better understanding of what is actually going on behind the scenes 🎬
It's best to go through the sub-sections from start to finish, but you can also dive in at any stage!
We're excited for you to get involved! 🦾
| (a) `Navigating the Code <deep_dive/navigating_the_code.rst>`_ 🧭
| A quick tour through the codebase
|
| (b) `Function Types <deep_dive/function_types.rst>`_ 🧮
| Primary, compositional, mixed, and nestable functions
|
| (c) `Superset Behaviour <deep_dive/superset_behaviour.rst>`_ ⊃
| Ivy goes for the superset when unifying the backend functions
|
| (d) `Backend Setting <deep_dive/backend_setting.rst>`_ ⚙
| How the backend is set, and what this means for each function type️
|
| (e) `Arrays <deep_dive/arrays.rst>`_ 🔢
| Different types of arrays, and how they're handled
|
| (f) `Containers <deep_dive/containers.rst>`_ 🗂
| What the :class:`ivy.Container` does
|
| (g) `Data Types <deep_dive/data_types.rst>`_ 💾
| How functions infer the correct data type
|
| (h) `Devices <deep_dive/devices.rst>`_ 📱
| How functions infer the correct device
|
| (i) `Inplace Updates <deep_dive/inplace_updates.rst>`_ 🎯
| How the :code:`out` argument is used to specify the output target
|
| (j) `Function Wrapping <deep_dive/function_wrapping.rst>`_ 🎁
| How functions are dynamically wrapped at runtime
|
| (k) `Formatting <deep_dive/formatting.rst>`_ 📋
| How the code is automatically formatted
|
| (l) `Ivy Lint <deep_dive/ivy_lint.rst>`_ 🧹
| Ivy's Custom Code Formatters
|
| (m) `Function Arguments <deep_dive/function_arguments.rst>`_ 📑
| How to add the correct function arguments
|
| (n) `Docstrings <deep_dive/docstrings.rst>`_ 📄
| How to properly write docstrings
|
| (o) `Docstring Examples <deep_dive/docstring_examples.rst>`_ 💯
| How to add useful examples to the docstrings
|
| (p) `Array API Tests <deep_dive/array_api_tests.rst>`_ 🤝
| How we're borrowing the test suite from the Array API Standard
|
| (q) `Ivy Tests <deep_dive/ivy_tests.rst>`_ 🧪
| How to add new tests for each Ivy function
|
| (r) `Ivy Frontends <deep_dive/ivy_frontends.rst>`_ ➡
| How to implement frontend functions
|
| (s) `Ivy Frontend Tests <deep_dive/ivy_frontends_tests.rst>`_ 🧪
| How to add new tests for each frontend function
|
| (t) `Exception Handling <deep_dive/exception_handling.rst>`_ ⚠
| How to handle exceptions and assertions in a function
|
| (u) `Continuous Integration <deep_dive/continuous_integration.rst>`_ 🔁
| Ivy Tests running on the Repository
|
| (v) `Gradients <deep_dive/gradients.rst>`_ 🔁
| Everything about our Gradients API
|
| (w) `Operating Modes <deep_dive/operating_modes.rst>`_ 🧮
| Everything about modes Ivy can operate in, along with their purposes
|
| (x) `Building the Docs Pipeline <deep_dive/building_the_docs_pipeline.rst>`_ 📚
| How are we building our docs
.. toctree::
:hidden:
:maxdepth: -1
:caption: Deep Dive
deep_dive/navigating_the_code.rst
deep_dive/function_types.rst
deep_dive/superset_behaviour.rst
deep_dive/backend_setting.rst
deep_dive/arrays.rst
deep_dive/containers.rst
deep_dive/data_types.rst
deep_dive/devices.rst
deep_dive/inplace_updates.rst
deep_dive/function_wrapping.rst
deep_dive/formatting.rst
deep_dive/ivy_lint.rst
deep_dive/function_arguments.rst
deep_dive/docstrings.rst
deep_dive/docstring_examples.rst
deep_dive/array_api_tests.rst
deep_dive/ivy_tests.rst
deep_dive/ivy_frontends.rst
deep_dive/ivy_frontends_tests.rst
deep_dive/exception_handling.rst
deep_dive/continuous_integration.rst
deep_dive/gradients.rst
deep_dive/operating_modes.rst
deep_dive/building_the_docs_pipeline.rst
deep_dive/fix_failing_tests.rst
| ivy/docs/overview/deep_dive.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive.rst",
"repo_id": "ivy",
"token_count": 1605
} | 3 |
Function Wrapping
=================
.. _`wrapped`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/utils/backend/handler.py#L259
.. _`_wrap_function`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L965
.. _`abs`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/functional/ivy/elementwise.py#L28
.. _`creation submodule`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/functional/ivy/creation.py
.. _`zeros`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/functional/ivy/creation.py#L482
.. _`asarray`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/functional/ivy/creation.py#L383
.. _`inputs_to_native_arrays`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L405
.. _`inputs_to_ivy_arrays`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L445
.. _`outputs_to_ivy_arrays`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L525
.. _`to_native_arrays_and_back`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L595
.. _`infer_dtype`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L725
.. _`infer_device`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L763
.. _`handle_out_argument`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L800
.. _`handle_nestable`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L896
.. _`inputs_to_native_shapes`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L488
.. _`outputs_to_ivy_shapes`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L501
.. _`to_native_shapes_and_back`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L514
.. _`handle_view`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L627
.. _`handle_view_indexing`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L659
.. _`handle_array_function`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L299
.. _`handle_complex_input`: https://github.com/unifyai/ivy/blob/bd9b5b1080d33004e821a48c486b3a879b9d6616/ivy/func_wrapper.py#L1393
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`function wrapping thread`: https://discord.com/channels/799879767196958751/1189906704775794688
.. _`handle_partial_mixed_function`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L944
.. _`stored as an attribute`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/func_wrapper.py#L1054
.. _`ivy.linear`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/functional/ivy/layers.py#L81
.. _`handle_exceptions`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/utils/exceptions.py#L189
.. _`example`: https://github.com/unifyai/ivy/blob/5658401b266352d3bf72c95e4af6ae9233115722/ivy/functional/backends/torch/layers.py#L30
.. _`Arrays`: arrays.rst
.. _`Inplace Updates`: inplace_updates.rst
.. _`Data Types`: data_types.rst
.. _`Devices`: devices.rst
.. _`Backend Setting`: backend_setting.rst
When a backend framework is set by calling :code:`ivy.set_backend(backend_name)`, then all Ivy functions are `wrapped`_.
This is achieved by calling `_wrap_function`_, which will apply the appropriate wrapping to the given function, based on what decorators it has.
For example, `abs`_ has the decorators :code:`@to_native_arrays_and_back` and :code:`@handle_out_argument`, and so the backend implementations will also be wrapped with the `to_native_arrays_and_back`_ and `handle_out_argument`_ wrappers.
The new function returned by :code:`_wrap_function` is a replacement of the original function with extra code added to support requirements common to many functions in the API.
This is the main purpose of the wrapping, to avoid code duplication which would exist if we added identical logic in every single function independently.
Depending on the function being wrapped, the new function might handle `Arrays`_, `Inplace Updates`_, `Data Types`_ and/or `Devices`_.
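As a rough, self-contained sketch of this principle (none of the names below exist in Ivy itself; they are invented purely for illustration), a wrapper can be flagged on the API function via an attribute and then re-applied to the matching backend implementation:
.. code-block:: python
    import functools
    def logs_calls(fn):  # stands in for a real wrapper such as handle_out_argument
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            print(f"calling {fn.__name__}")  # the "extra code" added by wrapping
            return fn(*args, **kwargs)
        wrapper.logs_calls = True  # attribute checked at wrapping time
        return wrapper
    @logs_calls
    def api_abs(x):  # the framework-agnostic API function
        ...
    def backend_abs(x):  # a backend implementation of the same function
        return abs(x)
    # at backend-setting time, the flags found on `api_abs` decide which
    # wrappers are re-applied to `backend_abs`
    if getattr(api_abs, "logs_calls", False):
        backend_abs = logs_calls(backend_abs)
    print(backend_abs(-3))  # prints "calling backend_abs", then 3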
Our test decorators actually transform into :code:`@given` decorators at Pytest collection time, which allows us to use other **Hypothesis** decorators such as :code:`@reproduce_failure`, :code:`@settings`, and :code:`@seed`.
Decorator order
^^^^^^^^^^^^^^^
The order in which Ivy's decorators are applied is important, as the functionality of many functions depends on it. If the decorators are applied in the wrong order, a test may fail or the function may not behave as expected.
The recommended order is as follows:
#. :code:`@handle_complex_input`
#. :code:`@infer_device`
#. :code:`@handle_device_shifting`
#. :code:`@infer_dtype`
#. :code:`@handle_array_function`
#. :code:`@outputs_to_ivy_arrays`
#. :code:`@outputs_to_ivy_shapes`
#. :code:`@outputs_to_native_arrays`
#. :code:`@inputs_to_native_arrays`
#. :code:`@inputs_to_native_shapes`
#. :code:`@inputs_to_ivy_arrays`
#. :code:`@handle_out_argument`
#. :code:`@handle_view_indexing`
#. :code:`@handle_view`
#. :code:`@handle_array_like_without_promotion`
#. :code:`@handle_partial_mixed_function`
#. :code:`@handle_nestable`
#. :code:`@handle_ragged`
#. :code:`@handle_backend_invalid`
#. :code:`@handle_exceptions`
#. :code:`@handle_nans`
Following this recommended order keeps the wrapped functions and their tests efficient and accurate, because the decorators depend on each other. For example, the :code:`@infer_device` decorator needs to be applied before the :code:`@infer_dtype` decorator, because :code:`@infer_dtype` needs to know the device of the function in order to infer the data type.
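The importance of ordering can be seen with a toy example that is unrelated to Ivy's real wrappers (all names below are invented); the innermost decorator always runs closest to the function:
.. code-block:: python
    def tag(label):
        def deco(fn):
            def wrapper(*args, **kwargs):
                return f"{label}({fn(*args, **kwargs)})"
            return wrapper
        return deco
    @tag("outer")
    @tag("inner")
    def f():
        return "f"
    print(f())  # outer(inner(f))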
Conversion Wrappers
^^^^^^^^^^^^^^^^^^^
#. `inputs_to_native_arrays`_ : This wrapping function converts all :class:`ivy.Array` instances in the arguments to their :class:`ivy.NativeArray` counterparts, based on the `Backend Setting`_ before calling the function.
#. `inputs_to_ivy_arrays`_ : This wrapping function converts all :class:`ivy.NativeArray` instances in the arguments to their :class:`ivy.Array` counterparts, based on the `Backend Setting`_ before calling the function.
#. `outputs_to_ivy_arrays`_ : This wrapping function converts all :class:`ivy.NativeArray` instances in the outputs to their :class:`ivy.Array` counterparts, based on the `Backend Setting`_ before calling the function.
#. `to_native_arrays_and_back`_ : This wrapping function converts all :class:`ivy.Array` instances in the arguments to their :class:`ivy.NativeArray` counterparts, calls the function with those arguments and then converts the :class:`ivy.NativeArray` instances in the output back to :class:`ivy.Array`.
This wrapping function is heavily used because it enables achieving the objective of ensuring that every ivy function could accept an :class:`ivy.Array` and return an :class:`ivy.Array`, making it independent of the `Backend Setting`_.
Inference Wrappers
^^^^^^^^^^^^^^^^^^
#. `infer_dtype`_ : This wrapping function infers the `dtype` argument to be passed to a function based on the array arguments passed to it.
If :code:`dtype` is explicitly passed to the function, then it is used directly.
This wrapping function could be found in functions from the `creation submodule`_ such as `zeros`_ where we then allow the user to not enter the :code:`dtype` argument to such functions.
#. `infer_device`_ : Similar to the `infer_dtype`_ wrapping function, the `infer_device`_ function wrapping infers the :code:`device` argument to be passed to a function based on the first array argument passed to it.
This wrapping function is also used a lot in functions from the `creation submodule`_ such as `asarray`_, where we want to create the `ivy.Array` on the same device as the input array.
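Both behaviours can be sketched with a short example (the printed dtype and device are indicative only, as they depend on the backend's defaults):
.. code-block:: python
    import ivy
    ivy.set_backend("numpy")
    x = ivy.zeros((2, 3))  # neither dtype nor device is passed explicitly
    print(x.dtype, x.device)  # e.g. float32 cpu, filled in by the inference wrappers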
Out Argument Support
^^^^^^^^^^^^^^^^^^^^
#. `handle_out_argument`_ : This wrapping function is used in nearly all ivy functions.
It enables appropriate handling of the :code:`out` argument of functions.
In cases where the backend framework natively supports the :code:`out` argument for a function, we prefer to use it as it's a more efficient implementation of the :code:`out` argument for that particular backend framework.
But in cases when it isn't supported, we support it anyway with `Inplace Updates`_.
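For example (a minimal sketch; :func:`ivy.abs` is just one of many functions accepting :code:`out`):
.. code-block:: python
    import ivy
    ivy.set_backend("numpy")
    x = ivy.array([-1.0, 2.0])
    out = ivy.zeros((2,))
    ivy.abs(x, out=out)  # the result is written into ``out`` in place
    print(out)  # ivy.array([1., 2.])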
Nestable Support
^^^^^^^^^^^^^^^^
#. `handle_nestable`_ : This wrapping function enables :class:`ivy.Container` arguments to be used in functions, and enables those functions to be called directly through the :code:`ivy` namespace, just as they would be called with :class:`ivy.Array` arguments. Thus, the function can be called by passing an :class:`ivy.Container` to any or all of its arguments.
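A short sketch of this behaviour (the container structure below is arbitrary):
.. code-block:: python
    import ivy
    ivy.set_backend("numpy")
    c = ivy.Container(a=ivy.array([-1.0]), b=ivy.array([2.0, -3.0]))
    print(ivy.abs(c))  # ivy.abs is mapped over every array leaf of the container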
Partial Mixed Function Support
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#. `handle_partial_mixed_function`_: This wrapping function enables switching between compositional and primary implementations of :ref:`overview/deep_dive/function_types:Mixed Functions` based on some condition on the arguments of the function.
#. The condition is specified through a lambda function; when it evaluates to `True` the primary implementation is run, and otherwise the compositional implementation is executed.
#. For backends that have a primary implementation of a mixed function, the reference to the compositional implementation is `stored as an attribute`_ inside the backend function during backend setting. To make use of this decorator, one must add the :code:`partial_mixed_handler` attribute containing the lambda function to the backend implementation. Here's an `example`_ from the torch backend implementation of linear.
Shape Conversion
^^^^^^^^^^^^^^^^
#. `inputs_to_native_shapes`_ : This wrapping function converts all :class:`ivy.Shape` instances in the arguments to their :class:`ivy.NativeShape` counterparts, based on the `Backend Setting`_ before calling the function.
#. `outputs_to_ivy_shapes`_ : This wrapping function converts all :class:`ivy.NativeShape` instances in the outputs to their :class:`ivy.Shape` counterparts, based on the `Backend Setting`_ before calling the function.
#. `to_native_shapes_and_back`_ : This wrapping function converts all :class:`ivy.Shape` instances in the arguments to their :class:`ivy.NativeShape` counterparts, calls the function with those arguments and then converts the :class:`ivy.NativeShape` instances in the output back to :class:`ivy.Shape`.
View Handling
^^^^^^^^^^^^^
#. `handle_view`_ : This wrapping function performs view handling based on our :ref:`overview/deep_dive/inplace_updates:Views` policy.
#. `handle_view_indexing`_ : This wrapping function is aimed at handling views for indexing.
Exception Handling
^^^^^^^^^^^^^^^^^^
#. `handle_exceptions`_ : This wrapping function helps in catching native exceptions and unifying them into `IvyException` or the relevant subclasses. More information can be found in the :ref:`overview/deep_dive/function_wrapping:Exception Handling` section.
Miscellaneous Wrappers
^^^^^^^^^^^^^^^^^^^^^^
#. `handle_array_function`_ : This wrapping function enables :ref:`overview/deep_dive/arrays:Integrating custom classes with Ivy`
#. `handle_complex_input`_ : This wrapping function enables handling of complex numbers. It introduces a keyword argument :code:`complex_mode`, which is used to choose the function's behaviour as per the wrapper's docstring.
When `_wrap_function`_ is called during `Backend Setting`_, the attributes of the function are first checked to determine which wrapping functions apply to it.
Each applicable wrapping function is then used to wrap the function.
Each of these topics and each associated piece of logic added by the various wrapper functions are covered in more detail in the next sections.
For now, suffice it to say that they do quite a lot.
**Round Up**
This should have hopefully given you a good feel for how function wrapping is applied to functions in Ivy.
If you have any questions, please feel free to reach out on `discord`_ in the `function wrapping thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/-RGXxrP849k" class="video">
</iframe>
| ivy/docs/overview/deep_dive/function_wrapping.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/function_wrapping.rst",
"repo_id": "ivy",
"token_count": 4360
} | 4 |
Ivy as a Transpiler
===================
On the `Building Blocks <building_blocks.rst>`_ page, we explored the role of the Backend functional APIs, the Ivy functional API, the Backend handler, and the Tracer.
These parts are labelled (a) in the image below.
Here, we explain the role of the backend-specific frontends in Ivy, and how these enable automatic code conversions between different ML frameworks.
This part is labelled as (b) in the image below.
The code conversion tools described on this page are works in progress, as indicated by the construction signs 🚧.
This is in keeping with the rest of the documentation.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/design/submodule_dependency_graph.png?raw=true
:align: center
:width: 100%
Frontend Functional APIs 🚧
---------------------------
While the backend API, Ivy API, and backend handler enable all Ivy code to be framework-agnostic, they do not, for example, enable PyTorch code to be framework-agnostic.
But with frontend APIs, we can also achieve this!
Let’s take a look at how the implementation of :code:`clip` method would seem like in the frontends:
.. code-block:: python
# ivy/functional/frontends/jax/lax/functions.py
def clamp(x_min,x, x_max):
return ivy.clip(x, x_min, x_max)
.. code-block:: python
# ivy/functional/frontends/numpy/general.py
def clip(x, x_min, x_max):
return ivy.clip(x, x_min, x_max)
.. code-block:: python
# ivy/functional/frontends/tensorflow/general.py
def clip_by_value(x, x_min, x_max):
return ivy.clip(x, x_min, x_max)
.. code-block:: python
# ivy/functional/frontends/torch/general.py
def clamp(x, x_min, x_max):
return ivy.clip(x, x_min, x_max)
Combined, we have the following situation:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/design/clip_backends_n_frontends.png?raw=true
:align: center
:width: 100%
Importantly, we can select the backend and frontend **independently** from one another.
For example, this means we can select a JAX backend, but also select the PyTorch frontend and write Ivy code which fully adheres to the PyTorch functional API.
In the reverse direction: we can take pre-written pure PyTorch code, replace each PyTorch function with the equivalent function using Ivy’s PyTorch frontend, and then run this PyTorch code using JAX:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/design/clip_conversion.png?raw=true
:align: center
:width: 100%
|
This example is very simple, as the differences are only syntactic, but the above process works for **any** function.
If there are semantic differences then these will be captured (a) in the wrapped frontend code which expresses the frontend method as a composition of Ivy functions, and (b) in the wrapped backend code which expresses the Ivy functions as compositions of backend methods.
Let’s take a more complex example and convert the PyTorch method :func:`torch.nn.functional.one_hot` into NumPy code.
The frontend is implemented by wrapping a single Ivy method :func:`ivy.one_hot` as follows:
.. code-block:: python
# ivy/functional/frontends/torch/nn/sparse_functions.py
def one_hot(tensor, num_classes=-1):
return ivy.one_hot(tensor, num_classes)
Let’s look at the NumPy backend code for this Ivy method:
.. code-block:: python
# ivy/functional/backends/numpy/general.py
def one_hot(
indices: np.ndarray, depth: int, *, device: str, out: Optional[np.ndarray] = None
) -> np.ndarray:
res = np.eye(depth)[np.array(indices).reshape(-1)]
return res.reshape(list(indices.shape) + [depth])
By chaining these methods together, we can now call :func:`torch.nn.functional.one_hot` using NumPy:
.. code-block:: python
import ivy
import ivy.frontends.torch as torch
ivy.set_backend('numpy')
x = np.array([0., 1., 2.])
ret = torch.nn.functional.one_hot(x, 3)
Let’s take one more example and convert TensorFlow method :func:`tf.cumprod` into PyTorch code.
This time, the frontend is implemented by wrapping two Ivy methods :func:`ivy.cumprod`, and :func:`ivy.flip` as follows:
.. code-block:: python
# ivy/functional/frontends/tensorflow/math.py
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
ret = ivy.cumprod(x, axis, exclusive)
if reverse:
return ivy.flip(ret, axis)
return ret
Let’s look at the PyTorch backend code for both of these Ivy methods:
.. code-block:: python
# ivy/functional/backends/torch/general.py
def cumprod(
x: torch.Tensor,
axis: int = 0,
exclusive: bool = False,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if exclusive:
x = torch.transpose(x, axis, -1)
x = torch.cat((torch.ones_like(x[..., -1:]), x[..., :-1]), -1, out=out)
res = torch.cumprod(x, -1, out=out)
return torch.transpose(res, axis, -1)
return torch.cumprod(x, axis, out=out)
.. code-block:: python
# ivy/functional/backends/torch/manipulation.py
def flip(
x: torch.Tensor,
axis: Optional[Union[int, Sequence[int]]] = None,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
num_dims: int = len(x.shape)
if not num_dims:
return x
if axis is None:
new_axis: List[int] = list(range(num_dims))
else:
new_axis: List[int] = axis
if isinstance(new_axis, int):
new_axis = [new_axis]
else:
new_axis = new_axis
new_axis = [item + num_dims if item < 0 else item for item in new_axis]
ret = torch.flip(x, new_axis)
return ret
Again, by chaining these methods together, we can now call :func:`tf.math.cumprod` using PyTorch:
.. code-block:: python
import ivy
import ivy.frontends.tensorflow as tf
ivy.set_backend('torch')
x = torch.tensor([[0., 1., 2.]])
ret = tf.math.cumprod(x, -1)
Role of the Tracer 🚧
-----------------------------
The very simple example above worked well, but what about even more complex PyTorch code involving Modules, Optimizers, and other higher level objects? This is where the tracer plays a vital role.
The tracer can convert any code into its constituent functions at the functional API level for any ML framework.
For example, let’s take the following PyTorch code and run it using JAX:
.. code-block:: python
import torch
class Network(torch.nn.Module):
def __init__(self):
super().__init__()
self._linear = torch.nn.Linear(3, 3)
def forward(self, x):
return self._linear(x)
x = torch.tensor([1., 2., 3.])
net = Network()
net(x)
We cannot simply :code:`import ivy.frontends.torch` in place of :code:`import torch` as we did in the previous examples.
This is because the Ivy frontend only supports the functional API for each framework, whereas the code above makes use of higher level classes through the use of the :mod:`torch.nn` namespace.
In general, the way we convert code is by first decomposing the code into its constituent functions in the core API using Ivy’s tracer, and then we convert this executable graph into the new framework.
For the example above, this would look like:
.. code-block:: python
import jax
import ivy
jax_graph = ivy.trace_graph(net, x).to_backend('jax')
x = jax.numpy.array([1., 2., 3.])
jax_graph(x)
However, when calling :func:`ivy.trace_graph` the graph only connects the inputs to the outputs.
Any other tensors or variables which are not listed in the inputs are treated as constants in the graph.
In this case, this means the learnable weights in the Module will be treated as constants.
This works fine if we only care about running inference on our graph post-training, but this won’t enable training of the Module in JAX.
Converting Network Models 🚧
----------------------------
In order to convert a model from PyTorch to JAX, we first must convert the :class:`torch.nn.Module` instance to an :class:`ivy.Module` instance using the method :func:`ivy.to_ivy_module` like so:
.. code-block:: python
net = ivy.to_ivy_module(net)
In its current form, the :class:`ivy.Module` instance thinly wraps the PyTorch model into the :class:`ivy.Module` interface, whilst preserving the pure PyTorch backend.
We can trace a graph of this network using Ivy’s tracer like so:
.. code-block:: python
net = net.trace_graph()
In this case, the learnable weights are treated as inputs to the graph rather than constants.
Now, with a traced graph under the hood of our model, we can call :meth:`to_backend` directly on the :class:`ivy.Module` instance to convert it to any backend of our choosing, like so:
.. code-block:: python
net = net.to_backend('jax')
The network can now be trained using Ivy’s optimizer classes with a JAX backend like so:
.. code-block:: python
optimizer = ivy.Adam(1e-4)
x_in = ivy.array([1., 2., 3.])
target = ivy.array([0.])
    def loss_fn(v):
        out = net(x_in, v=v)
        return ivy.reduce_mean((out - target)**2)
    for step in range(100):
        loss, grads = ivy.execute_with_gradients(loss_fn, net.v)
        net.v = optimizer.step(net.v, grads)
To convert this :class:`ivy.Module` instance to a :class:`haiku.Module` instance, we can call :meth:`to_haiku_module` like so:
.. code-block:: python
net = net.to_haiku_module()
If we want to remove Ivy from the pipeline entirely, we can then train the model in Haiku like so:
.. code-block:: python
import haiku as hk
import jax.numpy as jnp
x_in = jnp.array([1., 2., 3.])
target = jnp.array([0.])
def loss_fn():
out = net(x_in)
return jnp.mean((out - target)**2)
loss_fn_t = hk.transform(loss_fn)
loss_fn_t = hk.without_apply_rng(loss_fn_t)
rng = jax.random.PRNGKey(42)
params = loss_fn_t.init(rng)
def update_rule(param, update):
return param - 0.01 * update
for i in range(100):
grads = jax.grad(loss_fn_t.apply)(params)
params = jax.tree_multimap(update_rule, params, grads)
Other JAX-specific network libraries such as Flax, Trax, and Objax are also supported.
Overall, we have taken a :class:`torch.nn.Module` instance, which can be trained using PyTorch’s optimizer classes, and converted this to a :class:`haiku.Module` instance which can be trained using Haiku’s optimizer classes.
The same is true for any combination of frameworks, and for any network architecture, regardless of its complexity!
**Round Up**
Hopefully, this has explained how, with the addition of backend-specific frontends, Ivy will be able to easily convert code between different ML frameworks 🙂 These code conversion tools are still works in progress, as indicated by the construction signs 🚧.
This is in keeping with the rest of the documentation.
Please reach out on `discord <https://discord.gg/sXyFF8tDtm>`_ if you have any questions!
| ivy/docs/overview/design/ivy_as_a_transpiler.rst/0 | {
"file_path": "ivy/docs/overview/design/ivy_as_a_transpiler.rst",
"repo_id": "ivy",
"token_count": 3887
} | 5 |
.. _`RWorks Exchange Formats`:
Exchange Formats
================
.. _`Open Neural Network Exchange (ONNX)`: https://onnx.ai/
.. _`ONNX`: https://onnx.ai/
.. _`Neural Network Exchange Format (NNEF)`: https://www.khronos.org/nnef
.. _`CoreML`: https://developer.apple.com/documentation/coreml
.. _`Khronos Group`: https://www.khronos.org/
.. _`Apple`: https://www.apple.com/
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. |onnx| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/exchange_formats/onnx.png
:height: 20pt
:class: dark-light
.. |nnef| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/exchange_formats/nnef.png
:height: 15pt
:class: dark-light
.. |coreml| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/exchange_formats/coreml.png
:height: 20pt
:class: dark-light
Neural network exchange formats define a standardized file representation specifically for neural networks.
The idea is that these can be used as an intermediate representation for communicating or “exchanging” neural network architectures between different ML frameworks or between ML frameworks and the target hardware.
The focus is generally on simplifying the deployment of neural networks, with a typical workflow being: train the model, save in an exchange format, and use this exchange format to communicate with the target compilers and hardware for model inference.
ONNX |onnx|
-----------
The `Open Neural Network Exchange (ONNX)`_ is a standardized static file format which fully defines the structure of a neural network and all of its weights.
Third parties can implement their own bindings to the ONNX standard format, which then enables the model to be saved to disk in the standard ONNX file format, and be deployed on any hardware which supports the ONNX format.
Some frameworks have also added support to "load in" ONNX models from disk, as well as support for exporting to the format.
This enables some degree of model conversion between frameworks, but generally only for model deployment and not training.
ONNX focuses on core neural network operations, with limited support for other more general array processing functions such as high order optimization, signal processing, and advanced linear algebra.
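As a hedged illustration of this "train, then export" workflow (the model and file name below are made up for the example), PyTorch ships a built-in exporter to the ONNX format:
.. code-block:: python
    import torch
    model = torch.nn.Linear(3, 2)
    dummy_input = torch.randn(1, 3)
    torch.onnx.export(model, dummy_input, "linear.onnx")  # writes a standard ONNX file to disk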
NNEF |nnef|
-----------
Similar to `ONNX`_, the `Neural Network Exchange Format (NNEF)`_ is also a standardized static file format which fully defines the structure of a neural network and all of its weights, with some support also for training tools.
The format was developed and is maintained by the `Khronos Group`_.
Overall, NNEF shares a lot of similarities with ONNX, but has not reached the same level of adoption.
CoreML |coreml|
---------------
`CoreML`_ itself is not an exchange format, it is a framework which enables models to be trained and deployed on `Apple`_ devices with a simple zero-code interactive interface.
However, CoreML is built upon its own Core ML format, and Apple has open sourced :code:`coremltools`, which provides a set of tools to convert ML models from various frameworks into the Core ML format.
The Core ML format is itself an exchange format, albeit with the sole purpose of exchanging to Apple’s CoreML framework, rather than enabling exchanges between multiple different parties as is the case for the other exchange formats.
| ivy/docs/overview/related_work/exchange_formats.rst/0 | {
"file_path": "ivy/docs/overview/related_work/exchange_formats.rst",
"repo_id": "ivy",
"token_count": 919
} | 6 |
from typing import Callable, Optional, List, Union, Iterable, Sequence, Mapping
def trace_graph(
*objs: Callable,
stateful: Optional[List] = None,
arg_stateful_idxs: Optional[List] = None,
kwarg_stateful_idxs: Optional[List] = None,
to: Optional[str] = None,
include_generators: bool = True,
array_caching: bool = True,
with_numpy: bool = True,
modes_to_trace: str = "all",
backend_compile: bool = False,
static_argnums: Optional[Union[int, Iterable[int]]] = None,
static_argnames: Optional[Union[str, Iterable[str]]] = None,
compile_mode: Optional[str] = None,
graph_caching: bool = False,
args: Optional[Sequence] = None,
kwargs: Optional[Mapping] = None,
params_v=None,
v=None
):
"""Takes `fn` and traces it into a more efficient composition of backend
operations.
Parameters
----------
objs
callable(s) to trace and create a graph of
stateful
list of instances to be considered stateful during the graph tracing
arg_stateful_idxs
positional arguments to be considered stateful during the graph tracing
kwarg_stateful_idxs
keyword arguments to be considered stateful during the graph tracing
include_generators
include array creation/generation functions as part of the graph
array_caching
cache the constant arrays that appear as arguments to the functions in the graph
modes_to_trace
the module mode(s) which should be traced when tracing a trainable module
can be either "all", "train" or "eval".
backend_compile
whether to apply the native compilers, i.e. tf.function, after ivy's tracing
static_argnums
for jax's jit compilation
static_argnames
for jax's jit compilation
compile_mode
mode for torch's compilation
graph_caching
whether to cache the traced graph
args
positional arguments for `obj`
kwargs
keyword arguments for `obj`
Returns
-------
the traced `Graph` object.
Examples
--------
>>> import ivy, time
>>> from ivy import trace_graph
>>> ivy.set_backend("torch")
>>> x = ivy.array([1.])
>>> def fn(x):
... y = ivy.sum(x)
... z = ivy.prod(x)
... a = ivy.sin(y)
... b = ivy.cos(z)
... c = ivy.tan(z)
... i = ivy.round(a)
... j = ivy.floor(b)
... k = ivy.ceil(c)
... return i, j, k
>>> graph = trace_graph(fn, args=(x,))
Notice how the time taken to execute the traced function is lower than
the original function. A typical run:
>>> start = time.time()
>>> fn(x)
>>> print(time.time() - start)
0.0003559589385986328
>>> start = time.time()
>>> graph(x)
>>> print(time.time() - start)
0.0001785755157470703
"""
from ._compiler import trace_graph as _trace_graph
return _trace_graph(
*objs,
stateful=stateful,
arg_stateful_idxs=arg_stateful_idxs,
kwarg_stateful_idxs=kwarg_stateful_idxs,
to=to,
include_generators=include_generators,
array_caching=array_caching,
with_numpy=with_numpy,
modes_to_trace=modes_to_trace,
backend_compile=backend_compile,
static_argnums=static_argnums,
static_argnames=static_argnames,
compile_mode=compile_mode,
graph_caching=graph_caching,
args=args,
kwargs=kwargs,
params_v=params_v,
v=v,
)
def transpile(
*objs: Callable,
source: Optional[str] = None,
to: Optional[str] = None,
with_numpy: bool = True,
backend_compile: bool = False,
static_argnums: Optional[Union[int, Iterable[int]]] = None,
static_argnames: Optional[Union[str, Iterable[str]]] = None,
compile_mode: Optional[str] = None,
graph_caching: bool = False,
graph_optimizations: bool = True,
modes_to_trace: str = "all",
stateful: Optional[List] = None,
arg_stateful_idxs: Optional[List] = None,
kwarg_stateful_idxs: Optional[List] = None,
args: Optional[Sequence] = None,
kwargs: Optional[Mapping] = None,
params_v=None,
v=None
):
"""Transpiles Callable objects passed as arguments. If args and kwargs are
specified, transpilation is performed eagerly, otherwise, transpilation
will happen lazily.
Parameters
----------
objs
The native Callables to be transpiled
source
The framework that `obj` is from.
to
The target framework to transpile `obj` to.
args
If specified, arguments that will be used to transpile eagerly.
kwargs
If specified, keyword arguments that will be used to transpile eagerly.
Returns
-------
Either a transpiled Graph or a non-initialized LazyGraph.
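Examples
--------
A minimal usage sketch (the function and inputs below are hypothetical, and
the exact behaviour depends on which frameworks are installed):
>>> import torch
>>> from ivy import transpile
>>> def fn(x):
...     return torch.sin(x) + torch.cos(x)
>>> x = torch.tensor([1., 2., 3.])
>>> graph = transpile(fn, source="torch", to="tensorflow", args=(x,))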
"""
from ._compiler import transpile as _transpile
return _transpile(
*objs,
source=source,
to=to,
with_numpy=with_numpy,
backend_compile=backend_compile,
static_argnums=static_argnums,
static_argnames=static_argnames,
compile_mode=compile_mode,
graph_caching=graph_caching,
graph_optimizations=graph_optimizations,
modes_to_trace=modes_to_trace,
stateful=stateful,
arg_stateful_idxs=arg_stateful_idxs,
kwarg_stateful_idxs=kwarg_stateful_idxs,
args=args,
kwargs=kwargs,
params_v=params_v,
v=v,
)
def unify(
*objs: Callable,
source: Optional[str] = None,
graph_caching: bool = False,
graph_optimizations: bool = True,
args: Optional[Sequence] = None,
kwargs: Optional[Mapping] = None,
with_numpy: bool = True,
modes_to_trace: str = "all",
**transpile_kwargs
):
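"""Transpiles the Callable objects passed as arguments into framework-agnostic
(Ivy) graphs, analogously to `transpile` above with Ivy as the target. If args
and kwargs are specified, unification is performed eagerly, otherwise it will
happen lazily.
Parameters
----------
objs
The native Callables to be unified.
source
The framework that `obj` is from.
args
If specified, arguments that will be used to unify eagerly.
kwargs
If specified, keyword arguments that will be used to unify eagerly.
transpile_kwargs
Any additional keyword arguments accepted by `transpile`.
Returns
-------
Either a unified Graph or a non-initialized LazyGraph.
"""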
from ._compiler import unify as _unify
return _unify(
*objs,
source=source,
graph_caching=graph_caching,
graph_optimizations=graph_optimizations,
args=args,
kwargs=kwargs,
with_numpy=with_numpy,
modes_to_trace=modes_to_trace,
**transpile_kwargs,
)
| ivy/ivy/compiler/compiler.py/0 | {
"file_path": "ivy/ivy/compiler/compiler.py",
"repo_id": "ivy",
"token_count": 2606
} | 7 |
# global
import abc
class _ArrayWithDeviceExperimental(abc.ABC):
pass
| ivy/ivy/data_classes/array/experimental/device.py/0 | {
"file_path": "ivy/ivy/data_classes/array/experimental/device.py",
"repo_id": "ivy",
"token_count": 26
} | 8 |
# global
import abc
import numpy as np
from numbers import Number
from typing import Any, Iterable, Union, Optional, Dict, Callable, List, Tuple
# ToDo: implement all methods here as public instance methods
# local
import ivy
class _ArrayWithGeneral(abc.ABC):
def is_native_array(
self: ivy.Array,
/,
*,
exclusive: bool = False,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.is_native_array. This
method simply wraps the function, and so the docstring for
ivy.is_native_array also applies to this method with minimal changes.
Parameters
----------
self
The input to check
exclusive
Whether to check if the data type is exclusively an array, rather than a
variable or traced array.
Returns
-------
ret
Boolean, whether or not x is a native array.
Examples
--------
>>> x = ivy.array([0, 1, 2])
>>> ret = x.is_native_array()
>>> print(ret)
False
"""
return ivy.is_native_array(self, exclusive=exclusive)
def is_ivy_array(self: ivy.Array, /, *, exclusive: bool = False) -> bool:
"""ivy.Array instance method variant of ivy.is_ivy_array. This method
simply wraps the function, and so the docstring for ivy.is_ivy_array
also applies to this method with minimal changes.
Parameters
----------
self
input array
exclusive
Whether to check if the data type is exclusively an array, rather than a
variable or traced array.
Returns
-------
ret
Boolean, whether or not x is an ivy array.
Examples
--------
>>> x = ivy.array([0, 1, 2])
>>> ret = x.is_ivy_array()
>>> print(ret)
True
"""
return ivy.is_ivy_array(self, exclusive=exclusive)
def is_array(self: ivy.Array, /, *, exclusive: bool = False) -> bool:
"""ivy.Array instance method variant of ivy.is_array. This method
simply wraps the function, and so the docstring for ivy.is_array also
applies to this method with minimal changes.
Parameters
----------
self
The input to check
exclusive
Whether to check if the data type is exclusively an array, rather than a
variable or traced array.
Returns
-------
ret
Boolean, whether or not x is an array.
Examples
--------
>>> x = ivy.array([0, 1, 2])
>>> print(x.is_array())
True
"""
return ivy.is_array(self, exclusive=exclusive)
def is_ivy_container(self: ivy.Array) -> bool:
"""ivy.Array instance method variant of ivy.is_ivy_container. This
method simply wraps the function, and so the docstring for
ivy.is_ivy_container also applies to this method with minimal changes.
Parameters
----------
self
The input to check
Returns
-------
ret
Boolean, whether or not x is an ivy container.
Examples
--------
>>> x = ivy.array([0, 1, 2])
>>> print(x.is_ivy_container())
False
"""
return ivy.is_ivy_container(self)
def all_equal(
self: ivy.Array, *x2: Iterable[Any], equality_matrix: bool = False
) -> Union[bool, ivy.Array, ivy.NativeArray]:
"""ivy.Array instance method variant of ivy.all_equal. This method
simply wraps the function, and so the docstring for ivy.all_equal also
applies to this method with minimal changes.
Parameters
----------
self
input array
x2
input iterable to compare to ``self``
equality_matrix
Whether to return a matrix of equalities comparing each input with every
other. Default is ``False``.
Returns
-------
ret
Boolean, whether or not the inputs are equal, or matrix array of booleans if
equality_matrix=True is set.
Examples
--------
>>> x1 = ivy.array([1, 1, 0, 0, 1, -1])
>>> x2 = ivy.array([1, 1, 0, 0, 1, -1])
>>> y = x1.all_equal(x2)
>>> print(y)
True
>>> x1 = ivy.array([0, 0])
>>> x2 = ivy.array([0, 0])
>>> x3 = ivy.array([1, 0])
>>> y = x1.all_equal(x2, x3, equality_matrix=True)
>>> print(y)
ivy.array([[ True, True, False],
[ True, True, False],
[False, False, True]])
"""
arrays = [self] + [x for x in x2]
return ivy.all_equal(*arrays, equality_matrix=equality_matrix)
def has_nans(self: ivy.Array, /, *, include_infs: bool = True):
"""ivy.Array instance method variant of ivy.has_nans. This method
simply wraps the function, and so the docstring for ivy.has_nans also
applies to this method with minimal changes.
Parameters
----------
self
input array
include_infs
Whether to include ``+infinity`` and ``-infinity`` in the check.
Default is ``True``.
Returns
-------
ret
Boolean as to whether the array contains nans.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = x.has_nans()
>>> print(y)
False
"""
return ivy.has_nans(self, include_infs=include_infs)
def gather(
self: ivy.Array,
indices: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: int = -1,
batch_dims: int = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.gather. This method simply
wraps the function, and so the docstring for ivy.gather also applies to
this method with minimal changes.
Parameters
----------
self
The array from which to gather values.
indices
The array which indicates the indices that will be gathered along
the specified axis.
axis
The axis from which the indices will be gathered. Default is ``-1``.
batch_dims
Optional int, lets you gather different items from each element of a batch.
Default is ``0``.
out
Optional array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
New array with the values gathered at the specified indices along
the specified axis.
Examples
--------
>>> x = ivy.array([0., 1., 2.])
>>> y = ivy.array([0, 1])
>>> gather = x.gather(y)
>>> print(gather)
ivy.array([0., 1.])
>>> x = ivy.array([[0., 1., 2.],[3., 4., 5.]])
>>> y = ivy.array([[0, 1],[1, 2]])
>>> z = ivy.zeros((2, 2, 2))
>>> gather = x.gather(y, out=z)
>>> print(z)
ivy.array([[[0., 1.],[1., 2.]],[[3., 4.],[4., 5.]]])
>>> x = ivy.array([[[0., 1.], [2., 3.]],
... [[8., 9.], [10., 11.]]])
>>> y = ivy.array([[0, 1]])
>>> z = ivy.zeros((1, 2, 2, 2))
>>> gather = x.gather(y, axis=0, out=z)
>>> print(z)
ivy.array(
[[[[ 0., 1.],
[ 2., 3.]],
[[ 8., 9.],
[10., 11.]]]])
>>> x = ivy.array([[0, 10, 20, 0, 0],
... [0, 0, 0, 30, 40],
... [0, 10, 0, 0, 40]])
>>> y = ivy.array([[1, 2],[3, 4],[1, 4]])
>>> gather = x.gather(y, batch_dims=1)
>>> print(gather)
ivy.array([[10, 20], [30, 40],[10, 40]])
"""
return ivy.gather(self, indices, axis=axis, batch_dims=batch_dims, out=out)
def scatter_nd(
self: ivy.Array,
updates: Union[ivy.Array, ivy.NativeArray],
/,
shape: Optional[ivy.Array] = None,
*,
reduction: str = "sum",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Scatter updates into an array according to indices.
Parameters
----------
self
array of indices
updates
values to update input tensor with
shape
The shape of the result. Default is ``None``, in which case the ``out``
argument must be provided.
reduction
The reduction method for the scatter, one of 'sum', 'min', 'max'
or 'replace'
out
optional output array, for writing the result to.
Returns
-------
ret
New array of given shape, with the values scattered at the indices.
Examples
--------
Scatter values into an existing array:
>>> arr = ivy.array([1,2,3,4,5,6,7,8, 9, 10])
>>> indices = ivy.array([[4], [3], [1], [7]])
>>> updates = ivy.array([9, 10, 11, 12])
>>> scatter = indices.scatter_nd(updates, reduction='replace', out=arr)
>>> print(scatter)
ivy.array([ 1, 11, 3, 10, 9, 6, 7, 12, 9, 10])
Scatter values into a new array of the given shape:
>>> shape = ivy.array([2, 5])
>>> indices = ivy.array([[1,4], [0,3], [1,1], [0,2]])
>>> updates = ivy.array([25, 40, 21, 22])
>>> scatter = indices.scatter_nd(updates, shape=shape)
>>> print(scatter)
ivy.array([[ 0, 0, 22, 40, 0],
[ 0, 21, 0, 0, 25]])
"""
return ivy.scatter_nd(self, updates, shape, reduction=reduction, out=out)
def gather_nd(
self: ivy.Array,
indices: Union[ivy.Array, ivy.NativeArray],
/,
*,
batch_dims: int = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.gather_nd. This method
simply wraps the function, and so the docstring for ivy.gather_nd also
applies to this method with minimal changes.
Parameters
----------
self
The array from which to gather values.
indices
Index array.
batch_dims
optional int, lets you gather different items from each element of a batch.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
New array of given shape, with the values gathered at the indices.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([1])
>>> z = x.gather_nd(y)
>>> print(z)
ivy.array(2)
"""
return ivy.gather_nd(self, indices, batch_dims=batch_dims, out=out)
def einops_rearrange(
self: ivy.Array,
pattern: str,
/,
*,
out: Optional[ivy.Array] = None,
**axes_lengths: Dict[str, int],
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.einops_rearrange. This
method simply wraps the function, and so the docstring for
ivy.einops_rearrange also applies to this method with minimal changes.
Parameters
----------
self
Input array to be re-arranged.
pattern
Rearrangement pattern.
axes_lengths
Any additional specifications for dimensions.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
New array with einops.rearrange having been applied.
Examples
--------
With :class:`ivy.Array` instance method:
>>> x = ivy.array([[1, 2, 3],
... [-4, -5, -6]])
>>> y = x.einops_rearrange("height width -> width height")
>>> print(y)
ivy.array([[ 1, -4],
[ 2, -5],
[ 3, -6]])
>>> x = ivy.array([[[ 1, 2, 3],
... [ 4, 5, 6]],
... [[ 7, 8, 9],
... [10, 11, 12]]])
>>> y = x.einops_rearrange("c h w -> c (h w)")
>>> print(y)
ivy.array([[ 1, 2, 3, 4, 5, 6],
[ 7, 8, 9, 10, 11, 12]])
>>> x = ivy.array([[1, 2, 3, 4, 5, 6],
...                [7, 8, 9, 10, 11, 12]])
>>> y = x.einops_rearrange("c (h w) -> (c h) w", h=2, w=3)
>>> print(y)
ivy.array([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[10, 11, 12]])
"""
return ivy.einops_rearrange(self._data, pattern, out=out, **axes_lengths)
def einops_reduce(
self: ivy.Array,
pattern: str,
reduction: Union[str, Callable],
/,
*,
out: Optional[ivy.Array] = None,
**axes_lengths: Dict[str, int],
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.einops_reduce. This method
simply wraps the function, and so the docstring for ivy.einops_reduce
also applies to this method with minimal changes.
Parameters
----------
self
Input array to be reduced.
pattern
Reduction pattern.
reduction
One of available reductions ('min', 'max', 'sum', 'mean', 'prod'), or
callable.
axes_lengths
Any additional specifications for dimensions.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
New array with einops.reduce having been applied.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([[[5, 4],
... [11, 2]],
... [[3, 5],
... [9, 7]]])
>>> reduced = x.einops_reduce('a b c -> b c', 'max')
>>> print(reduced)
ivy.array([[ 5, 5],
[11, 7]])
With :class:`ivy.Array` inputs:
>>> x = ivy.array([[[5, 4, 3],
... [11, 2, 9]],
... [[3, 5, 7],
... [9, 7, 1]]])
>>> reduced = x.einops_reduce('a b c -> a () c', 'min')
>>> print(reduced)
ivy.array([[[5, 2, 3]],
[[3, 5, 1]]])
"""
return ivy.einops_reduce(
self._data, pattern, reduction, out=out, **axes_lengths
)
def einops_repeat(
self: ivy.Array,
pattern: str,
/,
*,
out: Optional[ivy.Array] = None,
**axes_lengths: Dict[str, int],
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.einops_repeat. This method
simply wraps the function, and so the docstring for ivy.einops_repeat
also applies to this method with minimal changes.
Parameters
----------
self
Input array to be repeated.
pattern
Rearrangement pattern.
axes_lengths
Any additional specifications for dimensions.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
New array with einops.repeat having been applied.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([5,4])
>>> y = x.einops_repeat('a -> a c', c=3)
>>> print(y)
ivy.array([[5, 5, 5],
[4, 4, 4]])
With :class:`ivy.Array` inputs:
>>> x = ivy.array([[5,4],
... [2, 3]])
>>> y = x.einops_repeat('a b -> a b c', c=3)
>>> print(y)
ivy.array([[[5, 5, 5], [4, 4, 4]], [[2, 2, 2], [3, 3, 3]]])
>>> print(y.shape)
(2, 2, 3)
"""
return ivy.einops_repeat(self._data, pattern, out=out, **axes_lengths)
def to_numpy(self: ivy.Array, /, *, copy: bool = True) -> np.ndarray:
"""ivy.Array instance method variant of ivy.to_numpy. This method
simply wraps the function, and so the docstring for ivy.to_numpy also
applies to this method with minimal changes.
Parameters
----------
self
input array.
copy
whether to copy the array to a new address or not. Default is ``True``.
Returns
-------
ret
a numpy array copying all the element of the array ``self``.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([-1, 0, 1])
>>> y = x.to_numpy()
>>> print(y)
[-1 0 1]
>>> x = ivy.array([[-1, 0, 1],[-1, 0, 1], [1,0,-1]])
>>> y = x.to_numpy()
>>> print(y)
[[-1 0 1]
[-1 0 1]
[ 1 0 -1]]
"""
return ivy.to_numpy(self, copy=copy)
def to_list(self: ivy.Array, /) -> List:
"""ivy.Array instance method variant of ivy.to_list. This method simply
wraps the function, and so the docstring for ivy.to_list also applies
to this method with minimal changes.
Parameters
----------
self
input array.
Returns
-------
ret
A list representation of the input array ``x``.
Examples
--------
With :class:`ivy.Array` instance method:
>>> x = ivy.array([0, 1, 2])
>>> y = x.to_list()
>>> print(y)
[0, 1, 2]
"""
return ivy.to_list(self)
def to_file(
self: ivy.Array, fid: Union[str, bytes, int], sep: str = "", format_: str = "%s"
) -> None:
"""ivy.Array instance method variant of to_file. Write array to a file
as text or binary. The data is always written in 'C' order.
Parameters
----------
self : ivy.Array
Input array.
fid : str, bytes, int
An open file object, or a string containing a filename.
sep : str, optional
Separator between array items for text output.
If '', a binary file is written.
format_ : str, optional
Format string for text file output.
Returns
-------
None
Examples
--------
With ivy.Array instance method:
>>> x = ivy.array([1, 2, 3])
>>> x.to_file('data.txt', sep=',', format_='%d')
Notes
-----
The data produced by this method can be recovered using
appropriate methods or functions depending on the data type.
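For example, the text file written above could be read back (assuming
numpy is available) with ``np.loadtxt('data.txt', delimiter=',')``.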
"""
return ivy.to_file(self, fid, sep, format_)
def supports_inplace_updates(self: ivy.Array, /) -> bool:
"""ivy.Array instance method variant of ivy.supports_inplace_updates.
This method simply wraps the function, and so the docstring for
ivy.supports_inplace_updates also applies to this method with minimal
changes.
Parameters
----------
self
The input array whose elements' data type is to be checked.
Returns
-------
ret
Bool value depends on whether the currently active backend
framework supports in-place operations with argument's data type.
Examples
--------
With :class:`ivy.Array` input and default backend set as `numpy`:
>>> x = ivy.array([0, 1, 2])
>>> ret = x.supports_inplace_updates()
>>> print(ret)
True
With `ivy.Array` input and backend set as "tensorflow":
>>> x = ivy.array([1., 4.2, 2.2])
>>> ret = x.supports_inplace_updates()
>>> print(ret)
False
"""
return ivy.supports_inplace_updates(self)
def inplace_decrement(
self: Union[ivy.Array, ivy.NativeArray], val: Union[ivy.Array, ivy.NativeArray]
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.inplace_decrement. This
method simply wraps the function, and so the docstring for
ivy.inplace_decrement also applies to this method with minimal changes.
Parameters
----------
self
The input array to be decremented by the defined value.
val
The value of decrement.
Returns
-------
ret
The array following an in-place decrement.
Examples
--------
With :class:`ivy.Array` instance methods:
>>> x = ivy.array([5.7, 4.3, 2.5, 1.9])
>>> y = x.inplace_decrement(1)
>>> print(y)
ivy.array([4.7, 3.3, 1.5, 0.9])
>>> x = ivy.asarray([4., 5., 6.])
>>> y = x.inplace_decrement(2.5)
>>> print(y)
ivy.array([1.5, 2.5, 3.5])
"""
return ivy.inplace_decrement(self, val)
def stable_divide(
self,
denominator: Union[Number, ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
min_denominator: Optional[
Union[Number, ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.stable_divide. This method
simply wraps the function, and so the docstring for ivy.stable_divide
also applies to this method with minimal changes.
Parameters
----------
self
input array, used as the numerator for division.
denominator
denominator for division.
min_denominator
the minimum denominator to use, use global ivy._MIN_DENOMINATOR by default.
Returns
-------
ret
a numpy array containing the elements of numerator divided by
the corresponding element of denominator
Examples
--------
With :class:`ivy.Array` instance method:
>>> x = ivy.asarray([4., 5., 6.])
>>> y = x.stable_divide(2)
>>> print(y)
ivy.array([2., 2.5, 3.])
>>> x = ivy.asarray([4, 5, 6])
>>> y = x.stable_divide(4, min_denominator=1)
>>> print(y)
ivy.array([0.8, 1. , 1.2])
>>> x = ivy.asarray([[4., 5., 6.], [7., 8., 9.]])
>>> y = ivy.asarray([[1., 2., 3.], [2., 3., 4.]])
>>> z = x.stable_divide(y)
>>> print(z)
ivy.array([[4. , 2.5 , 2. ],
[3.5 , 2.67, 2.25]])
"""
return ivy.stable_divide(self, denominator, min_denominator=min_denominator)
def clip_vector_norm(
self: ivy.Array,
max_norm: float,
/,
*,
p: float = 2.0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.clip_vector_norm. This
method simply wraps the function, and so the docstring for
ivy.clip_vector_norm also applies to this method with minimal changes.
Parameters
----------
self
input array
max_norm
float, the maximum value of the array norm.
p
optional float, the p-value for computing the p-norm.
Default is 2.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
An array with the vector norm downscaled to the max norm if needed.
Examples
--------
With :class:`ivy.Array` instance method:
>>> x = ivy.array([0., 1., 2.])
>>> y = x.clip_vector_norm(2.0)
>>> print(y)
ivy.array([0., 0.894, 1.79])
"""
return ivy.clip_vector_norm(self, max_norm, p=p, out=out)
def array_equal(self: ivy.Array, x: Union[ivy.Array, ivy.NativeArray], /) -> bool:
"""ivy.Array instance method variant of ivy.array_equal. This method
simply wraps the function, and so the docstring for ivy.array_equal
also applies to this method with minimal changes.
Parameters
----------
self
input array
x
input array to compare to ``self``
Returns
-------
ret
Boolean, whether or not the input arrays are equal
Examples
--------
>>> x = ivy.array([-1,0])
>>> y = ivy.array([1,0])
>>> z = x.array_equal(y)
>>> print(z)
False
>>> a = ivy.array([1, 2])
>>> b = ivy.array([1, 2])
>>> c = a.array_equal(b)
>>> print(c)
True
>>> i = ivy.array([1, 2])
>>> j = ivy.array([1, 2, 3])
>>> k = i.array_equal(j)
>>> print(k)
False
"""
return ivy.array_equal(self, x)
def assert_supports_inplace(self: ivy.Array, /) -> bool:
"""ivy.Array instance method variant of ivy.assert_supports_inplace.
This method simply wraps the function, and so the docstring for
ivy.assert_supports_inplace also applies to this method with minimal
changes.
Parameters
----------
self
input array
Returns
-------
ret
True if supports, raises IvyBackendException otherwise
Examples
--------
With :class:`ivy.Array` input and default backend set as `torch`:
>>> ivy.set_backend("torch")
>>> x = ivy.array([1, 2, 3])
>>> print(x.assert_supports_inplace())
True
With :class:`ivy.Array` input and default backend set as `numpy`:
>>> ivy.set_backend("numpy")
>>> x = ivy.array([1, 2, 3])
>>> print(x.assert_supports_inplace())
True
"""
return ivy.assert_supports_inplace(self)
def to_scalar(self: ivy.Array) -> Number:
"""ivy.Array instance method variant of ivy.to_scalar. This method
simply wraps the function, and so the docstring for ivy.to_scalar also
applies to this method with minimal changes.
Parameters
----------
self
input array.
Returns
-------
ret
a scalar copying the element of the array ``x``.
Examples
--------
With :class:`ivy.Array` instance method:
>>> x = ivy.array([3])
>>> y = x.to_scalar()
>>> print(y)
3
"""
return ivy.to_scalar(self)
def fourier_encode(
self: ivy.Array,
max_freq: Union[float, ivy.Array, ivy.NativeArray],
/,
*,
num_bands: int = 4,
linear: bool = False,
concat: bool = True,
flatten: bool = False,
) -> Union[ivy.Array, ivy.NativeArray, Tuple]:
"""ivy.Array instance method variant of ivy.fourier_encode. This method
simply wraps the function, and so the docstring for ivy.fourier_encode
also applies to this method with minimal changes.
Parameters
----------
self
input array to encode
max_freq
The maximum frequency of the encoding.
num_bands
The number of frequency bands for the encoding. Default is 4.
linear
Whether to space the frequency bands linearly as opposed to geometrically.
Default is ``False``.
concat
Whether to concatenate the position, sin and cos values, or return
separately. Default is ``True``.
flatten
Whether to flatten the position dimension into the batch dimension.
Default is ``False``.
Returns
-------
ret
New array with the final dimension expanded, and the encodings stored in
this channel.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = 1.5
>>> z = x.fourier_encode(y)
>>> print(z)
ivy.array([[ 1.0000000e+00, 1.2246468e-16, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, -1.0000000e+00, 1.0000000e+00, 1.0000000e+00,
1.0000000e+00],
[ 2.0000000e+00, -2.4492936e-16, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00,
1.0000000e+00],
[ 3.0000000e+00, 3.6739404e-16, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, -1.0000000e+00, 1.0000000e+00, 1.0000000e+00,
1.0000000e+00]])
>>> x = ivy.array([3, 10])
>>> y = 2.5
>>> z = x.fourier_encode(y, num_bands=3)
>>> print(z)
ivy.array([[ 3.0000000e+00, 3.6739404e-16, 3.6739404e-16, 3.6739404e-16,
-1.0000000e+00, -1.0000000e+00, -1.0000000e+00],
[ 1.0000000e+01, -1.2246468e-15, -1.2246468e-15, -1.2246468e-15,
1.0000000e+00, 1.0000000e+00, 1.0000000e+00]])
"""
return ivy.fourier_encode(
self,
max_freq,
num_bands=num_bands,
linear=linear,
concat=concat,
flatten=flatten,
)
def value_is_nan(self: ivy.Array, /, *, include_infs: bool = True) -> bool:
"""ivy.Array instance method variant of ivy.value_is_nan. This method
simply wraps the function, and so the docstring for ivy.value_is_nan
also applies to this method with minimal changes.
Parameters
----------
self
input array
include_infs
Whether to include infs and -infs in the check. Default is ``True``.
Returns
-------
ret
Boolean as to whether the input value is a nan or not.
Examples
--------
With one :class:`ivy.Array` instance method:
>>> x = ivy.array([92])
>>> y = x.value_is_nan()
>>> print(y)
False
>>> x = ivy.array([float('inf')])
>>> y = x.value_is_nan()
>>> print(y)
True
>>> x = ivy.array([float('nan')])
>>> y = x.value_is_nan()
>>> print(y)
True
>>> x = ivy.array([float('inf')])
>>> y = x.value_is_nan(include_infs=False)
>>> print(y)
False
"""
return ivy.value_is_nan(self, include_infs=include_infs)
def exists(self: ivy.Array, /) -> bool:
"""ivy.Array instance method variant of ivy.exists. This method simply
wraps the function, and so the docstring for ivy.exists also applies to
this method with minimal changes.
Parameters
----------
self
input array.
Returns
-------
ret
True if input is not None, else False.
Examples
--------
>>> x = ivy.array([1, 2, 3, 1.2])
>>> y = x.exists()
>>> print(y)
True
>>> x = ivy.array([])
>>> y = x.exists()
>>> print(y)
True
"""
return ivy.exists(self)
def default(
self: ivy.Array,
/,
default_val: Any,
*,
catch_exceptions: bool = False,
rev: bool = False,
with_callable: bool = False,
) -> Any:
"""ivy.Array instance method variant of ivy.default. This method simply
wraps the function, and so the docstring for ivy.default also applies
to this method with minimal changes.
Parameters
----------
self
input array
default_val
The default value.
catch_exceptions
Whether to catch exceptions from callable x. Default is ``False``.
rev
Whether to reverse the input x and default_val. Default is ``False``.
with_callable
Whether either of the arguments might be callable functions.
Default is ``False``.
Returns
-------
ret
x if x exists (is not None), else default.
Examples
--------
>>> x = ivy.array([1, 2, 3, 1.2])
>>> y = x.default(0)
>>> print(y)
ivy.array([1. , 2. , 3. , 1.2])
"""
return ivy.default(
self,
default_val,
catch_exceptions=catch_exceptions,
rev=rev,
with_callable=with_callable,
)
def stable_pow(
self: ivy.Array,
exponent: Union[Number, ivy.Array, ivy.NativeArray],
/,
*,
min_base: Optional[float] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.stable_pow. This method
simply wraps the function, and so the docstring for ivy.stable_pow also
applies to this method with minimal changes.
Parameters
----------
self
input array, used as the base.
exponent
The exponent number.
min_base
The minimum base to use, use global ivy.min_base by default.
Returns
-------
ret
The new item following the numerically stable power.
Examples
--------
With :class:`ivy.Array` instance method:
>>> x = ivy.asarray([2, 4])
>>> y = x.stable_pow(2)
>>> print(y)
ivy.array([ 4.00004, 16.00008])
>>> x = ivy.asarray([[2., 4.], [6., 8.]])
>>> y = ivy.asarray([2., 4.])
>>> z = x.stable_pow(y)
>>> print(z)
ivy.array([[4.00004000e+00, 2.56002560e+02],
[3.60001200e+01, 4.09602048e+03]])
"""
return ivy.stable_pow(self, exponent, min_base=min_base)
def inplace_update(
self: ivy.Array,
val: Union[ivy.Array, ivy.NativeArray],
/,
*,
ensure_in_backend: bool = False,
keep_input_dtype: bool = False,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.inplace_update. This method
simply wraps the function, and so the docstring for ivy.inplace_update
also applies to this method with minimal changes.
Parameters
----------
self
input array to update
val
The array to update the variable with.
ensure_in_backend
Whether to ensure that the `ivy.NativeArray` is also inplace updated.
In cases where it should be, backends which do not natively support inplace
updates will raise an exception.
keep_input_dtype
Whether or not to preserve `x` data type after the update, otherwise `val`
data type will be applied. Defaults to False.
Returns
-------
ret
The array following the in-place update.
Examples
--------
With :class:`ivy.Array` input and default backend set as `numpy`:
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([0])
>>> x.inplace_update(y)
>>> print(x)
ivy.array([0])
With :class:`ivy.Array` input and default backend set as `numpy`:
>>> x = ivy.array([1, 2, 3], dtype=ivy.float32)
>>> y = ivy.array([0, 0, 0], dtype=ivy.int32)
>>> x.inplace_update(y, keep_input_dtype=True)
>>> print(x)
ivy.array([0., 0., 0.])
With :class:`ivy.Array` input and default backend set as `torch`:
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([0])
>>> x.inplace_update(y)
>>> print(x)
ivy.array([0])
With :class:`ivy.Array` input and default backend set as `jax`:
>>> x = ivy.array([4, 5, 6])
>>> y = ivy.array([1])
>>> x.inplace_update(y)
IvyBackendException: jax: inplace_update: JAX does not natively
support inplace updates
"""
return ivy.inplace_update(
self,
val,
ensure_in_backend=ensure_in_backend,
keep_input_dtype=keep_input_dtype,
)
def inplace_increment(
self: ivy.Array, val: Union[ivy.Array, ivy.NativeArray]
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.inplace_increment. This
method wraps the function, and so the docstring for
ivy.inplace_increment also applies to this method with minimal changes.
Parameters
----------
self
The input array to be incremented by the defined value.
val
The value of increment.
Returns
-------
ret
The array following an in-place increment.
Examples
--------
With :class:`ivy.Array` instance methods:
>>> x = ivy.array([5.7, 4.3, 2.5, 1.9])
>>> y = x.inplace_increment(1)
>>> print(y)
ivy.array([6.7, 5.3, 3.5, 2.9])
>>> x = ivy.asarray([4., 5., 6.])
>>> y = x.inplace_increment(2.5)
>>> print(y)
ivy.array([6.5, 7.5, 8.5])
"""
return ivy.inplace_increment(self, val)
def clip_matrix_norm(
self: ivy.Array,
max_norm: float,
/,
*,
p: float = 2.0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.clip_matrix_norm. This
method simply wraps the function, and so the docstring for
ivy.clip_matrix_norm also applies to this method with minimal changes.
Parameters
----------
self
input array
max_norm
The maximum value of the array norm.
p
The p-value for computing the p-norm. Default is 2.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
An array with the matrix norm downscaled to the max norm if needed.
Examples
--------
With :class:`ivy.Array` instance method:
>>> x = ivy.array([[0., 1., 2.]])
>>> y = x.clip_matrix_norm(2.0)
>>> print(y)
ivy.array([[0. , 0.894, 1.79 ]])
"""
return ivy.clip_matrix_norm(self, max_norm, p=p, out=out)
def scatter_flat(
self: ivy.Array,
updates: Union[ivy.Array, ivy.NativeArray],
/,
*,
size: Optional[int] = None,
reduction: str = "sum",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.scatter_flat. This method
simply wraps the function, and so the docstring for ivy.scatter_flat
also applies to this method with minimal changes.
Parameters
----------
self
input array containing the indices at which the new values will be placed
updates
Values for the new array to hold.
size
The size of the result. Default is ``None``, in which case the ``out``
argument must be provided.
reduction
The reduction method for the scatter, one of 'sum', 'min', 'max' or
'replace'
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
New array of given shape, with the values scattered at the indices.
Examples
--------
With :class:`ivy.Array` input:
>>> indices = ivy.array([0, 0, 1, 0, 2, 2, 3, 3])
>>> updates = ivy.array([5, 1, 7, 2, 3, 2, 1, 3])
>>> size = 8
>>> out = indices.scatter_flat(updates, size=size)
>>> print(out)
ivy.array([8, 7, 5, 4, 0, 0, 0, 0])
With :class:`ivy.Array` input:
>>> indices = ivy.array([0, 0, 1, 0, 2, 2, 3, 3])
>>> updates = ivy.array([5, 1, 7, 2, 3, 2, 1, 3])
>>> out = ivy.array([0, 0, 0, 0, 0, 0, 0, 0])
>>> indices.scatter_flat(updates, out=out)
>>> print(out)
ivy.array([8, 7, 5, 4, 0, 0, 0, 0])
"""
return ivy.scatter_flat(self, updates, size=size, reduction=reduction, out=out)
def get_num_dims(self: ivy.Array, /, *, as_array: bool = False) -> int:
"""ivy.Array instance method variant of ivy.get_num_dims. This method
simply wraps the function, and so the docstring for ivy.get_num_dims also
applies to this method with minimal changes.
Parameters
----------
self
input array to infer the number of dimensions for
as_array
Whether to return the number of dimensions as an array. Default is ``False``.
Returns
-------
ret
Number of dimensions of the array
Examples
--------
>>> x = ivy.array([[0.,1.,1.],[1.,0.,0.],[8.,2.,3.]])
>>> b = x.get_num_dims()
>>> print(b)
2
>>> x = ivy.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],\
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],\
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]])
>>> b = x.get_num_dims(as_array=False)
>>> print(b)
3
>>> b = x.get_num_dims(as_array=True)
>>> print(b)
ivy.array(3)
"""
return ivy.get_num_dims(self, as_array=as_array)
def isin(
self: ivy.Array,
test_elements: ivy.Array,
/,
*,
assume_unique: bool = False,
invert: bool = False,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.isin. This method simply
wraps the function, and so the docstring for ivy.isin also applies to
this method with minimal changes.
Parameters
----------
self
input array
test_elements
values against which to test for each input element
assume_unique
If True, assumes both elements and test_elements contain unique elements,
which can speed up the calculation. Default value is False.
invert
If True, inverts the boolean return array, resulting in True values for
elements not in test_elements. Default value is False.
Returns
-------
ret
a boolean array of the same shape as ``self`` that is True for elements
that are in ``test_elements`` and False otherwise.
Examples
--------
>>> x = ivy.array([[10, 7, 4], [3, 2, 1]])
>>> y = ivy.array([1, 2, 3])
>>> x.isin(y)
ivy.array([[False, False, False], [ True, True, True]])
>>> x = ivy.array([3, 2, 1, 0])
>>> y = ivy.array([1, 2, 3])
>>> x.isin(y, invert=True)
ivy.array([False, False, False, True])
"""
return ivy.isin(
self._data, test_elements, assume_unique=assume_unique, invert=invert
)
| ivy/ivy/data_classes/array/general.py/0 | {
"file_path": "ivy/ivy/data_classes/array/general.py",
"repo_id": "ivy",
"token_count": 21024
} | 9 |
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
from typing import Optional, Union, List, Dict, Literal
# ToDo: implement all methods here as public instance methods
# noinspection PyMissingConstructor
class _ContainerWithActivations(ContainerBase):
@staticmethod
def _static_relu(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.relu. This method simply
wraps the function, and so the docstring for ivy.relu also applies to
this method with minimal changes.
Parameters
----------
x
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the rectified linear activation unit function
applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = ivy.Container.static_relu(x)
>>> print(y)
{
a: ivy.array([1., 0.]),
b: ivy.array([0.40000001, 0.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"relu",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
def relu(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.relu. This method
simply wraps the function, and so the docstring for ivy.relu also
applies to this method with minimal changes.
Parameters
----------
self
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the rectified linear activation unit function
applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = x.relu()
>>> print(y)
{
a: ivy.array([1., 0.]),
b: ivy.array([0.40000001, 0.])
}
"""
return self._static_relu(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
@staticmethod
def _static_leaky_relu(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
alpha: ivy.Container = 0.2,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.leaky_relu. This method
simply wraps the function, and so the docstring for ivy.leaky_relu also
applies to this method with minimal changes.
Parameters
----------
x
input container.
alpha
array or scalar specifying the negative slope.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the leaky relu unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0.39, -0.85]), b=ivy.array([1., -0.2]))
>>> y = ivy.Container.static_leaky_relu(x)
>>> print(y)
{
a: ivy.array([0.38999999, -0.17]),
b: ivy.array([1., -0.04])
}
"""
return ContainerBase.cont_multi_map_in_function(
"leaky_relu",
x,
alpha=alpha,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
def leaky_relu(
self: ivy.Container,
/,
*,
alpha: ivy.Container = 0.2,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.leaky_relu. This method
simply wraps the function, and so the docstring for ivy.leaky_relu also
applies to this method with minimal changes.
Parameters
----------
self
input container.
alpha
array or scalar specifying the negative slope.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the leaky relu unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0.39, -0.85]), b=ivy.array([1., -0.2]))
>>> y = x.leaky_relu()
>>> print(y)
{
a: ivy.array([0.38999999, -0.17]),
b: ivy.array([1., -0.04])
}
"""
return self._static_leaky_relu(
self,
alpha=alpha,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
@staticmethod
def _static_gelu(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
approximate: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.gelu. This method simply
wraps the function, and so the docstring for ivy.gelu also applies to
this method with minimal changes.
Parameters
----------
x
input container.
approximate
whether to use the gelu approximation algorithm or exact formulation.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the gelu unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0.3, -0.1]))
>>> y = ivy.Container.static_gelu(x)
>>> print(y)
{
a: ivy.array([0.185, -0.046])
}
"""
return ContainerBase.cont_multi_map_in_function(
"gelu",
x,
approximate=approximate,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
def gelu(
self: ivy.Container,
/,
*,
approximate: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.gelu. This method
simply wraps the function, and so the docstring for ivy.gelu also
applies to this method with minimal changes.
Parameters
----------
self
input container.
approximate
whether to use the gelu approximation algorithm or exact formulation.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the gelu unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., 2.]), b=ivy.array([-0.9, -1.]))
>>> y = x.gelu()
>>> print(y)
{
a: ivy.array([0.841, 1.95]),
b: ivy.array([-0.166, -0.159])
}
"""
return self._static_gelu(
self,
approximate=approximate,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
@staticmethod
def _static_sigmoid(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.sigmoid. This method
simply wraps the function, and so the docstring for ivy.sigmoid also
applies to this method with minimal changes.
Parameters
----------
x
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the sigmoid unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1., 1., 2.]), b=ivy.array([0.5, 0., -0.1]))
>>> y = ivy.Container.static_sigmoid(x)
>>> print(y)
{
a: ivy.array([0.2689414, 0.7310586, 0.88079703]),
b: ivy.array([0.62245935, 0.5, 0.4750208])
}
"""
return ContainerBase.cont_multi_map_in_function(
"sigmoid",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
def sigmoid(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.sigmoid. This method
simply wraps the function, and so the docstring for ivy.sigmoid also
applies to this method with minimal changes.
Parameters
----------
self
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the sigmoid unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1., 1., 2.]), b=ivy.array([0.5, 0., -0.1]))
>>> y = x.sigmoid()
>>> print(y)
{
a: ivy.array([0.2689414, 0.7310586, 0.88079703]),
b: ivy.array([0.62245935, 0.5, 0.4750208])
}
"""
return self._static_sigmoid(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
@staticmethod
def _static_softmax(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axis: Optional[ivy.Container] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.softmax. This method
simply wraps the function, and so the docstring for ivy.softmax also
applies to this method with minimal changes.
Parameters
----------
x
input container.
axis
the axis or axes along which the softmax should be computed
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the softmax unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, 0]), b=ivy.array([1.3, 0, -1.0]))
>>> y = ivy.Container.static_softmax(x)
>>> print(y)
{
a: ivy.array([0.7310586, 0.2689414]),
b: ivy.array([0.72844321, 0.19852395, 0.07303288])
}
"""
return ContainerBase.cont_multi_map_in_function(
"softmax",
x,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
def softmax(
self: ivy.Container,
/,
*,
axis: Optional[ivy.Container] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.softmax. This method
simply wraps the function, and so the docstring for ivy.softmax also
applies to this method with minimal changes.
Parameters
----------
self
input container.
axis
the axis or axes along which the softmax should be computed
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the softmax unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, 0]), b=ivy.array([1.3, 0, -1.0]))
>>> y = x.softmax()
>>> print(y)
{
a: ivy.array([0.7310586, 0.2689414]),
b: ivy.array([0.72844321, 0.19852395, 0.07303288])
}
"""
return self._static_softmax(
self,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
@staticmethod
def _static_softplus(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
beta: Optional[Union[int, float, ivy.Container]] = None,
threshold: Optional[Union[int, float, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.softplus. This method
simply wraps the function, and so the docstring for ivy.softplus also
applies to this method with minimal changes.
Parameters
----------
x
input container.
beta
The beta value for the softplus formation. Default: ``None``.
threshold
values above this revert to a linear function. Default: ``None``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the softplus unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-0.3461, -0.6491]), b=ivy.array([1., 0.]))
>>> y = ivy.Container.static_softplus(x)
>>> print(y)
{
a: ivy.array([0.53499615, 0.42036411]),
b: ivy.array([1.31326175, 0.69314718])
}
>>> x = ivy.Container(a=ivy.array([-1., 2., 4.]))
>>> y = ivy.Container.static_softplus(x, beta=0.5, threshold=2)
>>> print(y)
{
a: ivy.array([0.948, 2.63, 4.25])
}
"""
return ContainerBase.cont_multi_map_in_function(
"softplus",
x,
beta=beta,
threshold=threshold,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
def softplus(
self: ivy.Container,
/,
*,
beta: Optional[Union[int, float, ivy.Container]] = None,
threshold: Optional[Union[int, float, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.softplus. This method
simply wraps the function, and so the docstring for ivy.softplus also
applies to this method with minimal changes.
Parameters
----------
self
input container.
beta
The beta value for the softplus formation. Default: ``None``.
threshold
values above this revert to a linear function. Default: ``None``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the softplus unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-0.3461, -0.6491]))
>>> y = x.softplus()
>>> print(y)
{
a: ivy.array([0.535, 0.42])
}
>>> x = ivy.Container(a=ivy.array([-1., 2., 4.]))
>>> y = x.softplus(beta=0.5, threshold=2)
>>> print(y)
{
a: ivy.array([0.948, 2.63, 4.25])
}
"""
return self._static_softplus(
self,
beta=beta,
threshold=threshold,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
@staticmethod
def _static_log_softmax(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axis: Optional[ivy.Container] = -1,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.log_softmax. This method
simply wraps the function, and so the docstring for ivy.log_softmax
also applies to this method with minimal changes.
Parameters
----------
x
input container.
axis
the axis or axes along which the log_softmax should be computed
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the log_softmax unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1.0, -0.98, 2.3]))
>>> y = ivy.Container.static_log_softmax(x)
>>> print(y)
{
a: ivy.array([-3.37, -3.35, -0.0719])
}
>>> x = ivy.Container(a=ivy.array([1.0, 2.4]), b=ivy.array([-0.2, -1.0]))
>>> y = ivy.Container.static_log_softmax(x)
>>> print(y)
{
a: ivy.array([-1.62, -0.22]),
b: ivy.array([-0.371, -1.17])
}
"""
return ContainerBase.cont_multi_map_in_function(
"log_softmax",
x,
axis=axis,
complex_mode=complex_mode,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def log_softmax(
self: ivy.Container,
/,
*,
axis: Optional[ivy.Container] = -1,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
):
"""ivy.Container instance method variant of ivy.log_softmax. This
method simply wraps the function, and so the docstring for
ivy.log_softmax also applies to this method with minimal changes.
Parameters
----------
self
input container.
axis
the axis or axes along which the log_softmax should be computed
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the log_softmax unit function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1.0, -0.98, 2.3]))
>>> y = x.log_softmax()
>>> print(y)
{
a: ivy.array([-3.37, -3.35, -0.0719])
}
>>> x = ivy.Container(a=ivy.array([1.0, 2.4]), b=ivy.array([-0.2, -1.0]))
>>> y = x.log_softmax()
>>> print(y)
{
a: ivy.array([-1.62, -0.22]),
b: ivy.array([-0.371, -1.17])
}
"""
return self._static_log_softmax(
self,
axis=axis,
complex_mode=complex_mode,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_mish(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.mish. This method simply
wraps the function, and so the docstring for ivy.mish also applies to
this method with minimal changes.
Parameters
----------
x
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the mish activation function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = ivy.Container.static_mish(x)
>>> print(y)
{
a: ivy.array([0.86509842, -0.30883577]),
b: ivy.array([0.28903052, -0.10714479])
}
"""
return ContainerBase.cont_multi_map_in_function(
"mish",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
def mish(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.mish. This method
simply wraps the function, and so the docstring for ivy.mish also
applies to this method with minimal changes.
Parameters
----------
self
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the mish activation function applied element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> y = x.mish()
>>> print(y)
{
a: ivy.array([0.86509842, -0.30883577]),
b: ivy.array([0.28903052, -0.10714479])
}
"""
return self._static_mish(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
@staticmethod
def _static_hardswish(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.hardswish. This method
simply wraps the function, and so the docstring for ivy.hardswish also
applies to this method with minimal changes.
Parameters
----------
x
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the hardswish activation function applied
element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-3., 4., 5.]), b=ivy.array([0., 5.]))
>>> x = ivy.hardswish(x, out=x)
>>> x
{
a: ivy.array([-0., 4., 5.]),
b: ivy.array([0., 5.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"hardswish",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
def hardswish(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.hardswish. This method
simply wraps the function, and so the docstring for ivy.hardswish also
applies to this method with minimal changes.
Parameters
----------
self
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the hardswish activation function applied
element-wise.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-3., 4., 5.]), b=ivy.array([0., 5.]))
>>> y = x.hardswish()
>>> print(y)
{
a: ivy.array([-0., 4., 5.]),
b: ivy.array([0., 5.])
}
"""
return self._static_hardswish(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
| ivy/ivy/data_classes/container/activations.py/0 | {
"file_path": "ivy/ivy/data_classes/container/activations.py",
"repo_id": "ivy",
"token_count": 21075
} | 10 |
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithGradientsExperimental(ContainerBase):
pass
| ivy/ivy/data_classes/container/experimental/gradients.py/0 | {
"file_path": "ivy/ivy/data_classes/container/experimental/gradients.py",
"repo_id": "ivy",
"token_count": 34
} | 11 |
# global
from typing import Optional, Tuple, Union, List, Callable, Dict, Sequence
# local
from ivy.data_classes.container.base import ContainerBase
import ivy
# ToDo: implement all methods here as public instance methods
# ToDo: update docstrings and typehints according to ivy\layers
# noinspection PyMissingConstructor
class _ContainerWithLayers(ContainerBase):
@staticmethod
def _static_linear(
x: ivy.Container,
weight: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
bias: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.linear. This method
simply wraps the function, and so the docstring for ivy.linear also
applies to this method with minimal changes.
Parameters
----------
x
The input x to compute linear transformation on.
*[outer_batch_shape,inner_batch_shape,in_features]*
weight
The weight matrix. *[outer_batch_shape,out_features,in_features]*
bias
The bias vector, default is ``None``. *[outer_batch_shape,out_features]*
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Result array of the linear transformation.
*[outer_batch_shape,inner_batch_shape,out_features]*
Examples
--------
>>> x = ivy.Container(a=ivy.array([[1.1, 2.2, 3.3], \
[11., 22., 33.]]), \
b=ivy.array([[1.245, 0.278, 4.105], \
[7., 13., 17.]]))
>>> w = ivy.array([[1., 2., 3.], \
[4., 5., 6.], \
[7., 8., 9.]])
>>> b = ivy.array([1., 0., -1.])
>>> y = ivy.Container.static_linear(x, w, bias=b)
>>> print(y)
{
a: ivy.array([[16.4, 35.2, 54.],
[155., 352., 549.]]),
b: ivy.array([[15.1, 31., 46.9],
[85., 195., 305.]])
}
>>> x = ivy.Container(a=ivy.array([[1.1, 2.2, 3.3], \
[.0, .1, .2]]), \
b=ivy.array([[1.245, 0.278, 4.105], \
[.7, .8, .9]]))
>>> w = ivy.Container(a=ivy.array([[1., 2., 3.]]), \
b=ivy.array([[.1, .2, .3]]))
>>> b = ivy.Container(a=ivy.array([1.]), b=ivy.array([-1.]))
>>> y = ivy.Container.static_linear(x, w, bias=b)
>>> print(y)
{
a: ivy.array([[16.4],
[1.8]]),
b: ivy.array([[0.412],
[-0.5]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"linear",
x,
weight,
bias=bias,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def linear(
self: ivy.Container,
weight: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
bias: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.linear. This method
simply wraps the function, and so the docstring for ivy.linear also
applies to this method with minimal changes.
Parameters
----------
self
The input container to compute linear transformation on.
*[outer_batch_shape,inner_batch_shape,in_features]*
weight
The weight matrix. *[outer_batch_shape,out_features,in_features]*
bias
The bias vector, default is ``None``. *[outer_batch_shape,out_features]*
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Result array of the linear transformation.
*[outer_batch_shape,inner_batch_shape,out_features]*
Examples
--------
>>> x = ivy.Container(a=ivy.array([[1.1, 2.2, 3.3],
... [11., 22., 33.]]),
... b=ivy.array([[1.245, 0.278, 4.105],
... [7., 13., 17.]]))
>>> w = ivy.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> b = ivy.Container(a=ivy.array([1., 0., -1.]),
... b=ivy.array([1., 1., 0.]))
>>> y = x.linear(w, bias=b, out=x)
>>> print(y)
{
a: ivy.array([[16.39999962, 35.19999695, 54.],
[155., 352., 549.]]),
b: ivy.array([[15.11600018, 32., 47.88399887],
[85., 196., 306.]])
}
"""
return self._static_linear(
self,
weight,
bias=bias,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_dropout(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
prob: Union[float, ivy.Container],
/,
*,
scale: Union[bool, ivy.Container] = True,
dtype: Optional[Union[ivy.Dtype, ivy.Container]] = None,
training: Union[bool, ivy.Container] = True,
seed: Optional[Union[int, ivy.Container]] = None,
noise_shape: Optional[Union[Sequence[int], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.dropout. This method
simply wraps the function, and so the docstring for ivy.dropout also
applies to this method with minimal changes.
Parameters
----------
x
The input container x to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
scale
Whether to scale the output by `1/(1-prob)`, default is ``True``.
dtype
Output array data type. If dtype is None, the output array data type
must be inferred from x. Default: ``None``.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
seed
Set a default seed for random number generating (for reproducibility).
Default is ``None``.
noise_shape
a sequence representing the shape of the binary dropout mask that will be
multiplied with the input.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Result array of the output after dropout is performed.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[1., 2., 3.], [4., 5., 6.]]),
... b=ivy.array([7., 8., 9.]))
>>> y = ivy.Container.static_dropout(x, 0.3)
>>> print(y)
{
a: ivy.array([[0., 0., 4.28571415],
[5.71428585, 7.14285755, 0.]]),
b: ivy.array([0., 11.4285717, 12.8571434])
}
"""
return ContainerBase.cont_multi_map_in_function(
"dropout",
x,
prob,
scale=scale,
dtype=dtype,
training=training,
seed=seed,
noise_shape=noise_shape,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def dropout(
self: ivy.Container,
prob: Union[float, ivy.Container],
/,
*,
scale: Union[bool, ivy.Container] = True,
dtype: Optional[Union[ivy.Dtype, ivy.Container]] = None,
training: Union[bool, ivy.Container] = True,
seed: Optional[Union[int, ivy.Container]] = None,
noise_shape: Optional[Union[Sequence[int], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.dropout. This method
simply wraps the function, and so the docstring for ivy.dropout also
applies to this method with minimal changes.
Parameters
----------
self
The input container to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
scale
Whether to scale the output by `1/(1-prob)`, default is ``True``.
dtype
output array data type. If dtype is None, the output array data type
must be inferred from x. Default: ``None``.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
seed
Set a default seed for random number generating (for reproducibility).
Default is ``None``.
noise_shape
a sequence representing the shape of the binary dropout mask that will be
multiplied with the input.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Result array of the output after dropout is performed.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[1., 2., 3.], [4., 5., 6.]]),
... b=ivy.array([7., 8., 9.]))
>>> y = x.dropout(0.3)
>>> print(y)
{
a: ivy.array([[0., 0., 4.28571415],
[5.71428585, 7.14285755, 0.]]),
b: ivy.array([0., 11.4285717, 12.8571434])
}
"""
return self._static_dropout(
self,
prob,
scale=scale,
dtype=dtype,
training=training,
seed=seed,
noise_shape=noise_shape,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_dropout1d(
x: ivy.Container,
prob: Union[float, ivy.Container],
/,
*,
training: Union[bool, ivy.Container] = True,
data_format: Union[str, ivy.Container] = "NWC",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.dropout1d. This method
simply wraps the function, and so the docstring for ivy.dropout1d also
applies to this method with minimal changes.
Parameters
----------
x
The input container to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
data_format
"NWC" or "NCW". Default is ``"NCW"``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Result container of the output after dropout is performed.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 2, 3]).reshape([1, 1, 3]),
... b=ivy.array([4, 5, 6]).reshape([1, 1, 3]))
>>> y = ivy.Container.static_dropout1d(x, 0.5)
>>> print(y)
{
a: ivy.array([[[0., 4., 0.]]]),
b: ivy.array([[[0., 0., 12.]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"dropout1d",
x,
prob,
training=training,
data_format=data_format,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def dropout1d(
self: ivy.Container,
prob: Union[float, ivy.Container],
/,
*,
training: Union[bool, ivy.Container] = True,
data_format: Union[str, ivy.Container] = "NWC",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.dropout1d. This method
simply wraps the function, and so the docstring for ivy.dropout1d also
applies to this method with minimal changes.
Parameters
----------
self
The input container to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
data_format
"NWC" or "NCW". Default is ``"NCW"``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Result container of the output after dropout is performed.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 2, 3]).reshape([1, 1, 3]),
... b=ivy.array([4, 5, 6]).reshape([1, 1, 3]))
>>> y = x.dropout1d(0.5)
>>> print(y)
{
a: ivy.array([[[0., 4., 0.]]]),
b: ivy.array([[[0., 0., 12.]]])
}
"""
return self._static_dropout1d(
self,
prob,
training=training,
data_format=data_format,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_dropout2d(
x: ivy.Container,
prob: Union[float, ivy.Container],
/,
*,
training: Union[bool, ivy.Container] = True,
data_format: Union[str, ivy.Container] = "NHWC",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.dropout2d. This method
simply wraps the function, and so the docstring for ivy.dropout2d also
applies to this method with minimal changes.
Parameters
----------
x
The input container to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
data_format
"NHWC" or "NCHW". Default is ``"NHWC"``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Result container of the output after dropout is performed.
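Examples
--------
A minimal illustrative call (the dropout mask is sampled at random, so
which entries are zeroed and how the remainder are scaled will vary
between runs):
>>> x = ivy.Container(a=ivy.array([[100, 200, 300]]),
...                   b=ivy.array([[400, 500, 600]]))
>>> y = ivy.Container._static_dropout2d(x, 0.5)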
"""
return ContainerBase.cont_multi_map_in_function(
"dropout2d",
x,
prob,
training=training,
data_format=data_format,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def dropout2d(
self: ivy.Container,
prob: Union[float, ivy.Container],
/,
*,
training: Union[bool, ivy.Container] = True,
data_format: Union[str, ivy.Container] = "NHWC",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.dropout2d. This method
simply wraps the function, and so the docstring for ivy.dropout2d also
applies to this method with minimal changes.
Parameters
----------
self
The input container to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
data_format
"NHWC" or "NCHW". Default is ``"NHWC"``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Result container of the output after dropout is performed.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[100, 200, 300]]),
... b=ivy.array([[400, 500, 600]]))
>>> y = x.dropout2d(0.5)
>>> print(y)
{
a: ivy.array([[200., 0., 600.]]),
b: ivy.array([[0., 0., 0.]])
}
"""
return self._static_dropout2d(
self,
prob,
training=training,
data_format=data_format,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_dropout3d(
x: ivy.Container,
prob: Union[float, ivy.Container],
/,
*,
training: Union[bool, ivy.Container] = True,
data_format: Union[str, ivy.Container] = "NDHWC",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.dropout3d. This method
simply wraps the function, and so the docstring for ivy.dropout3d also
applies to this method with minimal changes.
Parameters
----------
x
The input container to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
data_format
"NDHWC" or "NCDHW". Default is ``"NDHWC"``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Result container of the output after dropout is performed.
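Examples
--------
A minimal illustrative call on 5-D "NDHWC" inputs (the dropout mask is
random, so no fixed output values are shown):
>>> x = ivy.Container(a=ivy.ones((1, 2, 2, 2, 1)),
...                   b=ivy.ones((1, 2, 2, 2, 1)))
>>> y = ivy.Container._static_dropout3d(x, 0.5)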
"""
return ContainerBase.cont_multi_map_in_function(
"dropout3d",
x,
prob,
training=training,
data_format=data_format,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def dropout3d(
self: ivy.Container,
prob: Union[float, ivy.Container],
/,
*,
training: Union[bool, ivy.Container] = True,
data_format: Union[str, ivy.Container] = "NDHWC",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.dropout3d. This method
simply wraps the function, and so the docstring for ivy.dropout3d also
applies to this method with minimal changes.
Parameters
----------
self
The input container to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
data_format
"NDHWC" or "NCDHW". Default is ``"NDHWC"``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Result container of the output after dropout is performed.
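Examples
--------
A minimal illustrative call (the output is stochastic, so no fixed
values are shown):
>>> x = ivy.Container(a=ivy.ones((1, 2, 2, 2, 1)),
...                   b=ivy.ones((1, 2, 2, 2, 1)))
>>> y = x.dropout3d(0.5)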
"""
return self._static_dropout3d(
self,
prob,
training=training,
data_format=data_format,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_scaled_dot_product_attention(
query: Union[ivy.Array, ivy.NativeArray, ivy.Container],
key: Union[ivy.Array, ivy.NativeArray, ivy.Container],
value: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
scale: Union[float, ivy.Container],
mask: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
dropout_p: Optional[float] = 0.0,
is_causal: Optional[bool] = False,
training: Optional[bool] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of
ivy.scaled_dot_product_attention. This method simply wraps the
function, and so the docstring for ivy.scaled_dot_product_attention
also applies to this method with minimal changes.
Parameters
----------
query
The queries input container. The shape of queries input array leaves should
be in *[batch_shape,num_queries,feat_dim]*. The queries input array leaves
should have the same size as keys and values.
key
The keys input array container. The shape of keys input array leaves
should be in *[batch_shape,num_keys,feat_dim]*. The keys input array
leaves should have the same size as queries and values.
value
The values input array container. The shape of values input array
leaves should be in *[batch_shape,num_keys,feat_dim]*. The values
input array leaves should have the same size as queries and keys.
scale
The scale float value used to scale the query-key pairs before softmax.
mask
The mask input array/container. The mask to apply to the query-key values.
Default is None. The shape of mask input array leaves should be in
*[batch_shape,num_queries,num_keys]*.
dropout_p
Specifies the dropout probability, if greater than 0.0, dropout is applied
is_causal
If true, assumes causal attention masking and errors if both `mask` and
`is_causal` are set.
training
If True, dropout is used, otherwise dropout is not activated.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The output container following applications of scaled dot-product
attention. The output array is the weighted sum produced by the
attention score and value. The shape of output array is
*[batch_shape,num_queries,feat_dim]* .
Examples
--------
With :class:`ivy.Container` input:
>>> q = ivy.Container(a=ivy.array([[[0.2, 1.], [2.7, 3.], [4.4, 5.6]]]),
... b=ivy.array([[[1.2, 1.], [2.2, 3.], [4.4, 5.6]]]))
>>> k = ivy.Container(a=ivy.array([[[4.2, 1.], [2.2, 3.3],[4.4, 5.6]]]),
... b=ivy.array([[[3.2, 1.], [2.2, 3.6], [4.0, 5.6]]]))
>>> v = ivy.Container(a=ivy.array([[[5.2, 1.], [2.1, 3.],[4.4, 5.6]]]),
... b=ivy.array([[[0.2, 1.], [2.2, 3.],[4.4, 5.6]]]))
>>> mask = ivy.Container(a=ivy.array([[[1.0, 1.0, 1.0],
... [1.0, 1.0, 1.0],
... [1.0, 1.0,1.0]]]),
... b=ivy.array([[[1.0, 1.0, 1.0],
... [1.0, 1.0, 1.0],
... [1.0, 1.0,1.0]]]))
>>> result = ivy.Container.static_scaled_dot_product_attention(q,
... k,
... v,
... scale=1,
... mask=mask)
>>> print(result)
{
a: ivy.array([[[4.27, 5.4],
[4.4, 5.6],
[4.4, 5.6]]]),
b: ivy.array([[[4.35, 5.54],
[4.4, 5.6],
[4.4, 5.6]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"scaled_dot_product_attention",
query,
key,
value,
scale=scale,
mask=mask,
dropout_p=dropout_p,
is_causal=is_causal,
training=training,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def scaled_dot_product_attention(
self: ivy.Container,
key: Union[ivy.Array, ivy.NativeArray, ivy.Container],
value: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
scale: Union[float, ivy.Container],
mask: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
dropout_p: Optional[float] = 0.0,
is_causal: Optional[bool] = False,
training: Optional[bool] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of
ivy.scaled_dot_product_attention. This method simply wraps the
function, and so the docstring for ivy.scaled_dot_product_attention
also applies to this method with minimal changes.
Parameters
----------
self
The queries input container. The shape of queries input array leaves should
be in *[batch_shape,num_queries,feat_dim]*. The queries input array leaves
should have the same size as keys and values.
key
The keys input array container. The shape of keys input array leaves
should be in *[batch_shape,num_keys,feat_dim]*. The keys input array
leaves should have the same size as queries and values.
value
The values input array container. The shape of values input array
leaves should be in *[batch_shape,num_keys,feat_dim]*. The values
input array leaves should have the same size as queries and keys.
scale
The scale float value used to scale the query-key pairs before softmax.
mask
The mask input array/container. The mask to apply to the query-key values.
Default is None. The shape of mask input array leaves should be in
*[batch_shape,num_queries,num_keys]*.
dropout_p
Specifies the dropout probability, if greater than 0.0, dropout is applied
is_causal
If true, assumes causal attention masking and errors if both `mask` and
`is_causal` are set.
training
If True, dropout is used, otherwise dropout is not activated.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The output container following applications of scaled dot-product
attention. The output array is the weighted sum produced by the
attention score and value. The shape of output array is
*[batch_shape,num_queries,feat_dim]* .
Examples
--------
With :class:`ivy.Container` input:
>>> q = ivy.Container(a=ivy.array([[[0.2, 1.], [2.7, 3.], [4.4, 5.6]]]),
... b=ivy.array([[[1.2, 1.], [2.2, 3.], [4.4, 5.6]]]))
>>> k = ivy.Container(a=ivy.array([[[4.2, 1.], [2.2, 3.3], [4.4, 5.6]]]),
... b=ivy.array([[[3.2, 1.], [2.2, 3.6], [4.0, 5.6]]]))
>>> v = ivy.Container(a=ivy.array([[[5.2, 1.], [2.1, 3.], [4.4, 5.6]]]),
... b=ivy.array([[[0.2, 1.], [2.2, 3.], [4.4, 5.6]]]))
>>> result = ivy.scaled_dot_product_attention(q, k, v, scale=1, dropout_p=0.1,
... is_causal=True, training=True)
>>> print(result)
{
a: ivy.array([[[5.19999981, 1.],
[2.59249449, 2.68226194],
[4.4000001, 5.5999999]]]),
b: ivy.array([[[0.2, 1.],
[2.19603825, 2.9960382],
[4.4000001, 5.5999999]]])
}
>>> q = ivy.Container(a=ivy.array([[[0.2, 1.], [2.7, 3.], [4.4, 5.6]]]),
... b=ivy.array([[[1.2, 1.], [2.2, 3.], [4.4, 5.6]]]))
>>> k = ivy.Container(a=ivy.array([[[4.2, 1.], [2.2, 3.3], [4.4, 5.6]]]),
... b=ivy.array([[[3.2, 1.], [2.2, 3.6], [4.0, 5.6]]]))
>>> v = ivy.Container(a=ivy.array([[[5.2, 1.], [2.1, 3.], [4.4, 5.6]]]),
... b=ivy.array([[[0.2, 1.], [2.2, 3.], [4.4, 5.6]]]))
>>> mask = ivy.Container(a=ivy.array([[[1.0, 1.0, 1.0],
... [1.0, 1.0, 1.0],
... [1.0, 1.0, 1.0]]]),
... b=ivy.array([[[1.0, 1.0, 1.0],
... [1.0, 1.0, 1.0],
... [1.0, 1.0,1.0]]]))
>>> result = ivy.scaled_dot_product_attention(q,k,v,scale=1,mask=mask)
>>> print(result)
{
a: ivy.array([[[4.26894283, 5.40236187],
[4.39999437, 5.59999037],
[4.4000001, 5.5999999]]]),
b: ivy.array([[[4.35046196, 5.54282808],
[4.39989519, 5.5998764],
[4.4000001, 5.5999999]]])
}
"""
return self._static_scaled_dot_product_attention(
self,
key,
value,
scale=scale,
mask=mask,
dropout_p=dropout_p,
is_causal=is_causal,
training=training,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_multi_head_attention(
query: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
value: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
num_heads: Union[int, ivy.Container] = 8,
scale: Optional[Union[float, ivy.Container]] = None,
attention_mask: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
in_proj_weights: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
q_proj_weights: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
k_proj_weights: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
v_proj_weights: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
out_proj_weights: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
in_proj_bias: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
out_proj_bias: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
is_causal: Union[bool, ivy.Container] = False,
key_padding_mask: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
bias_k: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
bias_v: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
static_k: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
static_v: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
add_zero_attn: Union[bool, ivy.Container] = False,
return_attention_weights: Union[bool, ivy.Container] = False,
average_attention_weights: Union[bool, ivy.Container] = True,
dropout: Union[float, ivy.Container] = 0.0,
training: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
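"""ivy.Container static method variant of ivy.multi_head_attention.
This method simply wraps the function, and so the docstring for
ivy.multi_head_attention also applies to this method with minimal
changes.
"""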
return ContainerBase.cont_multi_map_in_function(
"multi_head_attention",
query,
key=key,
value=value,
num_heads=num_heads,
scale=scale,
attention_mask=attention_mask,
in_proj_weights=in_proj_weights,
q_proj_weights=q_proj_weights,
k_proj_weights=k_proj_weights,
v_proj_weights=v_proj_weights,
out_proj_weights=out_proj_weights,
in_proj_bias=in_proj_bias,
out_proj_bias=out_proj_bias,
is_causal=is_causal,
key_padding_mask=key_padding_mask,
bias_k=bias_k,
bias_v=bias_v,
static_k=static_k,
static_v=static_v,
add_zero_attn=add_zero_attn,
return_attention_weights=return_attention_weights,
average_attention_weights=average_attention_weights,
dropout=dropout,
training=training,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def multi_head_attention(
self: ivy.Container,
/,
*,
key: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
value: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
num_heads: Union[int, ivy.Container] = 8,
scale: Optional[Union[float, ivy.Container]] = None,
attention_mask: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
in_proj_weights: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
q_proj_weights: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
k_proj_weights: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
v_proj_weights: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
out_proj_weights: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
in_proj_bias: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
out_proj_bias: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
is_causal: Union[bool, ivy.Container] = False,
key_padding_mask: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
bias_k: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
bias_v: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
static_k: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
static_v: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
add_zero_attn: Union[bool, ivy.Container] = False,
return_attention_weights: Union[bool, ivy.Container] = False,
average_attention_weights: Union[bool, ivy.Container] = True,
dropout: Union[float, ivy.Container] = 0.0,
training: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
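"""ivy.Container instance method variant of ivy.multi_head_attention.
This method simply wraps the function, and so the docstring for
ivy.multi_head_attention also applies to this method with minimal
changes.
"""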
return self._static_multi_head_attention(
self,
key=key,
value=value,
num_heads=num_heads,
scale=scale,
attention_mask=attention_mask,
in_proj_weights=in_proj_weights,
q_proj_weights=q_proj_weights,
k_proj_weights=k_proj_weights,
v_proj_weights=v_proj_weights,
out_proj_weights=out_proj_weights,
in_proj_bias=in_proj_bias,
out_proj_bias=out_proj_bias,
is_causal=is_causal,
key_padding_mask=key_padding_mask,
bias_k=bias_k,
bias_v=bias_v,
static_k=static_k,
static_v=static_v,
add_zero_attn=add_zero_attn,
return_attention_weights=return_attention_weights,
average_attention_weights=average_attention_weights,
dropout=dropout,
training=training,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_conv1d(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, Tuple[int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
data_format: str = "NWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int]] = 1,
dilations: Union[int, Tuple[int]] = 1,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
bias: Optional[ivy.Container] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.conv1d. This method
simply wraps the function, and so the docstring for ivy.conv1d also
applies to this method with minimal changes.
Parameters
----------
x
Input image *[batch_size,w, d_in]*.
filters
Convolution filters *[fw,d_in, d_out]*. (d_in must match the channel dimension of x)
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating the
per-dimension paddings.
data_format
"NWC" or "NCW". Defaults to "NWC".
filter_format
Either "channel_first" or "channel_last". Defaults to "channel_last".
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
dilations
The dilation factor for each dimension of input. (Default value = 1)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
bias
Bias array of shape *[d_out]*.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[[2., 3., 4.], [5., 6., 7.]]]),
... b=ivy.array([[[7., 8., 9.], [10., 11., 12]]]))
>>> filters = ivy.array([[[0., 0.5, 1.], [0.25, 0.5, 0.75], [-0.5, 0., 0.5 ]]])
>>> result = ivy.Container.static_conv1d(x, filters, (1,), 'VALID')
>>> print(result)
{
a: ivy.array([[[-1.25, 2.5, 6.25],
[-2., 5.5, 13.]]]),
b: ivy.array([[[-2.5, 7.5, 17.5],
[-3.25, 10.5, 24.2]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"conv1d",
x,
filters,
strides,
padding,
data_format=data_format,
filter_format=filter_format,
x_dilations=x_dilations,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
bias=bias,
out=out,
)
def conv1d(
self: ivy.Container,
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, Tuple[int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
data_format: str = "NWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int]] = 1,
dilations: Union[int, Tuple[int]] = 1,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
bias: Optional[ivy.Container] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.conv1d. This method
simply wraps the function, and so the docstring for ivy.conv1d also
applies to this method with minimal changes.
Parameters
----------
self
Input image *[batch_size,w, d_in]*.
filters
Convolution filters *[fw,d_in, d_out]*. (d_in must match the channel dimension of x)
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating the
per-dimension paddings.
data_format
"NWC" or "NCW". Defaults to "NWC".
filter_format
Either "channel_first" or "channel_last". Defaults to "channel_last".
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
dilations
The dilation factor for each dimension of input. (Default value = 1)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
bias
Bias array of shape *[d_out]*.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
>>> x = ivy.Container(a=ivy.array([[[2., 3., 4.], [5., 6., 7.]]]),
... b=ivy.array([[[7., 8., 9.], [10., 11., 12]]]))
>>> filters = ivy.array([[[0., 0.5, 1.], [0.25, 0.5, 0.75], [-0.5, 0., 0.5 ]]])
>>> result = x.conv1d(filters, (1,), 'VALID')
>>> print(result)
{
a: ivy.array([[[-1.25, 2.5, 6.25],
[-2., 5.5, 13.]]]),
b: ivy.array([[[-2.5, 7.5, 17.5],
[-3.25, 10.5, 24.2]]])
}
"""
return self._static_conv1d(
self,
filters,
strides,
padding,
data_format=data_format,
filter_format=filter_format,
x_dilations=x_dilations,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
bias=bias,
out=out,
)
@staticmethod
def _static_conv2d(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, Tuple[int, int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
data_format: str = "NHWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int, int]] = 1,
dilations: Union[int, Tuple[int, int]] = 1,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
bias: Optional[ivy.Container] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.conv2d. This method
simply wraps the function, and so the docstring for ivy.conv2d also
applies to this method with minimal changes.
Parameters
----------
x
Input image *[batch_size,h,w,d_in]*.
filters
Convolution filters *[fh,fw,d_in,d_out]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
>>> x = ivy.Container(a = ivy.eye(3, 3).reshape((1, 3, 3, 1)),
... b = ivy.eye(5, 5).reshape((1, 5, 5, 1)))
>>> filters = ivy.array([[2., 0., 1.],
... [1., 3., 1.],
... [0., 1., 1.]]).reshape((3, 3, 1, 1))
>>> result = ivy.Container.static_conv2d(x, filters, (2,), 'SAME')
>>> print(result)
{
a:ivy.array([[[[4.],[0.]],[[1.],[5.]]]]),
b:ivy.array([[[[4.],[0.],[0.]],[[1.],[6.],[0.]],[[0.],[1.],[5.]]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"conv2d",
x,
filters,
strides,
padding,
data_format=data_format,
filter_format=filter_format,
x_dilations=x_dilations,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
bias=bias,
out=out,
)
def conv2d(
self: ivy.Container,
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, Tuple[int, int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
data_format: str = "NHWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int, int]] = 1,
dilations: Union[int, Tuple[int, int]] = 1,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
bias: Optional[ivy.Container] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of `ivy.conv2d`. This method
simply wraps the function, and so the docstring for `ivy.conv2d` also
applies to this method with minimal changes.
Parameters
----------
self
Input image *[batch_size,h,w,d_in]*.
filters
Convolution filters *[fh,fw,d_in,d_out]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
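        bias
            Bias array of shape *[d_out]*.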
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
>>> x = ivy.Container(a = ivy.eye(3, 3).reshape((1, 3, 3, 1)),
... b = ivy.eye(5, 5).reshape((1, 5, 5, 1)))
>>> filters = ivy.array([[2, 0, 1],
... [1, 3, 1],
... [0, 1, 1]], dtype=ivy.float32).reshape((3, 3, 1, 1))
>>> result = x.conv2d(filters, 2, 'SAME')
>>> print(result)
{
            a: ivy.array([[[[4.],[0.]],[[1.],[5.]]]]),
            b: ivy.array([[[[4.],[0.],[0.]],[[1.],[6.],[0.]],[[0.],[1.],[5.]]]])
}
"""
return self._static_conv2d(
self,
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
filter_format=filter_format,
x_dilations=x_dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
bias=bias,
out=out,
)
@staticmethod
def _static_conv1d_transpose(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, Tuple[int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
output_shape: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
filter_format: str = "channel_last",
data_format: str = "NWC",
dilations: Union[int, Tuple[int]] = 1,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
bias: Optional[ivy.Container] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.conv1d_transpose. This
method simply wraps the function, and so the docstring for
ivy.conv1d_transpose also applies to this method with minimal changes.
Parameters
----------
x
Input image *[batch_size,w,d_in]* or *[batch_size,d_in,w]*.
filters
Convolution filters *[fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
either the string ‘SAME’ (padding with zeros evenly), the string ‘VALID’ (no
padding), or a sequence of n (low, high) integer pairs that give the padding
to apply before and after each spatial dimension.
output_shape
Shape of the output (Default value = None)
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IOW",input data formats, while "channel_last" corresponds to "WOI".
data_format
The ordering of the dimensions in the input, one of "NWC" or "NCW". "NWC"
corresponds to input with shape (batch_size, width, channels), while "NCW"
corresponds to input with shape (batch_size, channels, width).
dilations
The dilation factor for each dimension of input. (Default value = 1)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
bias
Bias array of shape *[d_out]*.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the transpose convolution operation.
Examples
--------
>>> x = ivy.Container(a=ivy.random_normal(mean=0, std=1, shape=[1, 28, 3]),
... b=ivy.random_normal(mean=0, std=1, shape=[1, 56, 3]))
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 6, 3])
>>> y = ivy.Container.static_conv1d_transpose(x, filters, 2, 'SAME')
>>> print(y.shape)
{
a: [1,56,6],
b: [1,112,6]
}
"""
return ContainerBase.cont_multi_map_in_function(
"conv1d_transpose",
x,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
bias=bias,
out=out,
)
def conv1d_transpose(
self: ivy.Container,
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
output_shape: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
filter_format: str = "channel_last",
data_format: str = "NWC",
dilations: int = 1,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
bias: Optional[ivy.Container] = None,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> Union[ivy.Array, ivy.NativeArray, ivy.Container]:
"""ivy.Container instance method variant of ivy.conv1d_transpose. This
method simply wraps the function, and so the docstring for
ivy.conv1d_transpose also applies to this method with minimal changes.
Parameters
----------
self
Input image *[batch_size,w,d_in]* or *[batch_size,d_in,w]*.
filters
Convolution filters *[fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
either the string ‘SAME’ (padding with zeros evenly), the string ‘VALID’ (no
padding), or a sequence of n (low, high) integer pairs that give the padding
to apply before and after each spatial dimension.
output_shape
Shape of the output (Default value = None)
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IOW",input data formats, while "channel_last" corresponds to "WOI".
data_format
The ordering of the dimensions in the input, one of "NWC" or "NCW". "NWC"
corresponds to input with shape (batch_size, width, channels), while "NCW"
corresponds to input with shape (batch_size, channels, width).
dilations
The dilation factor for each dimension of input. (Default value = 1)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
bias
Bias array of shape *[d_out]*.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the transpose convolution operation.
Examples
--------
>>> x = ivy.Container(a=ivy.random_normal(mean=0, std=1, shape=[1, 28, 3]),
... b=ivy.random_normal(mean=0, std=1, shape=[1, 56, 3]))
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 6, 3])
>>> y = x.conv1d_transpose(filters, 2, 'SAME')
>>> print(y.shape)
{
a: ivy.Shape(1, 56, 6),
b: ivy.Shape(1, 112, 6)
}
"""
return self._static_conv1d_transpose(
self,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
bias=bias,
out=out,
)
@staticmethod
def _static_conv2d_transpose(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, Tuple[int, int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
output_shape: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
filter_format: str = "channel_last",
data_format: str = "NHWC",
dilations: Union[int, Tuple[int, int]] = 1,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
bias: Optional[ivy.Container] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.conv2d_transpose. This
        method simply wraps the function, and so the docstring for
        ivy.conv2d_transpose also applies to this method with minimal changes.
Parameters
----------
x
Input image *[batch_size,h,w,d_in]*.
filters
Convolution filters *[fh,fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
output_shape
Shape of the output (Default value = None)
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IOHW",input data formats, while "channel_last" corresponds to "HWOI".
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
>>> a = ivy.random_normal(mean=0, std=1, shape=[1, 14, 14, 3])
>>> b = ivy.random_normal(mean=0, std=1, shape=[1, 28, 28, 3])
>>> c = ivy.random_normal(mean=0, std=1, shape=[3, 3, 6, 3])
>>> d = ivy.random_normal(mean=0, std=1, shape=[3, 3, 6, 3])
>>> x = ivy.Container(a=a, b=b)
>>> filters = ivy.Container(c=c, d=d)
>>> y = ivy.Container.static_conv2d_transpose(x, filters, 2, 'SAME')
>>> print(y.shape)
{
a: {
c: [1,28,28,6],
d: [1,28,28,6]
},
b: {
c: [1,56,56,6],
d: [1,56,56,6]
}
}
"""
return ContainerBase.cont_multi_map_in_function(
"conv2d_transpose",
x,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
bias=bias,
out=out,
)
def conv2d_transpose(
self: ivy.Container,
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, Tuple[int, int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
output_shape: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
filter_format: str = "channel_last",
data_format: str = "NHWC",
dilations: Union[int, Tuple[int, int]] = 1,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
bias: Optional[ivy.Container] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.conv2d_transpose. This
        method simply wraps the function, and so the docstring for
        ivy.conv2d_transpose also applies to this method with minimal changes.
Parameters
----------
self
Input image *[batch_size,h,w,d_in]*.
filters
Convolution filters *[fh,fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
output_shape
Shape of the output (Default value = None)
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IOHW",input data formats, while "channel_last" corresponds to "HWOI".
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
>>> a = ivy.random_normal(mean=0, std=1, shape=[1, 14, 14, 3])
>>> b = ivy.random_normal(mean=0, std=1, shape=[1, 28, 28, 3])
>>> c = ivy.random_normal(mean=0, std=1, shape=[6, 3, 3, 3])
>>> d = ivy.random_normal(mean=0, std=1, shape=[6, 3, 3, 3])
>>> x = ivy.Container(a=a, b=b)
>>> filters = ivy.Container(c=c, d=d)
>>> y = x.conv2d_transpose(filters,2,'SAME')
>>> print(y.shape)
{
a: {
c: ivy.Shape(1, 28, 28, 3),
d: ivy.Shape(1, 28, 28, 3)
},
b: {
c: ivy.Shape(1, 56, 56, 3),
d: ivy.Shape(1, 56, 56, 3)
},
c: {
c: ivy.Shape(6, 6, 6, 3),
d: ivy.Shape(6, 6, 6, 3)
},
d: {
c: ivy.Shape(6, 6, 6, 3),
d: ivy.Shape(6, 6, 6, 3)
}
}
"""
return self._static_conv2d_transpose(
self,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
bias=bias,
out=out,
)
@staticmethod
def _static_depthwise_conv2d(
x: ivy.Container,
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, Tuple[int], Tuple[int, int], ivy.Container],
padding: Union[str, List[int], ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NHWC",
dilations: Union[int, Tuple[int], Tuple[int, int], ivy.Container] = 1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.depthwise_conv2d. This
method simply wraps the function, and so the docstring for
ivy.depthwise_conv2d also applies to this method with minimal changes.
Parameters
----------
x
Input image *[batch_size,h,w,d]*.
filters
Convolution filters *[fh,fw,d_in]*. (d_in must be the same as d from x)
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating the
per-dimension paddings.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
>>> a = ivy.randint(0, 255, shape=(1, 128, 128, 3)).astype(ivy.float32) / 255.0
>>> b = ivy.randint(0, 255, shape=(1, 128, 128, 3)).astype(ivy.float32) / 255.0
>>> inp = ivy.Container(a=a, b=b)
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3])
>>> y = ivy.Container.static_depthwise_conv2d(
... inp,
... filters,
... strides=2,
... padding='SAME')
>>> print(y.shape)
[1, 64, 64, 3]
"""
return ContainerBase.cont_multi_map_in_function(
"depthwise_conv2d",
x,
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def depthwise_conv2d(
self: ivy.Container,
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, Tuple[int], Tuple[int, int], ivy.Container],
padding: Union[str, List[int], ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NHWC",
dilations: Union[int, Tuple[int], Tuple[int, int], ivy.Container] = 1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.depthwise_conv2d. This
method simply wraps the function, and so the docstring for
ivy.depthwise_conv2d also applies to this method with minimal changes.
Parameters
----------
self
Input image *[batch_size,h,w,d]*.
filters
Convolution filters *[fh,fw,d_in]*. (d_in must be the same as d from self)
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating the
per-dimension paddings.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
>>> a = ivy.randint(0, 255, shape=(1, 128, 128, 3)).astype(ivy.float32) / 255.0
>>> b = ivy.randint(0, 255, shape=(1, 128, 128, 3)).astype(ivy.float32) / 255.0
>>> inp = ivy.Container(a=a, b=b)
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3])
>>> y = inp.depthwise_conv2d(filters, 2, 'SAME')
>>> print(y.shape)
[1, 64, 64, 3]
"""
return self._static_depthwise_conv2d(
self,
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_conv3d(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, Tuple[int, int, int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
data_format: str = "NDHWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int, int, int]] = 1,
dilations: Union[int, Tuple[int, int, int]] = 1,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
bias: Optional[ivy.Container] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.conv3d. This method
simply wraps the function, and so the docstring for ivy.conv3d also
applies to this method with minimal changes.
Parameters
----------
x
Input volume *[batch_size,d,h,w,d_in]*.
filters
            Convolution filters *[fd,fh,fw,d_in,d_out]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
"NDHWC" or "NCDHW". Defaults to "NDHWC".
filter_format
Either "channel_first" or "channel_last". Defaults to "channel_last".
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
        >>> x = ivy.Container(a = ivy.full((1, 2, 3, 3, 1), 0.5),
        ...                   b = ivy.full((1, 2, 5, 5, 1), 1.))
>>> filters = ivy.ones((3, 3, 3, 1, 1))
>>> result = ivy.Container.static_conv3d(x, filters, 2, 'SAME')
>>> print(result)
{
a: ivy.array([[[[[4.],[4.]],[[4.],[4.]]]]]),
b: ivy.array([[[[[8.],[12.],[8.]],[[12.],[18.],[12.]],[[8.],[12.],[8.]]]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"conv3d",
x,
filters,
strides,
padding,
data_format=data_format,
filter_format=filter_format,
x_dilations=x_dilations,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
bias=bias,
out=out,
)
def conv3d(
self: ivy.Container,
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, Tuple[int, int, int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
data_format: str = "NDHWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int, int, int]] = 1,
dilations: Union[int, Tuple[int, int, int]] = 1,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
bias: Optional[ivy.Container] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.conv3d. This method
simply wraps the function, and so the docstring for ivy.conv3d also
applies to this method with minimal changes.
Parameters
----------
        self
Input volume *[batch_size,d,h,w,d_in]*.
filters
            Convolution filters *[fd,fh,fw,d_in,d_out]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
"NDHWC" or "NCDHW". Defaults to "NDHWC".
filter_format
Either "channel_first" or "channel_last". Defaults to "channel_last".
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Examples
--------
        >>> x = ivy.Container(a = ivy.full((1, 2, 3, 3, 1), 0.5),
        ...                   b = ivy.full((1, 2, 5, 5, 1), 1.))
>>> filters = ivy.ones((3, 3, 3, 1, 1))
>>> result = x.conv3d(filters, 2, 'SAME')
>>> print(result)
{
a: ivy.array([[[[[4.],[4.]],[[4.],[4.]]]]]),
b: ivy.array([[[[[8.],[12.],[8.]],[[12.],[18.],[12.]],[[8.],[12.],[8.]]]]])
}
"""
return self._static_conv3d(
self,
filters,
strides,
padding,
data_format=data_format,
filter_format=filter_format,
x_dilations=x_dilations,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
bias=bias,
out=out,
)
@staticmethod
def _static_conv3d_transpose(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[
int, Tuple[int], Tuple[int, int], Tuple[int, int, int], ivy.Container
],
padding: Union[str, List[int], ivy.Container],
/,
*,
output_shape: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
filter_format: str = "channel_last",
data_format: str = "NDHWC",
dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
bias: Optional[ivy.Container] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.conv3d_transpose. This
method simply wraps the function, and so the docstring for
ivy.conv3d_transpose also applies to this method with minimal changes.
Parameters
----------
x
Input container with leaves of volume *[batch_size,d,h,w,d_in]*
or *[batch_size,d_in,d,h,w]*.
filters
Convolution filters *[fd,fh,fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
output_shape
Shape of the output (Default value = None)
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IODHW",input data formats, while "channel_last" corresponds to "DHWOI".
data_format
The ordering of the dimensions in the input, one of "NDHWC" or
"NCDHW". "NDHWC" corresponds to inputs with shape (batch_size,
depth, height, width, channels), while "NCDHW" corresponds
to input with shape (batch_size, channels, depth, height,
width).
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output container, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
            The result of the transpose convolution operation in a container.
        Examples
        --------
        >>> a = ivy.random_normal(mean=0, std=1, shape=[1, 3, 14, 14, 3])
        >>> b = ivy.random_normal(mean=0, std=1, shape=[1, 3, 28, 28, 3])
        >>> c = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 6, 3])
        >>> d = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 6, 3])
>>> x = ivy.Container(a=a, b=b)
>>> filters = ivy.Container(c=c, d=d)
>>> y = ivy.Container.static_conv3d_transpose(x, filters, 2, 'SAME')
>>> print(y.shape)
{
a: {
c: [1, 6, 28, 28, 6],
d: [1, 6, 28, 28, 6]
},
b: {
c: [1, 6, 56, 56, 6],
d: [1, 6, 56, 56, 6]
}
}
"""
return ContainerBase.cont_multi_map_in_function(
"conv3d_transpose",
x,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
bias=bias,
out=out,
)
def conv3d_transpose(
self: ivy.Container,
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[
int, Tuple[int], Tuple[int, int], Tuple[int, int, int], ivy.Container
],
padding: Union[str, List[int], ivy.Container],
/,
*,
output_shape: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
filter_format: str = "channel_last",
data_format: str = "NDHWC",
dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
bias: Optional[ivy.Container] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.conv3d_transpose. This
method simply wraps the function, and so the docstring for
ivy.conv3d_transpose also applies to this method with minimal changes.
Parameters
----------
self
Input container with leaves of volume *[batch_size,d,h,w,d_in]*
or *[batch_size,d_in,d,h,w]*.
filters
Convolution filters *[fd,fh,fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
output_shape
Shape of the output (Default value = None)
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IODHW",input data formats, while "channel_last" corresponds to "DHWOI".
data_format
The ordering of the dimensions in the input, one of "NDHWC" or
"NCDHW". "NDHWC" corresponds to inputs with shape (batch_size,
depth, height, width, channels), while "NCDHW" corresponds
to input with shape (batch_size, channels, depth, height,
width).
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output container, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
The result of the transpose convolution operation in a container.
Examples
--------
        >>> x = ivy.Container(a = ivy.ones((1, 3, 3, 3, 1)).astype(ivy.float32))
        >>> filters = ivy.ones((3, 3, 3, 1, 1)).astype(ivy.float32)
        >>> result = x.conv3d_transpose(filters, 2, 'SAME')
        >>> print(result.shape)
        {
            a: ivy.Shape(1, 6, 6, 6, 1)
        }
"""
return self._static_conv3d_transpose(
self,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
bias=bias,
out=out,
)
@staticmethod
def _static_lstm_update(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
init_h: Union[ivy.Array, ivy.NativeArray, ivy.Container],
init_c: Union[ivy.Array, ivy.NativeArray, ivy.Container],
kernel: Union[ivy.Array, ivy.NativeArray, ivy.Container],
recurrent_kernel: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
bias: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
recurrent_bias: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, ivy.Container]:
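        """ivy.Container static method variant of ivy.lstm_update. This method
        simply wraps the function, and so the docstring for ivy.lstm_update
        also applies to this method with minimal changes.
        """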
return ContainerBase.cont_multi_map_in_function(
"lstm_update",
x,
init_h,
init_c,
kernel,
recurrent_kernel,
bias=bias,
recurrent_bias=recurrent_bias,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def lstm_update(
self: ivy.Container,
init_h: Union[ivy.Array, ivy.NativeArray, ivy.Container],
init_c: Union[ivy.Array, ivy.NativeArray, ivy.Container],
kernel: Union[ivy.Array, ivy.NativeArray, ivy.Container],
recurrent_kernel: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
bias: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
recurrent_bias: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> Tuple[ivy.Container, ivy.Container]:
"""ivy.Container instance method variant of ivy.lstm_update. This
method simply wraps the function, and so the docstring for
ivy.lstm_update also applies to this method with minimal changes.
Parameters
----------
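        self
            input tensor of LSTM layer *[batch_shape, t, in]*.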
init_h
initial state tensor for the cell output *[batch_shape, out]*.
init_c
initial state tensor for the cell hidden state *[batch_shape, out]*.
kernel
weights for cell kernel *[in, 4 x out]*.
recurrent_kernel
weights for cell recurrent kernel *[out, 4 x out]*.
bias
bias for cell kernel *[4 x out]*. (Default value = None)
recurrent_bias
bias for cell recurrent kernel *[4 x out]*. (Default value = None)
Returns
-------
ret
hidden state for all timesteps *[batch_shape,t,out]* and cell state for last
timestep *[batch_shape,out]*
Examples
--------
>>> x = ivy.Container(
... a=ivy.random_normal(shape=(5, 20, 3)),
... b=ivy.random_normal(shape=(5, 20, 3))
... )
>>> h_i = ivy.random_normal(shape=(5, 6))
>>> c_i = ivy.random_normal(shape=(5, 6))
>>> kernel = ivy.random_normal(shape=(3, 4 * 6))
>>> rc = ivy.random_normal(shape=(6, 4 * 6))
>>> x.lstm_update(h_i, c_i, kernel, rc)
{
a: (tuple(2), <class ivy.array.array.Array>, shape=[5, 20, 6]),
b: (tuple(2), <class ivy.array.array.Array>, shape=[5, 20, 6])
}
"""
return self._static_lstm_update(
self,
init_h,
init_c,
kernel,
recurrent_kernel,
bias=bias,
recurrent_bias=recurrent_bias,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def _static_reduce_window(
operand: Union[ivy.Array, ivy.NativeArray, ivy.Container],
init_value: Union[int, float, ivy.Container],
computation: Union[Callable, ivy.Container],
window_dimensions: Union[int, Sequence[int], ivy.Container],
/,
*,
window_strides: Union[int, Sequence[int], ivy.Container] = 1,
padding: Union[str, int, Sequence[Tuple[int, int]], ivy.Container] = "VALID",
base_dilation: Union[int, Sequence[int], ivy.Container] = 1,
window_dilation: Union[int, Sequence[int], ivy.Container] = 1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
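        """ivy.Container static method variant of ivy.reduce_window. This method
        simply wraps the function, and so the docstring for ivy.reduce_window
        also applies to this method with minimal changes.
        """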
return ContainerBase.cont_multi_map_in_function(
"reduce_window",
operand,
init_value,
computation,
window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def reduce_window(
self: ivy.Container,
init_value: Union[int, float, ivy.Container],
computation: Union[Callable, ivy.Container],
window_dimensions: Union[int, Sequence[int], ivy.Container],
/,
*,
window_strides: Union[int, Sequence[int], ivy.Container] = 1,
padding: Union[str, int, Sequence[Tuple[int, int]], ivy.Container] = "VALID",
base_dilation: Union[int, Sequence[int], ivy.Container] = 1,
window_dilation: Union[int, Sequence[int], ivy.Container] = 1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.reduce_window. This
method simply wraps the function, and so the docstring for
ivy.reduce_window also applies to this method with minimal changes.
Parameters
----------
self
A container representing the base areas on which the window is going to
slide over.
init_value
The starting value for the reduction.
computation
The reduction function to apply to elements in each window.
window_dimensions
A sequence containing the window dimensions.
window_strides
A sequence containing the window strides.
padding
Either the string ‘SAME’ (padding with zeros evenly), the string ‘VALID’ (no
padding), or a sequence of n (low, high) integer pairs that give the padding
to apply before and after each spatial dimension.
base_dilation
A sequence containing the base dilation values.
window_dilation
A sequence containing the window dilation values.
Returns
-------
ret
The result of the pooling-like operation.
Examples
--------
>>> x = ivy.Container(
... a=ivy.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]]),
... b=ivy.array([[13, 14, 15, 16],
... [17, 18, 19, 20],
... [21, 22, 23, 24]])
... )
>>> x.reduce_window(0, ivy.sum, (2, 2))
{
a: ivy.array([[21 25 29]
[33 37 41]
[45 49 53]]),
b: ivy.array([[63 67 71]
[75 79 83]
[87 91 95]])
}
"""
return self._static_reduce_window(
self,
init_value,
computation,
window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
| ivy/ivy/data_classes/container/layers.py/0 | {
"file_path": "ivy/ivy/data_classes/container/layers.py",
"repo_id": "ivy",
"token_count": 51957
} | 12 |
# local
from .base import FactorizedTensor
import ivy
# global
import warnings
class TRTensor(FactorizedTensor):
def __init__(self, factors):
super().__init__()
shape, rank = TRTensor.validate_tr_tensor(factors)
self.shape = tuple(shape)
self.rank = tuple(rank)
self.factors = factors
# Built-ins #
# ----------#
def __getitem__(self, index):
return self.factors[index]
def __setitem__(self, index, value):
self.factors[index] = value
def __iter__(self):
for index in range(len(self)):
yield self[index]
def __len__(self):
return len(self.factors)
def __repr__(self):
message = (
f"factors list : rank-{self.rank} tensor ring tensor of shape {self.shape}"
)
return message
# Public Methods #
# ---------------#
def to_tensor(self):
return TRTensor.tr_to_tensor(self.factors)
def to_unfolded(self, mode):
return TRTensor.tr_to_unfolded(self.factors, mode)
def to_vec(self):
return TRTensor.tr_to_vec(self.factors)
# Properties #
# ---------------#
@property
def n_param(self):
factors = self.factors
total_params = sum(int(ivy.prod(tensor.shape)) for tensor in factors)
return total_params
# Class Methods #
# ---------------#
@staticmethod
def validate_tr_tensor(factors):
n_factors = len(factors)
if n_factors < 2:
raise ValueError(
"A Tensor Ring tensor should be composed of at least two factors."
f"However, {n_factors} factor was given."
)
rank = []
shape = []
next_rank = None
for index, factor in enumerate(factors):
current_rank, current_shape, next_rank = ivy.shape(factor)
# Check that factors are third order tensors
if len(factor.shape) != 3:
raise ValueError(
"TR expresses a tensor as third order factors (tr-cores).\n"
f"However, ivy.ndim(factors[{index}]) = {len(factor.shape)}"
)
# Consecutive factors should have matching ranks
if ivy.shape(factors[index - 1])[2] != current_rank:
raise ValueError(
"Consecutive factors should have matching ranks\n -- e.g."
" ivy.shape(factors[0])[2]) == ivy.shape(factors[1])[0])\nHowever,"
f" ivy.shape(factor[{index-1}])[2] =="
f" {ivy.shape(factors[index-1])[2]} but"
f" ivy.shape(factor[{index}])[0] == {current_rank}"
)
shape.append(current_shape)
rank.append(current_rank)
# Add last rank (boundary condition)
rank.append(next_rank)
return tuple(shape), tuple(rank)
@staticmethod
def tr_to_tensor(factors):
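        # Sequentially contract the ring cores, then close the ring with the
        # last core to recover the dense tensor of shape `full_shape`.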
full_shape = [f.shape[1] for f in factors]
full_tensor = ivy.reshape(factors[0], (-1, factors[0].shape[2]))
for factor in factors[1:-1]:
rank_prev, _, rank_next = factor.shape
factor = ivy.reshape(factor, (rank_prev, -1))
full_tensor = ivy.dot(full_tensor, factor)
full_tensor = ivy.reshape(full_tensor, (-1, rank_next))
full_tensor = ivy.reshape(
full_tensor, (factors[-1].shape[2], -1, factors[-1].shape[0])
)
full_tensor = ivy.moveaxis(full_tensor, 0, -1)
full_tensor = ivy.reshape(
full_tensor, (-1, factors[-1].shape[0] * factors[-1].shape[2])
)
factor = ivy.moveaxis(factors[-1], -1, 1)
factor = ivy.reshape(factor, (-1, full_shape[-1]))
full_tensor = ivy.dot(full_tensor, factor)
return ivy.reshape(full_tensor, full_shape)
@staticmethod
def tr_to_unfolded(factors, mode):
return ivy.unfold(TRTensor.tr_to_tensor(factors), mode)
@staticmethod
def tr_to_vec(factors):
return ivy.reshape(
TRTensor.tr_to_tensor(factors),
(-1,),
)
@staticmethod
def validate_tr_rank(tensor_shape, rank="same", rounding="round"):
if rounding == "ceil":
rounding_fun = ivy.ceil
elif rounding == "floor":
rounding_fun = ivy.floor
elif rounding == "round":
rounding_fun = ivy.round
else:
raise ValueError(
f"Rounding should be round, floor or ceil, but got {rounding}"
)
if rank == "same":
rank = float(1)
n_dim = len(tensor_shape)
if n_dim == 2:
warnings.warn(
"Determining the TR-rank for the trivial case of a matrix"
f" (order 2 tensor) of shape {tensor_shape}, not a higher-order tensor."
)
if isinstance(rank, float):
# Choose the *same* rank for each mode
n_param_tensor = ivy.prod(tensor_shape) * rank
# R_k I_k R_{k+1} = R^2 I_k
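            # With a uniform rank R, the total parameter count is
            # R**2 * sum(tensor_shape); matching it to n_param_tensor and
            # solving for R gives the square root below.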
solution = int(
rounding_fun(ivy.sqrt(n_param_tensor / ivy.sum(tensor_shape)))
)
rank = (solution,) * (n_dim + 1)
else:
# Check user input for potential errors
n_dim = len(tensor_shape)
if isinstance(rank, int):
rank = (rank,) * (n_dim + 1)
elif n_dim + 1 != len(rank):
message = (
"Provided incorrect number of ranks. Should verify len(rank) =="
f" len(tensor.shape)+1, but len(rank) = {len(rank)} while"
f" len(tensor.shape)+1 = {n_dim + 1}"
)
raise ValueError(message)
# Check first and last rank
if rank[0] != rank[-1]:
message = (
f"Provided rank[0] == {rank[0]} and rank[-1] == {rank[-1]}"
" but boundary conditions dictate rank[0] == rank[-1]"
)
raise ValueError(message)
return list(rank)
@staticmethod
def tr_n_param(tensor_shape, rank):
factor_params = []
for i, s in enumerate(tensor_shape):
factor_params.append(rank[i] * s * rank[i + 1])
return ivy.sum(factor_params)
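# A minimal usage sketch (illustrative factor shapes, not part of the public API):
#     factors = [ivy.random_normal(shape=(2, 4, 3)),
#                ivy.random_normal(shape=(3, 5, 2))]  # boundary ranks 2 ... 2 match
#     tr = TRTensor(factors)   # tr.shape == (4, 5), tr.rank == (2, 3, 2)
#     full = tr.to_tensor()    # dense ivy array of shape (4, 5)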
| ivy/ivy/data_classes/factorized_tensor/tr_tensor.py/0 | {
"file_path": "ivy/ivy/data_classes/factorized_tensor/tr_tensor.py",
"repo_id": "ivy",
"token_count": 3207
} | 13 |
//! A view on a memory slice hosted on a device.
use super::{ArrayElement, ArrayShape, Literal, PjRtDevice, Shape};
use crate::{c_lib, Error, Result};
use pyo3::prelude::*;
/// A buffer represents a view on a memory slice hosted on a device.
#[derive(Clone)]
#[pyclass(unsendable)]
pub struct PjRtBuffer {
pub(super) buffer: c_lib::pjrt_buffer,
pub(super) client: super::PjRtClient,
}
impl PjRtBuffer {
/// The client that owns this buffer.
pub fn client(&self) -> &super::PjRtClient {
&self.client
}
/// Copy the buffer to a different device.
pub fn copy_to_device(&self, device: PjRtDevice) -> Result<PjRtBuffer> {
let mut buffer: c_lib::pjrt_buffer = std::ptr::null_mut();
let status =
unsafe { c_lib::pjrt_buffer_copy_to_device(self.buffer, device.device, &mut buffer) };
super::handle_status(status)?;
Ok(Self { buffer, client: self.client.clone() })
}
/// Copy the buffer back to the host as a literal.
pub fn to_literal_sync(&self) -> Result<Literal> {
let mut result: c_lib::literal = std::ptr::null_mut();
let status = unsafe { c_lib::pjrt_buffer_to_literal_sync(self.buffer, &mut result) };
super::handle_status(status)?;
Ok(Literal(result))
}
/// Retrieve the shape used by this buffer.
pub fn on_device_shape(&self) -> Result<Shape> {
let shape = unsafe { c_lib::pjrt_buffer_on_device_shape(self.buffer) };
let c_shape = super::shape::CShape::from_ptr(shape);
c_shape.shape()
}
/// Copy the data stored in a buffer to host memory in a blocking way.
pub fn copy_raw_to_host_sync<T: ArrayElement>(
&self,
dst: &mut [T],
offset: usize,
) -> Result<()> {
let shape = ArrayShape::try_from(&self.on_device_shape()?)?;
let on_host = T::TY;
let on_device = shape.primitive_type().element_type()?;
if on_device != on_host {
Err(Error::ElementTypeMismatch { on_device, on_host })?
}
if offset + dst.len() > shape.element_count() {
Err(Error::TargetBufferIsTooLarge { offset, shape, buffer_len: dst.len() })?
}
let status = unsafe {
c_lib::pjrt_buffer_copy_raw_to_host_sync(
self.buffer,
dst.as_mut_ptr() as *mut libc::c_void,
offset,
dst.len() * T::ELEMENT_SIZE_IN_BYTES,
)
};
super::handle_status(status)?;
Ok(())
}
}
impl Drop for PjRtBuffer {
fn drop(&mut self) {
unsafe { c_lib::pjrt_buffer_free(self.buffer) }
}
}
| ivy/ivy/engines/XLA/rust_api/src/wrappers/pjrt_buffer.rs/0 | {
"file_path": "ivy/ivy/engines/XLA/rust_api/src/wrappers/pjrt_buffer.rs",
"repo_id": "ivy",
"token_count": 1187
} | 14 |
import jax
def if_else(cond, body_fn, orelse_fn, vars):
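    # Evaluate `cond` on the named variables, then dispatch to `body_fn` or
    # `orelse_fn` through jax.lax.cond, passing the variable values positionally.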
cond_vars = list(vars.values())
pred = cond(**vars)
with jax.disable_jit():
final_vars = jax.lax.cond(pred, body_fn, orelse_fn, *cond_vars)
return final_vars
def while_loop(test_fn, body_fn, vars):
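    # Repeatedly apply `body_fn` to the loop variables while `test_fn` holds,
    # delegating to jax.lax.while_loop with wrappers that unpack the carry.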
def body_fn_wrapper(loop_vars):
return body_fn(*loop_vars)
def test_fn_wrapper(loop_vars):
return test_fn(*loop_vars)
if isinstance(vars, dict):
vars = list(vars.values())
with jax.disable_jit():
final_loop_vars = jax.lax.while_loop(test_fn_wrapper, body_fn_wrapper, vars)
return final_loop_vars
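# Minimal usage sketches (hypothetical functions and values):
#     def pred(x): return x > 0
#     if_else(pred, lambda x: x + 1, lambda x: x - 1, {"x": 3})   # -> 4
#     def test_fn(i, total): return i < 3
#     def body_fn(i, total): return [i + 1, total + i]
#     while_loop(test_fn, body_fn, {"i": 0, "total": 0})          # -> [3, 3]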
| ivy/ivy/functional/backends/jax/control_flow_ops.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/control_flow_ops.py",
"repo_id": "ivy",
"token_count": 294
} | 15 |